1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28
29 #include "vmwgfx_bo.h"
30 #include "vmwgfx_drv.h"
31 #include "vmwgfx_resource_priv.h"
32 #include "vmwgfx_so.h"
33 #include "vmwgfx_binding.h"
34 #include "vmw_surface_cache.h"
35 #include "device_include/svga3d_surfacedefs.h"
36
37 #include <drm/ttm/ttm_placement.h>
38
39 #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
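
/*
 * The driver stores surface flags internally as a 64-bit
 * SVGA3dSurfaceAllFlags, while the ioctl ABI splits them into two 32-bit
 * halves (legacy requests only carry the lower half). SVGA3D_FLAGS_64()
 * stitches the halves back together; the uint64_t cast makes the shift
 * happen in 64-bit arithmetic instead of overflowing a 32-bit int.
 */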

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object.
 * @srf:            The surface metadata.
 * @master:         Master of the creating client. Used for security check.
 */
struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
        struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
        uint32_t face;
        uint32_t mip;
        uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
        struct vmw_surface_cache cache;
        u32 num_subres;
        SVGA3dBox boxes[] __counted_by(num_subres);
};
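
/*
 * A subresource here is one face (or array layer) at one mip level; the
 * tracker keeps one dirty box per subresource, and __counted_by() lets the
 * compiler bounds-check accesses to the flexible boxes[] array.
 */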

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
                               struct drm_vmw_gb_surface_create_ext_req *req,
                               struct drm_vmw_gb_surface_create_rep *rep,
                               struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
                                  struct drm_vmw_surface_arg *req,
                                  struct drm_vmw_gb_surface_ref_ext_rep *rep,
                                  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
                                        size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
        .base_obj_to_res = vmw_user_surface_base_to_res,
        .res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
        &user_surface_conv;

static const struct vmw_res_func vmw_legacy_surface_func = {
        .res_type = vmw_res_surface,
        .needs_guest_memory = false,
        .may_evict = true,
        .prio = 1,
        .dirty_prio = 1,
        .type_name = "legacy surfaces",
        .domain = VMW_BO_DOMAIN_GMR,
        .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
        .create = &vmw_legacy_srf_create,
        .destroy = &vmw_legacy_srf_destroy,
        .bind = &vmw_legacy_srf_bind,
        .unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
        .res_type = vmw_res_surface,
        .needs_guest_memory = true,
        .may_evict = true,
        .prio = 1,
        .dirty_prio = 2,
        .type_name = "guest backed surfaces",
        .domain = VMW_BO_DOMAIN_MOB,
        .busy_domain = VMW_BO_DOMAIN_MOB,
        .create = vmw_gb_surface_create,
        .destroy = vmw_gb_surface_destroy,
        .bind = vmw_gb_surface_bind,
        .unbind = vmw_gb_surface_unbind,
        .dirty_alloc = vmw_surface_dirty_alloc,
        .dirty_free = vmw_surface_dirty_free,
        .dirty_sync = vmw_surface_dirty_sync,
        .dirty_range_add = vmw_surface_dirty_range_add,
        .clean = vmw_surface_clean,
};

/*
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
        SVGA3dCmdHeader header;
        SVGA3dCmdSurfaceDMA body;
        SVGA3dCopyBox cb;
        SVGA3dCmdSurfaceDMASuffix suffix;
};
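
/*
 * A backup or restore of a legacy surface is emitted as one such block
 * (header + body + copy box + suffix) per subresource, i.e. num_sizes
 * consecutive SVGA_3D_CMD_SURFACE_DMA commands.
 */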

/*
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineSurface body;
};

/*
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
        return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
        return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
                sizeof(SVGA3dSize);
}

/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
        return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
                                       void *cmd_space)
{
        struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
                cmd_space;

        cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
                                      void *cmd_space)
{
        struct vmw_surface_define *cmd = (struct vmw_surface_define *)
                cmd_space;
        struct drm_vmw_size *src_size;
        SVGA3dSize *cmd_size;
        uint32_t cmd_len;
        int i;

        cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
                sizeof(SVGA3dSize);

        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
        /*
         * Downcast of surfaceFlags: the value was upcast to 64 bits when
         * received from user-space, since the driver stores flags
         * internally as 64-bit. The legacy surface define only supports
         * 32-bit flags.
         */
        cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
        cmd->body.format = srf->metadata.format;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

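        /*
         * Step past the fixed-size part of the command; the per-mip
         * SVGA3dSize array follows immediately in the space reserved by
         * vmw_surface_define_size().
         */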
        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->metadata.sizes;

        for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = src_size->width;
                cmd_size->height = src_size->height;
                cmd_size->depth = src_size->depth;
        }
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
                                   void *cmd_space,
                                   const SVGAGuestPtr *ptr,
                                   bool to_surface)
{
        uint32_t i;
        struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
        const struct SVGA3dSurfaceDesc *desc =
                vmw_surface_get_desc(srf->metadata.format);

        for (i = 0; i < srf->metadata.num_sizes; ++i) {
                SVGA3dCmdHeader *header = &cmd->header;
                SVGA3dCmdSurfaceDMA *body = &cmd->body;
                SVGA3dCopyBox *cb = &cmd->cb;
                SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
                const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
                const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

                header->id = SVGA_3D_CMD_SURFACE_DMA;
                header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

                body->guest.ptr = *ptr;
                body->guest.ptr.offset += cur_offset->bo_offset;
                body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
                body->host.sid = srf->res.id;
                body->host.face = cur_offset->face;
                body->host.mipmap = cur_offset->mip;
                body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                                  SVGA3D_READ_HOST_VRAM);
                cb->x = 0;
                cb->y = 0;
                cb->z = 0;
                cb->srcx = 0;
                cb->srcy = 0;
                cb->srcz = 0;
                cb->w = cur_size->width;
                cb->h = cur_size->height;
                cb->d = cur_size->depth;

                suffix->suffixSize = sizeof(*suffix);
                suffix->maximumOffset =
                        vmw_surface_get_image_buffer_size(desc, cur_size,
                                                          body->guest.pitch);
                suffix->flags.discard = 0;
                suffix->flags.unsynchronized = 0;
                suffix->flags.reserved = 0;
                ++cmd;
        }
}

/**
 * vmw_hw_surface_destroy - destroy a device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts the resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        void *cmd;

        if (res->func->destroy == vmw_gb_surface_destroy) {
                (void) vmw_gb_surface_destroy(res);
                return;
        }

        if (res->id != -1) {
                cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
                if (unlikely(!cmd))
                        return;

                vmw_surface_destroy_encode(res->id, cmd);
                vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());

                /*
                 * TODO: Use an atomic used_memory_size, or a separate
                 * lock, to avoid taking dev_priv::cmdbuf_mutex in the
                 * destroy path.
                 */

                mutex_lock(&dev_priv->cmdbuf_mutex);
                dev_priv->used_memory_size -= res->guest_memory_size;
                mutex_unlock(&dev_priv->cmdbuf_mutex);
        }
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Creates the device surface if @res doesn't already have a hw id.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        uint32_t submit_size;
        uint8_t *cmd;
        int ret;

        if (likely(res->id != -1))
                return 0;

        srf = vmw_res_to_srf(res);
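
        /*
         * Stay within the legacy surface memory budget; returning -EBUSY
         * tells the resource validation code to evict other surfaces and
         * retry, per the function comment above.
         */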
        if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
                     dev_priv->memory_size))
                return -EBUSY;

        /*
         * Alloc id for the resource.
         */

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        /*
         * Encode the surface define command.
         */

        submit_size = vmw_surface_define_size(srf);
        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        vmw_surface_define_encode(srf, cmd);
        vmw_cmd_commit(dev_priv, submit_size);
        vmw_fifo_resource_inc(dev_priv);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size += res->guest_memory_size;
        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf,
                              bool bind)
{
        SVGAGuestPtr ptr;
        struct vmw_fence_obj *fence;
        uint32_t submit_size;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        uint8_t *cmd;
        struct vmw_private *dev_priv = res->dev_priv;

        BUG_ON(!val_buf->bo);
        submit_size = vmw_surface_dma_size(srf);
        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd))
                return -ENOMEM;

        vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
        vmw_surface_dma_encode(srf, cmd, &ptr, bind);

        vmw_cmd_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        if (!res->guest_memory_dirty)
                return 0;

        return vmw_legacy_srf_dma(res, val_buf, true);
}

/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Readback - only true if the surface is dirty.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        if (unlikely(readback))
                return vmw_legacy_srf_dma(res, val_buf, false);
        return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(res->id == -1);

        /*
         * Encode the surface destroy command.
         */

        submit_size = vmw_surface_destroy_size();
        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd))
                return -ENOMEM;

        vmw_surface_destroy_encode(res->id, cmd);
        vmw_cmd_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size -= res->guest_memory_size;

        /*
         * Release the surface ID.
         */

        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_resource *res = &srf->res;

        BUG_ON(!res_free);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
                                (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                &vmw_legacy_surface_func);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        /*
         * The surface won't be visible to hardware until a
         * surface validate.
         */

        INIT_LIST_HEAD(&srf->view_list);
        res->hw_destroy = vmw_hw_surface_destroy;
        return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_surface,
                              prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        WARN_ON_ONCE(res->dirty);
        if (user_srf->master)
                drm_master_put(&user_srf->master);
        kfree(srf->offsets);
        kfree(srf->metadata.sizes);
        kfree(srf->snooper.image);
        ttm_prime_object_kfree(user_srf, prime);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;

        /*
         * Dumb buffers own the resource and they'll unref the
         * resource themselves
         */
        if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
                return;

        vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_surface_metadata *metadata;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t num_sizes;
        const SVGA3dSurfaceDesc *desc;

        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
                        return -EINVAL;
                num_sizes += req->mip_levels[i];
        }

        if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
            num_sizes == 0)
                return -EINVAL;

        desc = vmw_surface_get_desc(req->format);
        if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
                VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
                               req->format);
                return -EINVAL;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(!user_srf)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        srf = &user_srf->srf;
        metadata = &srf->metadata;
        res = &srf->res;

        /* Driver internally stores as 64-bit flags */
        metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
        metadata->format = req->format;
        metadata->scanout = req->scanout;

        memcpy(metadata->mip_levels, req->mip_levels,
               sizeof(metadata->mip_levels));
        metadata->num_sizes = num_sizes;
        metadata->sizes =
                memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
                                  req->size_addr,
                                  metadata->num_sizes, sizeof(*metadata->sizes));
        if (IS_ERR(metadata->sizes)) {
                ret = PTR_ERR(metadata->sizes);
                goto out_no_sizes;
        }
        srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
                                     GFP_KERNEL);
        if (unlikely(!srf->offsets)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }

        metadata->base_size = *srf->metadata.sizes;
        metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        metadata->multisample_count = 0;
        metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
        metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

        cur_bo_offset = 0;
        cur_offset = srf->offsets;
        cur_size = metadata->sizes;

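        /*
         * Walk the faces and their mip chains in device order, recording
         * each image's offset into the backing store; the running total
         * ends up as the surface's total backing-store size.
         */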
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                for (j = 0; j < metadata->mip_levels[i]; ++j) {
                        uint32_t stride = vmw_surface_calculate_pitch(
                                                  desc, cur_size);

                        cur_offset->face = i;
                        cur_offset->mip = j;
                        cur_offset->bo_offset = cur_bo_offset;
                        cur_bo_offset += vmw_surface_get_image_buffer_size
                                (desc, cur_size, stride);
                        ++cur_offset;
                        ++cur_size;
                }
        }
        res->guest_memory_size = cur_bo_offset;
        if (!file_priv->atomic &&
            metadata->scanout &&
            metadata->num_sizes == 1 &&
            metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
            metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
            metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
                const struct SVGA3dSurfaceDesc *desc =
                        vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
                const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
                                              VMW_CURSOR_SNOOP_HEIGHT *
                                              desc->pitchBytesPerBlock;
                srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
                if (!srf->snooper.image) {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_no_copy;
                }
        } else {
                srf->snooper.image = NULL;
        }

        if (drm_is_primary_client(file_priv))
                user_srf->master = drm_file_get_master(file_priv);

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        /*
         * A gb-aware client referencing a surface will expect a backup
         * buffer to be present.
         */
        if (dev_priv->has_mob) {
                struct vmw_bo_params params = {
                        .domain = VMW_BO_DOMAIN_SYS,
                        .busy_domain = VMW_BO_DOMAIN_SYS,
                        .bo_type = ttm_bo_type_device,
                        .size = res->guest_memory_size,
                        .pin = false
                };

                ret = vmw_gem_object_create(dev_priv,
                                            &params,
                                            &res->guest_memory_bo);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
                }
                vmw_bo_add_detached_resource(res->guest_memory_bo, res);
        }

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_prime_object_init(tfile, res->guest_memory_size,
                                    &user_srf->prime,
                                    VMW_RES_SURFACE,
                                    &vmw_user_surface_base_release);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->sid = user_srf->prime.base.handle;
        vmw_resource_unreference(&res);

        return 0;
out_no_copy:
        kfree(srf->offsets);
out_no_offsets:
        kfree(metadata->sizes);
out_no_sizes:
        ttm_prime_object_kfree(user_srf, prime);
out_unlock:
        return ret;
}

static struct vmw_user_surface *
vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
                                   u32 handle)
{
        struct vmw_user_surface *user_srf = NULL;
        struct vmw_surface *surf;
        struct ttm_base_object *base;

        surf = vmw_bo_surface(bo);
        if (surf) {
                rcu_read_lock();
                user_srf = container_of(surf, struct vmw_user_surface, srf);
                base = &user_srf->prime.base;
                if (base && !kref_get_unless_zero(&base->refcount)) {
                        drm_dbg_driver(&vmw->drm,
                                       "%s: referencing a stale surface handle %d\n",
                                       __func__, handle);
                        base = NULL;
                        user_srf = NULL;
                }
                rcu_read_unlock();
        }

        return user_srf;
}

struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
                                                  struct vmw_bo *bo,
                                                  u32 handle)
{
        struct vmw_user_surface *user_srf =
                vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
        struct vmw_surface *surf = NULL;
        struct ttm_base_object *base;

        if (user_srf) {
                surf = vmw_surface_reference(&user_srf->srf);
                base = &user_srf->prime.base;
                ttm_base_object_unref(&base);
        }
        return surf;
}

u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
                                         struct vmw_bo *bo,
                                         u32 handle)
{
        struct vmw_user_surface *user_srf =
                vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
        int surf_handle = 0;
        struct ttm_base_object *base;

        if (user_srf) {
                base = &user_srf->prime.base;
                surf_handle = (u32)base->handle;
                ttm_base_object_unref(&base);
        }
        return surf_handle;
}

static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
                                            struct drm_file *file_priv,
                                            u32 fd, u32 *handle,
                                            struct ttm_base_object **base_p)
{
        struct ttm_base_object *base;
        struct vmw_bo *bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_surface *user_srf;
        int ret;

        ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
        if (ret) {
                drm_warn(&dev_priv->drm,
                         "Wasn't able to find user buffer for fd = %u.\n", fd);
                return ret;
        }

        ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
        if (ret) {
                drm_warn(&dev_priv->drm,
                         "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
                return ret;
        }

        user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
        if (WARN_ON(!user_srf)) {
                drm_warn(&dev_priv->drm,
                         "User surface fd %d (handle %d) is null.\n", fd, *handle);
                ret = -EINVAL;
                goto out;
        }

        base = &user_srf->prime.base;
        ret = ttm_ref_object_add(tfile, base, NULL, false);
        if (ret) {
                drm_warn(&dev_priv->drm,
                         "Couldn't add an object ref for the buffer (%d).\n", *handle);
                goto out;
        }

        *base_p = base;
out:
        vmw_user_bo_unref(&bo);

        return ret;
}

static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
                             struct drm_file *file_priv,
                             uint32_t u_handle,
                             enum drm_vmw_handle_type handle_type,
                             struct ttm_base_object **base_p)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_surface *user_srf = NULL;
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;

        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (ret)
                        return vmw_buffer_prime_to_surface_base(dev_priv,
                                                                file_priv,
                                                                u_handle,
                                                                &handle,
                                                                base_p);
        } else {
                handle = u_handle;
        }

        ret = -EINVAL;
        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
        if (unlikely(!base)) {
                VMW_DEBUG_USER("Could not find surface to reference.\n");
                goto out_no_lookup;
        }

        if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
                VMW_DEBUG_USER("Referenced object is not a surface.\n");
                goto out_bad_resource;
        }
        if (handle_type != DRM_VMW_HANDLE_PRIME) {
                bool require_exist = false;

                user_srf = container_of(base, struct vmw_user_surface,
                                        prime.base);

                /* Error out if we are unauthenticated primary */
                if (drm_is_primary_client(file_priv) &&
                    !file_priv->authenticated) {
                        ret = -EACCES;
                        goto out_bad_resource;
                }

                /*
                 * Make sure the surface creator has the same
                 * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
                    user_srf->master != file_priv->master)
                        require_exist = true;

                if (unlikely(drm_is_render_client(file_priv)))
                        require_exist = true;

                ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
                }
        }

        *base_p = base;
        return 0;

out_bad_resource:
        ttm_base_object_unref(&base);
out_no_lookup:
        if (handle_type == DRM_VMW_HANDLE_PRIME)
                (void) ttm_ref_object_base_unref(tfile, handle);

        return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret;

        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;

        /* Downcast of flags when sending back to user space */
        rep->flags = (uint32_t)srf->metadata.flags;
        rep->format = srf->metadata.format;
        memcpy(rep->mip_levels, srf->metadata.mip_levels,
               sizeof(srf->metadata.mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, &srf->metadata.base_size,
                                   sizeof(srf->metadata.base_size));
        if (unlikely(ret != 0)) {
                VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
                               srf->metadata.num_sizes);
                ttm_ref_object_base_unref(tfile, base->handle);
                ret = -EFAULT;
        }

        ttm_base_object_unref(&base);

        return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface.
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_surface_metadata *metadata = &srf->metadata;
        uint32_t cmd_len, cmd_id, submit_len;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface body;
        } *cmd;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface_v2 body;
        } *cmd2;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface_v3 body;
        } *cmd3;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface_v4 body;
        } *cmd4;

        if (likely(res->id != -1))
                return 0;

        vmw_fifo_resource_inc(dev_priv);
        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

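        /*
         * Pick the newest DEFINE_GB_SURFACE variant the device supports:
         * v4 (SM5) adds bufferByteStride, v3 (SM4.1) adds the multisample
         * pattern and quality level, v2 adds arraySize; plain surfaces use
         * the original command.
         */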
        if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
                cmd_len = sizeof(cmd4->body);
                submit_len = sizeof(*cmd4);
        } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
                cmd_len = sizeof(cmd3->body);
                submit_len = sizeof(*cmd3);
        } else if (metadata->array_size > 0) {
                /* VMW_SM_4 support verified at creation time. */
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
                cmd_len = sizeof(cmd2->body);
                submit_len = sizeof(*cmd2);
        } else {
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
                cmd_len = sizeof(cmd->body);
                submit_len = sizeof(*cmd);
        }

        cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
        cmd2 = (typeof(cmd2))cmd;
        cmd3 = (typeof(cmd3))cmd;
        cmd4 = (typeof(cmd4))cmd;
        if (unlikely(!cmd)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
                cmd4->header.id = cmd_id;
                cmd4->header.size = cmd_len;
                cmd4->body.sid = srf->res.id;
                cmd4->body.surfaceFlags = metadata->flags;
                cmd4->body.format = metadata->format;
                cmd4->body.numMipLevels = metadata->mip_levels[0];
                cmd4->body.multisampleCount = metadata->multisample_count;
                cmd4->body.multisamplePattern = metadata->multisample_pattern;
                cmd4->body.qualityLevel = metadata->quality_level;
                cmd4->body.autogenFilter = metadata->autogen_filter;
                cmd4->body.size.width = metadata->base_size.width;
                cmd4->body.size.height = metadata->base_size.height;
                cmd4->body.size.depth = metadata->base_size.depth;
                cmd4->body.arraySize = metadata->array_size;
                cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
        } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
                cmd3->header.id = cmd_id;
                cmd3->header.size = cmd_len;
                cmd3->body.sid = srf->res.id;
                cmd3->body.surfaceFlags = metadata->flags;
                cmd3->body.format = metadata->format;
                cmd3->body.numMipLevels = metadata->mip_levels[0];
                cmd3->body.multisampleCount = metadata->multisample_count;
                cmd3->body.multisamplePattern = metadata->multisample_pattern;
                cmd3->body.qualityLevel = metadata->quality_level;
                cmd3->body.autogenFilter = metadata->autogen_filter;
                cmd3->body.size.width = metadata->base_size.width;
                cmd3->body.size.height = metadata->base_size.height;
                cmd3->body.size.depth = metadata->base_size.depth;
                cmd3->body.arraySize = metadata->array_size;
        } else if (metadata->array_size > 0) {
                cmd2->header.id = cmd_id;
                cmd2->header.size = cmd_len;
                cmd2->body.sid = srf->res.id;
                cmd2->body.surfaceFlags = metadata->flags;
                cmd2->body.format = metadata->format;
                cmd2->body.numMipLevels = metadata->mip_levels[0];
                cmd2->body.multisampleCount = metadata->multisample_count;
                cmd2->body.autogenFilter = metadata->autogen_filter;
                cmd2->body.size.width = metadata->base_size.width;
                cmd2->body.size.height = metadata->base_size.height;
                cmd2->body.size.depth = metadata->base_size.depth;
                cmd2->body.arraySize = metadata->array_size;
        } else {
                cmd->header.id = cmd_id;
                cmd->header.size = cmd_len;
                cmd->body.sid = srf->res.id;
                cmd->body.surfaceFlags = metadata->flags;
                cmd->body.format = metadata->format;
                cmd->body.numMipLevels = metadata->mip_levels[0];
                cmd->body.multisampleCount = metadata->multisample_count;
                cmd->body.autogenFilter = metadata->autogen_filter;
                cmd->body.size.width = metadata->base_size.width;
                cmd->body.size.height = metadata->base_size.height;
                cmd->body.size.depth = metadata->base_size.depth;
        }

        vmw_cmd_commit(dev_priv, submit_len);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        vmw_fifo_resource_dec(dev_priv);
        return ret;
}

static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd2;
        uint32_t submit_size;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

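        /*
         * BIND_GB_SURFACE attaches the MOB to the surface id; when the
         * guest copy is dirty, an UPDATE_GB_SURFACE is appended so the
         * device picks up the new contents.
         */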
        submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);

        cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd1))
                return -ENOMEM;

        cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.sid = res->id;
        cmd1->body.mobid = bo->resource->start;
        if (res->guest_memory_dirty) {
                cmd2 = (void *) &cmd1[1];
                cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
        }
        vmw_cmd_commit(dev_priv, submit_size);

        if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
                /* We've just made a full upload. Clear dirty regions. */
                vmw_bo_dirty_clear_res(res);
        }

        res->guest_memory_dirty = false;

        return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd2;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd3;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

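        /*
         * On eviction, either read the surface back into the MOB
         * (readback) or just invalidate the device copy, then detach the
         * MOB by binding SVGA3D_INVALID_ID.
         */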
        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd))
                return -ENOMEM;

        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.sid = res->id;
                cmd3 = (void *) &cmd1[1];
        } else {
                cmd2 = (void *) cmd;
                cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                cmd3 = (void *) &cmd2[1];
        }

        cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd3->header.size = sizeof(cmd3->body);
        cmd3->body.sid = res->id;
        cmd3->body.mobid = SVGA3D_INVALID_ID;

        vmw_cmd_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBSurface body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

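        /*
         * Hold binding_mutex while views and context bindings that still
         * reference this surface are destroyed and scrubbed, before the
         * DESTROY_GB_SURFACE command is queued.
         */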
        mutex_lock(&dev_priv->binding_mutex);
        vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
        vmw_binding_res_list_scrub(&res->binding_head);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(!cmd)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}
1431
1432 /**
1433  * vmw_gb_surface_define_ioctl - Ioctl function implementing
1434  * the user surface define functionality.
1435  *
1436  * @dev: Pointer to a struct drm_device.
1437  * @data: Pointer to data copied from / to user-space.
1438  * @file_priv: Pointer to a drm file private structure.
1439  */
1440 int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1441                                 struct drm_file *file_priv)
1442 {
1443         union drm_vmw_gb_surface_create_arg *arg =
1444             (union drm_vmw_gb_surface_create_arg *)data;
1445         struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1446         struct drm_vmw_gb_surface_create_ext_req req_ext;
1447
1448         req_ext.base = arg->req;
1449         req_ext.version = drm_vmw_gb_surface_v1;
1450         req_ext.svga3d_flags_upper_32_bits = 0;
1451         req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
1452         req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
1453         req_ext.buffer_byte_stride = 0;
1454         req_ext.must_be_zero = 0;
1455
1456         return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
1457 }
1458
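/*
 * Illustrative user-space call sequence (editor's sketch, not part of the
 * driver): the ioctl uses the usual DRM write/read union, with the request
 * in arg.req and the reply in arg.rep. The fd and use() below, and the use
 * of libdrm's drmCommandWriteRead(), are assumptions for the example.
 *
 *	union drm_vmw_gb_surface_create_arg arg = { 0 };
 *
 *	arg.req.format = SVGA3D_X8R8G8B8;
 *	arg.req.mip_levels = 1;
 *	arg.req.multisample_count = 0;
 *	arg.req.array_size = 0;
 *	arg.req.autogen_filter = SVGA3D_TEX_FILTER_NONE;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.base_size.width = 64;
 *	arg.req.base_size.height = 64;
 *	arg.req.base_size.depth = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		use(arg.rep.handle, arg.rep.buffer_handle);
 */
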
1459 /**
1460  * vmw_gb_surface_reference_ioctl - Ioctl function implementing
1461  * the user surface reference functionality.
1462  *
1463  * @dev: Pointer to a struct drm_device.
1464  * @data: Pointer to data copied from / to user-space.
1465  * @file_priv: Pointer to a drm file private structure.
1466  */
1467 int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1468                                    struct drm_file *file_priv)
1469 {
1470         union drm_vmw_gb_surface_reference_arg *arg =
1471             (union drm_vmw_gb_surface_reference_arg *)data;
1472         struct drm_vmw_surface_arg *req = &arg->req;
1473         struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
1474         struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
1475         int ret;
1476
1477         ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
1478
1479         if (unlikely(ret != 0))
1480                 return ret;
1481
1482         rep->creq = rep_ext.creq.base;
1483         rep->crep = rep_ext.crep;
1484
1485         return ret;
1486 }
1487
1488 /**
1489  * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
1490  * the extended user surface define functionality.
1491  *
1492  * @dev: Pointer to a struct drm_device.
1493  * @data: Pointer to data copied from / to user-space.
1494  * @file_priv: Pointer to a drm file private structure.
1495  */
1496 int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
1497                                 struct drm_file *file_priv)
1498 {
1499         union drm_vmw_gb_surface_create_ext_arg *arg =
1500             (union drm_vmw_gb_surface_create_ext_arg *)data;
1501         struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
1502         struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1503
1504         return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
1505 }
1506
1507 /**
1508  * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
1509  * the extended user surface reference functionality.
1510  *
1511  * @dev: Pointer to a struct drm_device.
1512  * @data: Pointer to data copied from / to user-space.
1513  * @file_priv: Pointer to a drm file private structure.
1514  */
1515 int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
1516                                    struct drm_file *file_priv)
1517 {
1518         union drm_vmw_gb_surface_reference_ext_arg *arg =
1519             (union drm_vmw_gb_surface_reference_ext_arg *)data;
1520         struct drm_vmw_surface_arg *req = &arg->req;
1521         struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
1522
1523         return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
1524 }
1525
1526 /**
1527  * vmw_gb_surface_define_internal - Internal helper implementing
1528  * the user surface define functionality.
1529  *
1530  * @dev: Pointer to a struct drm_device.
1531  * @req: Request argument from user-space.
1532  * @rep: Response argument to user-space.
1533  * @file_priv: Pointer to a drm file private structure.
1534  */
1535 static int
1536 vmw_gb_surface_define_internal(struct drm_device *dev,
1537                                struct drm_vmw_gb_surface_create_ext_req *req,
1538                                struct drm_vmw_gb_surface_create_rep *rep,
1539                                struct drm_file *file_priv)
1540 {
1541         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1542         struct vmw_private *dev_priv = vmw_priv(dev);
1543         struct vmw_user_surface *user_srf;
1544         struct vmw_surface_metadata metadata = {0};
1545         struct vmw_surface *srf;
1546         struct vmw_resource *res;
1547         struct vmw_resource *tmp;
1548         int ret = 0;
1549         uint32_t backup_handle = 0;
1550         SVGA3dSurfaceAllFlags svga3d_flags_64 =
1551                 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
1552                                 req->base.svga3d_flags);
1553
1554         /* array_size must be zero for non-SM4 hosts. */
1555         if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
1556                 VMW_DEBUG_USER("SM4 surface not supported.\n");
1557                 return -EINVAL;
1558         }
1559
1560         if (!has_sm4_1_context(dev_priv)) {
1561                 if (req->svga3d_flags_upper_32_bits != 0)
1562                         ret = -EINVAL;
1563
1564                 if (req->base.multisample_count != 0)
1565                         ret = -EINVAL;
1566
1567                 if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
1568                         ret = -EINVAL;
1569
1570                 if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
1571                         ret = -EINVAL;
1572
1573                 if (ret) {
1574                         VMW_DEBUG_USER("SM4.1 surface not supported.\n");
1575                         return ret;
1576                 }
1577         }
1578
1579         if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
1580                 VMW_DEBUG_USER("SM5 surface not supported.\n");
1581                 return -EINVAL;
1582         }
1583
1584         if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
1585             req->base.multisample_count == 0) {
1586                 VMW_DEBUG_USER("Invalid sample count.\n");
1587                 return -EINVAL;
1588         }
1589
1590         if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
1591                 VMW_DEBUG_USER("Invalid mip level.\n");
1592                 return -EINVAL;
1593         }
1594
1595         metadata.flags = svga3d_flags_64;
1596         metadata.format = req->base.format;
1597         metadata.mip_levels[0] = req->base.mip_levels;
1598         metadata.multisample_count = req->base.multisample_count;
1599         metadata.multisample_pattern = req->multisample_pattern;
1600         metadata.quality_level = req->quality_level;
1601         metadata.array_size = req->base.array_size;
1602         metadata.buffer_byte_stride = req->buffer_byte_stride;
1603         metadata.num_sizes = 1;
1604         metadata.base_size = req->base.base_size;
1605         metadata.scanout = req->base.drm_surface_flags &
1606                 drm_vmw_surface_flag_scanout;
1607
1608         /* Define a surface based on the parameters. */
1609         ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
1610         if (ret != 0) {
1611                 VMW_DEBUG_USER("Failed to define surface.\n");
1612                 return ret;
1613         }
1614
1615         user_srf = container_of(srf, struct vmw_user_surface, srf);
1616         if (drm_is_primary_client(file_priv))
1617                 user_srf->master = drm_file_get_master(file_priv);
1618
1619         res = &user_srf->srf.res;
1620
1621         if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
1622                 ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
1623                                          &res->guest_memory_bo);
1624                 if (ret == 0) {
1625                         if (res->guest_memory_bo->is_dumb) {
1626                                 VMW_DEBUG_USER("Can't back up surface with a dumb buffer.\n");
1627                                 vmw_user_bo_unref(&res->guest_memory_bo);
1628                                 ret = -EINVAL;
1629                                 goto out_unlock;
1630                         } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
1631                                 VMW_DEBUG_USER("Surface backup buffer too small.\n");
1632                                 vmw_user_bo_unref(&res->guest_memory_bo);
1633                                 ret = -EINVAL;
1634                                 goto out_unlock;
1635                         } else {
1636                                 backup_handle = req->base.buffer_handle;
1637                         }
1638                 }
1639         } else if (req->base.drm_surface_flags &
1640                    (drm_vmw_surface_flag_create_buffer |
1641                     drm_vmw_surface_flag_coherent)) {
1642                 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
1643                                                         res->guest_memory_size,
1644                                                         &backup_handle,
1645                                                         &res->guest_memory_bo);
1646         }
1647
1648         if (unlikely(ret != 0)) {
1649                 vmw_resource_unreference(&res);
1650                 goto out_unlock;
1651         }
1652
1653         if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
1654                 struct vmw_bo *backup = res->guest_memory_bo;
1655
1656                 ttm_bo_reserve(&backup->tbo, false, false, NULL);
1657                 if (!res->func->dirty_alloc)
1658                         ret = -EINVAL;
1659                 if (!ret)
1660                         ret = vmw_bo_dirty_add(backup);
1661                 if (!ret) {
1662                         res->coherent = true;
1663                         ret = res->func->dirty_alloc(res);
1664                 }
1665                 ttm_bo_unreserve(&backup->tbo);
1666                 if (ret) {
1667                         vmw_resource_unreference(&res);
1668                         goto out_unlock;
1669                 }
1670
1671         }
1672
1673         tmp = vmw_resource_reference(res);
1674         ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
1675                                     VMW_RES_SURFACE,
1676                                     &vmw_user_surface_base_release);
1677
1678         if (unlikely(ret != 0)) {
1679                 vmw_resource_unreference(&tmp);
1680                 vmw_resource_unreference(&res);
1681                 goto out_unlock;
1682         }
1683
1684         rep->handle      = user_srf->prime.base.handle;
1685         rep->backup_size = res->guest_memory_size;
1686         if (res->guest_memory_bo) {
1687                 vmw_bo_add_detached_resource(res->guest_memory_bo, res);
1688                 rep->buffer_map_handle =
1689                         drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
1690                 rep->buffer_size = res->guest_memory_bo->tbo.base.size;
1691                 rep->buffer_handle = backup_handle;
1692         } else {
1693                 rep->buffer_map_handle = 0;
1694                 rep->buffer_size = 0;
1695                 rep->buffer_handle = SVGA3D_INVALID_ID;
1696         }
1697         vmw_resource_unreference(&res);
1698
1699 out_unlock:
1700         return ret;
1701 }
1702
1703 /**
1704  * vmw_gb_surface_reference_internal - Internal helper implementing
1705  * the user surface reference functionality.
1706  *
1707  * @dev: Pointer to a struct drm_device.
1708  * @req: Pointer to user-space request surface arg.
1709  * @rep: Pointer to response to user-space.
1710  * @file_priv: Pointer to a drm file private structure.
1711  */
1712 static int
1713 vmw_gb_surface_reference_internal(struct drm_device *dev,
1714                                   struct drm_vmw_surface_arg *req,
1715                                   struct drm_vmw_gb_surface_ref_ext_rep *rep,
1716                                   struct drm_file *file_priv)
1717 {
1718         struct vmw_private *dev_priv = vmw_priv(dev);
1719         struct vmw_surface *srf;
1720         struct vmw_user_surface *user_srf;
1721         struct vmw_surface_metadata *metadata;
1722         struct ttm_base_object *base;
1723         u32 backup_handle;
1724         int ret;
1725
1726         ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
1727                                            req->handle_type, &base);
1728         if (unlikely(ret != 0))
1729                 return ret;
1730
1731         user_srf = container_of(base, struct vmw_user_surface, prime.base);
1732         srf = &user_srf->srf;
1733         if (!srf->res.guest_memory_bo) {
1734                 DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
1735                 ret = -EINVAL;
1736                 goto out_bad_resource;
1737         }
1737         metadata = &srf->metadata;
1738
1739         mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->guest_memory_bo */
1740         ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
1741                                     &backup_handle);
1742         mutex_unlock(&dev_priv->cmdbuf_mutex);
1743         if (ret != 0) {
1744                 drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
1745                         req->sid);
1746                 goto out_bad_resource;
1747         }
1748
1749         rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
1750         rep->creq.base.format = metadata->format;
1751         rep->creq.base.mip_levels = metadata->mip_levels[0];
1752         rep->creq.base.drm_surface_flags = 0;
1753         rep->creq.base.multisample_count = metadata->multisample_count;
1754         rep->creq.base.autogen_filter = metadata->autogen_filter;
1755         rep->creq.base.array_size = metadata->array_size;
1756         rep->creq.base.buffer_handle = backup_handle;
1757         rep->creq.base.base_size = metadata->base_size;
1758         rep->crep.handle = user_srf->prime.base.handle;
1759         rep->crep.backup_size = srf->res.guest_memory_size;
1760         rep->crep.buffer_handle = backup_handle;
1761         rep->crep.buffer_map_handle =
1762                 drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
1763         rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;
1764
1765         rep->creq.version = drm_vmw_gb_surface_v1;
1766         rep->creq.svga3d_flags_upper_32_bits =
1767                 SVGA3D_FLAGS_UPPER_32(metadata->flags);
1768         rep->creq.multisample_pattern = metadata->multisample_pattern;
1769         rep->creq.quality_level = metadata->quality_level;
1770         rep->creq.must_be_zero = 0;
1771
1772 out_bad_resource:
1773         ttm_base_object_unref(&base);
1774
1775         return ret;
1776 }
1777
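/*
 * Worked example of the 64-bit flag split used above (editor's note):
 * for metadata->flags == 0x0000000300000001ULL,
 * SVGA3D_FLAGS_LOWER_32() yields 0x00000001 and SVGA3D_FLAGS_UPPER_32()
 * yields 0x00000003; user-space recombines the two 32-bit halves into
 * the original SVGA3dSurfaceAllFlags value.
 */
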
1778 /**
1779  * vmw_subres_dirty_add - Add a dirty region to a subresource
1780  * @dirty: The surface's dirty tracker.
1781  * @loc_start: The location corresponding to the start of the region.
1782  * @loc_end: The location corresponding to the exclusive end of the region.
1783  *
1784  * Since @loc_start and @loc_end are assumed to represent a sequential
1785  * range of backing store memory, if the region spans multiple lines
1786  * then, regardless of the x coordinate, full lines are dirtied.
1787  * Correspondingly, if the region spans multiple z slices, full rather
1788  * than partial z slices are dirtied.
1789  */
1790 static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
1791                                  const struct vmw_surface_loc *loc_start,
1792                                  const struct vmw_surface_loc *loc_end)
1793 {
1794         const struct vmw_surface_cache *cache = &dirty->cache;
1795         SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
1796         u32 mip = loc_start->sub_resource % cache->num_mip_levels;
1797         const struct drm_vmw_size *size = &cache->mip[mip].size;
1798         u32 box_c2 = box->z + box->d;
1799
1800         if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
1801                 return;
1802
1803         if (box->d == 0 || box->z > loc_start->z)
1804                 box->z = loc_start->z;
1805         if (box_c2 < loc_end->z)
1806                 box->d = loc_end->z - box->z;
1807
1808         if (loc_start->z + 1 == loc_end->z) {
1809                 box_c2 = box->y + box->h;
1810                 if (box->h == 0 || box->y > loc_start->y)
1811                         box->y = loc_start->y;
1812                 if (box_c2 < loc_end->y)
1813                         box->h = loc_end->y - box->y;
1814
1815                 if (loc_start->y + 1 == loc_end->y) {
1816                         box_c2 = box->x + box->w;
1817                         if (box->w == 0 || box->x > loc_start->x)
1818                                 box->x = loc_start->x;
1819                         if (box_c2 < loc_end->x)
1820                                 box->w = loc_end->x - box->x;
1821                 } else {
1822                         box->x = 0;
1823                         box->w = size->width;
1824                 }
1825         } else {
1826                 box->y = 0;
1827                 box->h = size->height;
1828                 box->x = 0;
1829                 box->w = size->width;
1830         }
1831 }
1832
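/*
 * Worked example for vmw_subres_dirty_add() (editor's note, assuming a
 * 64x64 mip level and an initially empty box): a range from
 * loc_start = (x=10, y=2, z=0) to the exclusive end
 * loc_end = (x=5, y=4, z=1) stays within one z slice but spans two
 * lines, so full lines are dirtied and the box becomes
 * x=0, w=64, y=2, h=2, z=0, d=1.
 */
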
1833 /**
1834  * vmw_subres_dirty_full - Mark a full subresource as dirty
1835  * @dirty: The surface's dirty tracker.
1836  * @subres: The subresource
1837  */
1838 static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
1839 {
1840         const struct vmw_surface_cache *cache = &dirty->cache;
1841         u32 mip = subres % cache->num_mip_levels;
1842         const struct drm_vmw_size *size = &cache->mip[mip].size;
1843         SVGA3dBox *box = &dirty->boxes[subres];
1844
1845         box->x = 0;
1846         box->y = 0;
1847         box->z = 0;
1848         box->w = size->width;
1849         box->h = size->height;
1850         box->d = size->depth;
1851 }
1852
1853 /*
1854  * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for
1855  * texture surfaces.
1856  */
1857 static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1858                                             size_t start, size_t end)
1859 {
1860         struct vmw_surface_dirty *dirty =
1861                 (struct vmw_surface_dirty *) res->dirty;
1862         size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
1863         struct vmw_surface_loc loc1, loc2;
1864         const struct vmw_surface_cache *cache;
1865
1866         start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
1867         end = min(end, backup_end) - res->guest_memory_offset;
1868         cache = &dirty->cache;
1869         vmw_surface_get_loc(cache, &loc1, start);
1870         vmw_surface_get_loc(cache, &loc2, end - 1);
1871         vmw_surface_inc_loc(cache, &loc2);
1872
1873         if (loc1.sheet != loc2.sheet) {
1874                 u32 sub_res;
1875
1876                 /*
1877                  * The range spans multiple multisample sheets. An optimized
1878                  * implementation would compute the dirty region per sheet
1879                  * and take the union, but since this is not a common case,
1880                  * just dirty the whole surface.
1881                  */
1882                 for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
1883                         vmw_subres_dirty_full(dirty, sub_res);
1884                 return;
1885         }
1886         if (loc1.sub_resource + 1 == loc2.sub_resource) {
1887                 /* Dirty range covers a single sub-resource */
1888                 vmw_subres_dirty_add(dirty, &loc1, &loc2);
1889         } else {
1890                 /* Dirty range covers multiple sub-resources */
1891                 struct vmw_surface_loc loc_min, loc_max;
1892                 u32 sub_res;
1893
1894                 vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
1895                 vmw_subres_dirty_add(dirty, &loc1, &loc_max);
1896                 vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
1897                 vmw_subres_dirty_add(dirty, &loc_min, &loc2);
1898                 for (sub_res = loc1.sub_resource + 1;
1899                      sub_res < loc2.sub_resource - 1; ++sub_res)
1900                         vmw_subres_dirty_full(dirty, sub_res);
1901         }
1902 }
1903
1904 /*
1905  * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for
1906  * buffer surfaces.
1907  */
1908 static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
1909                                             size_t start, size_t end)
1910 {
1911         struct vmw_surface_dirty *dirty =
1912                 (struct vmw_surface_dirty *) res->dirty;
1913         const struct vmw_surface_cache *cache = &dirty->cache;
1914         size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
1915         SVGA3dBox *box = &dirty->boxes[0];
1916         u32 box_c2;
1917
1918         box->h = box->d = 1;
1919         start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
1920         end = min(end, backup_end) - res->guest_memory_offset;
1921         box_c2 = box->x + box->w;
1922         if (box->w == 0 || box->x > start)
1923                 box->x = start;
1924         if (box_c2 < end)
1925                 box->w = end - box->x;
1926 }
1927
1928 /*
1929  * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1930  */
1931 static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
1932                                         size_t end)
1933 {
1934         struct vmw_surface *srf = vmw_res_to_srf(res);
1935
1936         if (WARN_ON(end <= res->guest_memory_offset ||
1937                     start >= res->guest_memory_offset + res->guest_memory_size))
1938                 return;
1939
1940         if (srf->metadata.format == SVGA3D_BUFFER)
1941                 vmw_surface_buf_dirty_range_add(res, start, end);
1942         else
1943                 vmw_surface_tex_dirty_range_add(res, start, end);
1944 }
1945
1946 /*
1947  * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1948  */
1949 static int vmw_surface_dirty_sync(struct vmw_resource *res)
1950 {
1951         struct vmw_private *dev_priv = res->dev_priv;
1952         u32 i, num_dirty;
1953         struct vmw_surface_dirty *dirty =
1954                 (struct vmw_surface_dirty *) res->dirty;
1955         size_t alloc_size;
1956         const struct vmw_surface_cache *cache = &dirty->cache;
1957         struct {
1958                 SVGA3dCmdHeader header;
1959                 SVGA3dCmdDXUpdateSubResource body;
1960         } *cmd1;
1961         struct {
1962                 SVGA3dCmdHeader header;
1963                 SVGA3dCmdUpdateGBImage body;
1964         } *cmd2;
1965         void *cmd;
1966
1967         num_dirty = 0;
1968         for (i = 0; i < dirty->num_subres; ++i) {
1969                 const SVGA3dBox *box = &dirty->boxes[i];
1970
1971                 if (box->d)
1972                         num_dirty++;
1973         }
1974
1975         if (!num_dirty)
1976                 goto out;
1977
1978         alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
1979         cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
1980         if (!cmd)
1981                 return -ENOMEM;
1982
1983         cmd1 = cmd;
1984         cmd2 = cmd;
1985
1986         for (i = 0; i < dirty->num_subres; ++i) {
1987                 const SVGA3dBox *box = &dirty->boxes[i];
1988
1989                 if (!box->d)
1990                         continue;
1991
1992                 /*
1993                  * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
1994                  * UPDATE_GB_IMAGE is not.
1995                  */
1996                 if (has_sm4_context(dev_priv)) {
1997                         cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
1998                         cmd1->header.size = sizeof(cmd1->body);
1999                         cmd1->body.sid = res->id;
2000                         cmd1->body.subResource = i;
2001                         cmd1->body.box = *box;
2002                         cmd1++;
2003                 } else {
2004                         cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2005                         cmd2->header.size = sizeof(cmd2->body);
2006                         cmd2->body.image.sid = res->id;
2007                         cmd2->body.image.face = i / cache->num_mip_levels;
2008                         cmd2->body.image.mipmap = i -
2009                                 (cache->num_mip_levels * cmd2->body.image.face);
2010                         cmd2->body.box = *box;
2011                         cmd2++;
2012                 }
2013
2014         }
2015         vmw_cmd_commit(dev_priv, alloc_size);
2016  out:
2017         memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
2018                dirty->num_subres);
2019
2020         return 0;
2021 }
2022
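/*
 * Subresource indexing example (editor's note): the legacy
 * UPDATE_GB_IMAGE path above decomposes the linear index as
 * i = face * num_mip_levels + mipmap, so with num_mip_levels == 3,
 * subresource i == 7 maps to face 2, mipmap 1.
 */
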
2023 /*
2024  * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
2025  */
2026 static int vmw_surface_dirty_alloc(struct vmw_resource *res)
2027 {
2028         struct vmw_surface *srf = vmw_res_to_srf(res);
2029         const struct vmw_surface_metadata *metadata = &srf->metadata;
2030         struct vmw_surface_dirty *dirty;
2031         u32 num_layers = 1;
2032         u32 num_mip;
2033         u32 num_subres;
2034         u32 num_samples;
2035         size_t dirty_size;
2036         int ret;
2037
2038         if (metadata->array_size)
2039                 num_layers = metadata->array_size;
2040         else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2041                 num_layers *= SVGA3D_MAX_SURFACE_FACES;
2042
2043         num_mip = metadata->mip_levels[0];
2044         if (!num_mip)
2045                 num_mip = 1;
2046
2047         num_subres = num_layers * num_mip;
2048         dirty_size = struct_size(dirty, boxes, num_subres);
2049
2050         dirty = kvzalloc(dirty_size, GFP_KERNEL);
2051         if (!dirty) {
2052                 ret = -ENOMEM;
2053                 goto out_no_dirty;
2054         }
2055
2056         num_samples = max_t(u32, 1, metadata->multisample_count);
2057         ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
2058                                       num_mip, num_layers, num_samples,
2059                                       &dirty->cache);
2060         if (ret)
2061                 goto out_no_cache;
2062
2063         dirty->num_subres = num_subres;
2064         res->dirty = (struct vmw_resource_dirty *) dirty;
2065
2066         return 0;
2067
2068 out_no_cache:
2069         kvfree(dirty);
2070 out_no_dirty:
2071         return ret;
2072 }
2073
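/*
 * Sizing example (editor's note): for a cube map with 3 mip levels,
 * num_layers == SVGA3D_MAX_SURFACE_FACES == 6 and num_subres == 18, so
 * struct_size() sizes one struct vmw_surface_dirty header plus 18
 * trailing SVGA3dBox entries for a single kvzalloc() call.
 */
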
2074 /*
2075  * vmw_surface_dirty_free - The surface's dirty_free callback
2076  */
2077 static void vmw_surface_dirty_free(struct vmw_resource *res)
2078 {
2079         struct vmw_surface_dirty *dirty =
2080                 (struct vmw_surface_dirty *) res->dirty;
2081
2082         kvfree(dirty);
2083         res->dirty = NULL;
2084 }
2085
2086 /*
2087  * vmw_surface_clean - The surface's clean callback
2088  */
2089 static int vmw_surface_clean(struct vmw_resource *res)
2090 {
2091         struct vmw_private *dev_priv = res->dev_priv;
2092         size_t alloc_size;
2093         struct {
2094                 SVGA3dCmdHeader header;
2095                 SVGA3dCmdReadbackGBSurface body;
2096         } *cmd;
2097
2098         alloc_size = sizeof(*cmd);
2099         cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
2100         if (!cmd)
2101                 return -ENOMEM;
2102
2103         cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
2104         cmd->header.size = sizeof(cmd->body);
2105         cmd->body.sid = res->id;
2106         vmw_cmd_commit(dev_priv, alloc_size);
2107
2108         return 0;
2109 }
2110
2111 /**
2112  * vmw_gb_surface_define - Define a private GB surface
2113  *
2114  * @dev_priv: Pointer to a device private.
2115  * @req: Metadata representing the surface to create.
2116  * @srf_out: Allocated surface; set to NULL on failure.
2117  *
2118  * GB surfaces allocated by this function will not have a user mode handle,
2119  * and thus will only be visible to vmwgfx. For optimization reasons the
2120  * surface may later be given a user mode handle by another function to
2121  * make it available to user mode drivers.
2122  */
2123 int vmw_gb_surface_define(struct vmw_private *dev_priv,
2124                           const struct vmw_surface_metadata *req,
2125                           struct vmw_surface **srf_out)
2126 {
2127         struct vmw_surface_metadata *metadata;
2128         struct vmw_user_surface *user_srf;
2129         struct vmw_surface *srf;
2130         u32 sample_count = 1;
2131         u32 num_layers = 1;
2132         int ret;
2133
2134         *srf_out = NULL;
2135
2136         if (req->scanout) {
2137                 if (!vmw_surface_is_screen_target_format(req->format)) {
2138                         VMW_DEBUG_USER("Invalid Screen Target surface format.\n");
2139                         return -EINVAL;
2140                 }
2141
2142                 if (req->base_size.width > dev_priv->texture_max_width ||
2143                     req->base_size.height > dev_priv->texture_max_height) {
2144                         VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
2145                                        req->base_size.width,
2146                                        req->base_size.height,
2147                                        dev_priv->texture_max_width,
2148                                        dev_priv->texture_max_height);
2149                         return -EINVAL;
2150                 }
2151         } else {
2152                 const SVGA3dSurfaceDesc *desc =
2153                         vmw_surface_get_desc(req->format);
2154
2155                 if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
2156                         VMW_DEBUG_USER("Invalid surface format.\n");
2157                         return -EINVAL;
2158                 }
2159         }
2160
2161         if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
2162                 return -EINVAL;
2163
2164         if (req->num_sizes != 1)
2165                 return -EINVAL;
2166
2167         if (req->sizes != NULL)
2168                 return -EINVAL;
2169
2170         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
2171         if (unlikely(!user_srf)) {
2172                 ret = -ENOMEM;
2173                 goto out_unlock;
2174         }
2175
2176         *srf_out = &user_srf->srf;
2177
2178         srf = &user_srf->srf;
2179         srf->metadata = *req;
2180         srf->offsets = NULL;
2181
2182         metadata = &srf->metadata;
2183
2184         if (metadata->array_size)
2185                 num_layers = req->array_size;
2186         else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
2187                 num_layers = SVGA3D_MAX_SURFACE_FACES;
2188
2189         if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
2190                 sample_count = metadata->multisample_count;
2191
2192         srf->res.guest_memory_size =
2193                 vmw_surface_get_serialized_size_extended(
2194                                 metadata->format,
2195                                 metadata->base_size,
2196                                 metadata->mip_levels[0],
2197                                 num_layers,
2198                                 sample_count);
2199
2200         if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
2201                 srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
2202
2203         /*
2204          * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout
2205          * surface larger than the STDU max width/height. This is a
2206          * workaround to support creation of the big framebuffer that some
2207          * user-space requests for the whole topology. That big framebuffer
2208          * is never bound to a screen target, since prepare_fb creates a
2209          * separate surface, so it is safe to omit the flag here.
2210          */
2211         if (dev_priv->active_display_unit == vmw_du_screen_target &&
2212             metadata->scanout &&
2213             metadata->base_size.width <= dev_priv->stdu_max_width &&
2214             metadata->base_size.height <= dev_priv->stdu_max_height)
2215                 metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
2216
2217         /*
2218          * From this point, the generic resource management functions
2219          * destroy the object on failure.
2220          */
2221         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
2222
2223         return ret;
2224
2225 out_unlock:
2226         return ret;
2227 }
2228
2229 static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
2230                                                   int bpp)
2231 {
2232         switch (bpp) {
2233         case 8: /* DRM_FORMAT_C8 */
2234                 return SVGA3D_P8;
2235         case 16: /* DRM_FORMAT_RGB565 */
2236                 return SVGA3D_R5G6B5;
2237         case 32: /* DRM_FORMAT_XRGB8888 */
2238                 if (has_sm4_context(vmw))
2239                         return SVGA3D_B8G8R8X8_UNORM;
2240                 return SVGA3D_X8R8G8B8;
2241         default:
2242                 drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
2243                 return SVGA3D_X8R8G8B8;
2244         }
2245 }
2246
2247 /**
2248  * vmw_dumb_create - Create a dumb kms buffer
2249  *
2250  * @file_priv: Pointer to a struct drm_file identifying the caller.
2251  * @dev: Pointer to the drm device.
2252  * @args: Pointer to a struct drm_mode_create_dumb structure
2253  * Return: Zero on success, negative error code on failure.
2254  *
2255  * This is a driver callback for the core drm create_dumb functionality.
2256  * Note that this is very similar to the vmw_bo_alloc ioctl, except
2257  * that the arguments have a different format.
2258  */
2259 int vmw_dumb_create(struct drm_file *file_priv,
2260                     struct drm_device *dev,
2261                     struct drm_mode_create_dumb *args)
2262 {
2263         struct vmw_private *dev_priv = vmw_priv(dev);
2264         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
2265         struct vmw_bo *vbo = NULL;
2266         struct vmw_resource *res = NULL;
2267         union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
2268         struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
2269         int ret;
2270         struct drm_vmw_size drm_size = {
2271                 .width = args->width,
2272                 .height = args->height,
2273                 .depth = 1,
2274         };
2275         SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
2276         const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
2277         SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
2278                                       SVGA3D_SURFACE_HINT_RENDERTARGET |
2279                                       SVGA3D_SURFACE_SCREENTARGET;
2280
2281         if (vmw_surface_is_dx_screen_target_format(format)) {
2282                 flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
2283                          SVGA3D_SURFACE_BIND_RENDER_TARGET;
2284         }
2285
2286         /*
2287          * Without MOB support we're just going to use a raw memory buffer,
2288          * because we wouldn't be able to support full surface coherency
2289          * without MOBs. There is also no reason to support surface coherency
2290          * without 3D (i.e. GPU usage on the host), because then all the
2291          * contents would be rendered guest side.
2292          */
2293         if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
2294                 int cpp = DIV_ROUND_UP(args->bpp, 8);
2295
2296                 switch (cpp) {
2297                 case 1: /* DRM_FORMAT_C8 */
2298                 case 2: /* DRM_FORMAT_RGB565 */
2299                 case 4: /* DRM_FORMAT_XRGB8888 */
2300                         break;
2301                 default:
2302                         /*
2303                          * Dumb buffers don't allow anything else.
2304                          * This is tested via IGT's dumb_buffers
2305                          */
2306                         return -EINVAL;
2307                 }
2308
2309                 args->pitch = args->width * cpp;
2310                 args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
2311
2312                 ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
2313                                                         args->size, &args->handle,
2314                                                         &vbo);
2315                 /* drop reference from allocate - handle holds it now */
2316                 drm_gem_object_put(&vbo->tbo.base);
2317                 return ret;
2318         }
2319
2320         req->version = drm_vmw_gb_surface_v1;
2321         req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
2322         req->quality_level = SVGA3D_MS_QUALITY_NONE;
2323         req->buffer_byte_stride = 0;
2324         req->must_be_zero = 0;
2325         req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
2326         req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
2327         req->base.format = (uint32_t)format;
2328         req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
2329         req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
2330         req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
2331         req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
2332         req->base.base_size.width = args->width;
2333         req->base.base_size.height = args->height;
2334         req->base.base_size.depth = 1;
2335         req->base.array_size = 0;
2336         req->base.mip_levels = 1;
2337         req->base.multisample_count = 0;
2338         req->base.buffer_handle = SVGA3D_INVALID_ID;
2339         req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
2340         ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
2341         if (ret) {
2342                 drm_warn(dev, "Unable to create a dumb buffer\n");
2343                 return ret;
2344         }
2345
2346         args->handle = arg.rep.buffer_handle;
2347         args->size = arg.rep.buffer_size;
2348         args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
2349
2350         ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
2351                                               user_surface_converter,
2352                                               &res);
2353         if (ret) {
2354                 drm_err(dev, "Created resource handle doesn't exist!\n");
2355                 goto err;
2356         }
2357
2358         vbo = res->guest_memory_bo;
2359         vbo->is_dumb = true;
2360         vbo->dumb_surface = vmw_res_to_srf(res);
2361
2362 err:
2363         if (res)
2364                 vmw_resource_unreference(&res);
2365         if (ret)
2366                 ttm_ref_object_base_unref(tfile, arg.rep.handle);
2367
2368         return ret;
2369 }
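
/*
 * Pitch/size example for the no-MOB path in vmw_dumb_create() (editor's
 * note, assuming 4 KiB pages): an 800x600 request at bpp == 32 gives
 * cpp = 4, pitch = 800 * 4 = 3200 bytes and
 * size = ALIGN(3200 * 600, PAGE_SIZE) = ALIGN(1920000, 4096) = 1921024.
 */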