// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

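/*
 * struct svga_3d_compat_cap - Legacy 3D capability layout.
 *
 * Used to report device capabilities to user-space clients that are not
 * guest-backed-object aware: a single SVGA3dCapsRecord header followed by
 * one (index, value) pair per device capability.
 */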
struct svga_3d_compat_cap {
        SVGA3dCapsRecordHeader header;
        SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

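/**
 * vmw_getparam_ioctl - Handler for the DRM_VMW_GET_PARAM ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_getparam_arg; the queried value is
 * written back to its value member.
 * @file_priv: Identifies the calling file.
 *
 * Reports a single device or driver parameter to user-space. Querying
 * DRM_VMW_PARAM_MAX_MOB_MEMORY also marks the file as guest-backed-object
 * aware, which affects how 3D capabilities are reported. Returns 0 on
 * success, or -EINVAL for an unknown parameter.
 */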
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_getparam_arg *param =
            (struct drm_vmw_getparam_arg *)data;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

        switch (param->param) {
        case DRM_VMW_PARAM_NUM_STREAMS:
                param->value = vmw_overlay_num_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_NUM_FREE_STREAMS:
                param->value = vmw_overlay_num_free_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_3D:
                param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
                break;
        case DRM_VMW_PARAM_HW_CAPS:
                param->value = dev_priv->capabilities;
                break;
        case DRM_VMW_PARAM_HW_CAPS2:
                param->value = dev_priv->capabilities2;
                break;
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
                param->value = dev_priv->prim_bb_mem;
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                const struct vmw_fifo_state *fifo = &dev_priv->fifo;

                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
                        param->value = SVGA3D_HWVERSION_WS8_B1;
                        break;
                }

                param->value =
                        vmw_fifo_mem_read(dev_priv,
                                          ((fifo->capabilities &
                                            SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                                   SVGA_FIFO_3D_HWVERSION_REVISED :
                                                   SVGA_FIFO_3D_HWVERSION));
                break;
        }
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
                    !vmw_fp->gb_aware)
                        param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
                else
                        param->value = dev_priv->memory_size;
                break;
        case DRM_VMW_PARAM_3D_CAPS_SIZE:
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
                    vmw_fp->gb_aware)
                        param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
                else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
                        param->value = sizeof(struct svga_3d_compat_cap) +
                                sizeof(uint32_t);
                else
                        param->value = (SVGA_FIFO_3D_CAPS_LAST -
                                        SVGA_FIFO_3D_CAPS + 1) *
                                sizeof(uint32_t);
                break;
        case DRM_VMW_PARAM_MAX_MOB_MEMORY:
                vmw_fp->gb_aware = true;
                param->value = dev_priv->max_mob_pages * PAGE_SIZE;
                break;
        case DRM_VMW_PARAM_MAX_MOB_SIZE:
                param->value = dev_priv->max_mob_size;
                break;
        case DRM_VMW_PARAM_SCREEN_TARGET:
                param->value =
                        (dev_priv->active_display_unit == vmw_du_screen_target);
                break;
        case DRM_VMW_PARAM_DX:
                param->value = has_sm4_context(dev_priv);
                break;
        case DRM_VMW_PARAM_SM4_1:
                param->value = has_sm4_1_context(dev_priv);
                break;
        case DRM_VMW_PARAM_SM5:
                param->value = has_sm5_context(dev_priv);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

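/**
 * vmw_mask_legacy_multisample - Filter the deprecated multisample devcap.
 *
 * @cap: The devcap index being reported.
 * @fmt_value: The value read from the device for that devcap.
 *
 * Returns 0 for the deprecated SVGA3D_DEVCAP_DEAD5 (formerly
 * MULTISAMPLE_MASKABLESAMPLES) capability, and @fmt_value unchanged for
 * everything else.
 */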
static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
        /*
         * Some user-space versions use MULTISAMPLE_MASKABLESAMPLES to check
         * the sample count supported by the virtual device. Since there never
         * was support for a multisample count for backing MOBs, return 0.
         *
         * The MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by
         * the virtual device.
         */
        if (cap == SVGA3D_DEVCAP_DEAD5)
                return 0;

        return fmt_value;
}

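/**
 * vmw_fill_compat_cap - Fill a bounce buffer with caps in the legacy format.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bounce: Bounce buffer to fill; laid out as a struct svga_3d_compat_cap.
 * @size: Size of the bounce buffer in bytes.
 *
 * Reads each device capability through the SVGA_REG_DEV_CAP register,
 * under the cap_lock, and stores it as an (index, value) pair. Only as many
 * pairs as fit in @size are written. Returns 0 on success, or -EINVAL if
 * @size cannot hold even the record header.
 */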
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
                               size_t size)
{
        struct svga_3d_compat_cap *compat_cap =
                (struct svga_3d_compat_cap *) bounce;
        unsigned int i;
        size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
        unsigned int max_size;

        if (size < pair_offset)
                return -EINVAL;

        max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

        if (max_size > SVGA3D_DEVCAP_MAX)
                max_size = SVGA3D_DEVCAP_MAX;

        compat_cap->header.length =
                (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
        compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

        spin_lock(&dev_priv->cap_lock);
        for (i = 0; i < max_size; ++i) {
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                compat_cap->pairs[i][0] = i;
                compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
                        (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
        }
        spin_unlock(&dev_priv->cap_lock);

        return 0;
}


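/**
 * vmw_get_cap_3d_ioctl - Handler for the DRM_VMW_GET_3D_CAP ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg describing the
 * user-space buffer and its size.
 * @file_priv: Identifies the calling file.
 *
 * Copies the device 3D capabilities to user-space in one of three layouts:
 * a flat array of devcap values for gb_aware clients, the legacy
 * svga_3d_compat_cap record for other clients on guest-backed hardware, or
 * a snapshot of the FIFO 3D caps range otherwise. The result is truncated
 * to the size the caller asked for.
 */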
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vmw_get_3d_cap_arg *arg =
                (struct drm_vmw_get_3d_cap_arg *) data;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t size;
        u32 *fifo_mem;
        void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
        void *bounce;
        int ret;
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

        if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
                VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }

        if (gb_objects && vmw_fp->gb_aware)
                size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
        else if (gb_objects)
                size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
        else
                size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
                        sizeof(uint32_t);

        if (arg->max_size < size)
                size = arg->max_size;

        bounce = vzalloc(size);
        if (unlikely(bounce == NULL)) {
                DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
                return -ENOMEM;
        }

        if (gb_objects && vmw_fp->gb_aware) {
                int i, num;
                uint32_t *bounce32 = (uint32_t *) bounce;

                num = size / sizeof(uint32_t);
                if (num > SVGA3D_DEVCAP_MAX)
                        num = SVGA3D_DEVCAP_MAX;

                spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_mask_legacy_multisample
                                (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
                }
                spin_unlock(&dev_priv->cap_lock);
        } else if (gb_objects) {
                ret = vmw_fill_compat_cap(dev_priv, bounce, size);
                if (unlikely(ret != 0))
                        goto out_err;
        } else {
                fifo_mem = dev_priv->fifo_mem;
                memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
        }

        ret = copy_to_user(buffer, bounce, size);
        if (ret)
                ret = -EFAULT;
out_err:
        vfree(bounce);

        if (unlikely(ret != 0))
                DRM_ERROR("Failed to report 3D caps info.\n");

        return ret;
}

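/**
 * vmw_present_ioctl - Handler for the DRM_VMW_PRESENT ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_arg naming the framebuffer,
 * the source surface and the clip rectangles to present.
 * @file_priv: Identifies the calling file.
 *
 * Copies the clip rectangles from user-space, looks up the framebuffer and
 * the surface, and hands the present operation off to vmw_kms_present().
 */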
int vmw_present_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_arg *arg =
                (struct drm_vmw_present_arg *)data;
        struct vmw_surface *surface;
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        struct vmw_resource *res;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
        if (!fb) {
                VMW_DEBUG_USER("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }
        vfb = vmw_framebuffer_to_vfb(fb);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
                                              user_surface_converter,
                                              &res);
        if (ret)
                goto out_no_surface;

        surface = vmw_res_to_srf(res);
        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
                              clips, num_clips);

        /*
         * The surface lookup above took a reference; drop it here. The
         * framebuffer reference from drm_framebuffer_lookup() is dropped
         * at drm_framebuffer_put() below.
         */
        vmw_surface_unreference(&surface);

out_no_surface:
        ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
        drm_framebuffer_put(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}

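/**
 * vmw_present_readback_ioctl - Handler for the DRM_VMW_PRESENT_READBACK ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg naming the
 * buffer-backed framebuffer, the clip rectangles and an optional fence
 * representation used to signal completion to user-space.
 * @file_priv: Identifies the calling file.
 *
 * Copies the clip rectangles from user-space, looks up the framebuffer and
 * hands the readback off to vmw_kms_readback().
 */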
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_readback_arg *arg =
                (struct drm_vmw_present_readback_arg *)data;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
        if (!fb) {
                VMW_DEBUG_USER("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }

        vfb = vmw_framebuffer_to_vfb(fb);
        if (!vfb->bo) {
                VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_kms_readback(dev_priv, file_priv,
                               vfb, user_fence_rep,
                               clips, num_clips);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
        drm_framebuffer_put(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}


/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv =
                vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_poll(filp, wait);
}


/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                      size_t count, loff_t *offset)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv =
                vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_read(filp, buffer, count, offset);
}