/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int __qxl_bo_pin(struct qxl_bo *bo);
static void __qxl_bo_unpin(struct qxl_bo *bo);

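/*
 * TTM destroy callback: evict any hardware surface backing the BO,
 * unlink it from the device's GEM object list and free it.
 */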
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = to_qxl_bo(tbo);
        qdev = to_qxl(bo->tbo.base.dev);

        qxl_surface_evict(qdev, bo, false);
        WARN_ON_ONCE(bo->map_count > 0);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->tbo.base);
        kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &qxl_ttm_bo_destroy)
                return true;
        return false;
}

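/*
 * Translate a QXL_GEM_DOMAIN_* value into a TTM placement list.
 * Objects of one page or less are placed top-down to reduce
 * fragmentation; an unrecognized domain falls back to system memory.
 */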
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
        u32 c = 0;
        u32 pflag = 0;
        unsigned int i;

        if (qbo->tbo.base.size <= PAGE_SIZE)
                pflag |= TTM_PL_FLAG_TOPDOWN;

        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM) {
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_SURFACE) {
                qbo->placements[c].mem_type = TTM_PL_PRIV;
                qbo->placements[c++].flags = pflag;
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_CPU) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = pflag;
        }
        if (!c) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = 0;
        }
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
        for (i = 0; i < c; ++i) {
                qbo->placements[i].fpfn = 0;
                qbo->placements[i].lpfn = 0;
        }
}

static const struct drm_gem_object_funcs qxl_object_funcs = {
        .free = qxl_gem_object_free,
        .open = qxl_gem_object_open,
        .close = qxl_gem_object_close,
        .pin = qxl_gem_prime_pin,
        .unpin = qxl_gem_prime_unpin,
        .get_sg_table = qxl_gem_prime_get_sg_table,
        .vmap = qxl_gem_prime_vmap,
        .vunmap = qxl_gem_prime_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};

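/*
 * Allocate a BO of @size bytes (rounded up to whole pages) in the given
 * domain.  On success the BO is returned unreserved in @bo_ptr, pinned
 * if @pinned is set.
 */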
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
                  bool kernel, bool pinned, u32 domain, u32 priority,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = { !kernel, false };
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->tbo.base.funcs = &qxl_object_funcs;
        bo->type = domain;
        bo->surface_id = 0;
        INIT_LIST_HEAD(&bo->list);

        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain);

        bo->tbo.priority = priority;
        r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
                                 &bo->placement, 0, &ctx, NULL, NULL,
                                 &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->ddev.dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        if (pinned)
                ttm_bo_pin(&bo->tbo);
        ttm_bo_unreserve(&bo->tbo);
        *bo_ptr = bo;
        return 0;
}

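/*
 * Map the BO into kernel address space; the caller must hold the
 * reservation.  Mappings are refcounted through map_count: the first
 * map pins the BO, later calls just return the existing mapping.
 */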
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
        int r;

        dma_resv_assert_held(bo->tbo.base.resv);

        if (bo->kptr) {
                bo->map_count++;
                goto out;
        }

        r = __qxl_bo_pin(bo);
        if (r)
                return r;

        r = ttm_bo_vmap(&bo->tbo, &bo->map);
        if (r) {
                __qxl_bo_unpin(bo);
                return r;
        }
        bo->map_count = 1;

        /* TODO: Remove kptr in favor of map everywhere. */
        if (bo->map.is_iomem)
                bo->kptr = (void *)bo->map.vaddr_iomem;
        else
                bo->kptr = bo->map.vaddr;

out:
        *map = bo->map;
        return 0;
}

int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = qxl_bo_vmap_locked(bo, map);
        qxl_bo_unreserve(bo);
        return r;
}

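/*
 * Map a single page of the BO for CPU access.  VRAM and surface
 * placements go through an atomic io_mapping; anything else falls back
 * to a full (non-atomic) vmap of the object.
 */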
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        unsigned long offset;
        void *rptr;
        int ret;
        struct io_mapping *map;
        struct iosys_map bo_map;

        if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;

        offset = bo->tbo.resource->start << PAGE_SHIFT;
        return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_vmap_locked(bo, &bo_map);
        if (ret)
                return NULL;
        rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}

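/*
 * Drop one kernel-mapping reference; the vmap and the matching pin are
 * only released once the last reference is gone.  The caller must hold
 * the reservation.
 */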
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
        dma_resv_assert_held(bo->tbo.base.resv);

        if (bo->kptr == NULL)
                return;
        bo->map_count--;
        if (bo->map_count > 0)
                return;
        bo->kptr = NULL;
        ttm_bo_vunmap(&bo->tbo, &bo->map);
        __qxl_bo_unpin(bo);
}

int qxl_bo_vunmap(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        qxl_bo_vunmap_locked(bo);
        qxl_bo_unreserve(bo);
        return 0;
}

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
            (bo->tbo.resource->mem_type != TTM_PL_PRIV))
                goto fallback;

        io_mapping_unmap_atomic(pmap);
        return;
fallback:
        qxl_bo_vunmap_locked(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
        if ((*bo) == NULL)
                return;

        drm_gem_object_put(&(*bo)->tbo.base);
        *bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        drm_gem_object_get(&bo->tbo.base);
        return bo;
}

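/*
 * Pin with the reservation already held.  An already-pinned BO only
 * gets its pin count bumped; otherwise it is first validated into its
 * preferred placement.
 */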
static int __qxl_bo_pin(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct drm_device *ddev = bo->tbo.base.dev;
        int r;

        if (bo->tbo.pin_count) {
                ttm_bo_pin(&bo->tbo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, bo->type);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0))
                ttm_bo_pin(&bo->tbo);
        if (unlikely(r != 0))
                dev_err(ddev->dev, "%p pin failed\n", bo);
        return r;
}

static void __qxl_bo_unpin(struct qxl_bo *bo)
{
        ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_pin() directly instead.
 */
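/*
 * Illustrative usage (a sketch only, error handling elided).
 * qxl_bo_pin() takes the reservation itself, while __qxl_bo_pin()
 * expects it to be held:
 *
 *      r = qxl_bo_pin(bo);
 *      ...
 *      qxl_bo_unpin(bo);
 *
 * versus, with the reservation held across the sequence:
 *
 *      r = qxl_bo_reserve(bo);
 *      r = __qxl_bo_pin(bo);
 *      ...
 *      qxl_bo_unreserve(bo);
 */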
int qxl_bo_pin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = __qxl_bo_pin(bo);
        qxl_bo_unreserve(bo);
        return r;
}

/*
 * Reserve the BO before unpinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_unpin() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
        return 0;
}

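/*
 * Force-free any GEM objects still on the device list (e.g. at driver
 * teardown).  Reaching this with a non-empty list means userspace
 * leaked objects.
 */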
void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
                        &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
                        *((unsigned long *)&bo->tbo.base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_put(&bo->tbo.base);
        }
}

int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}

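/*
 * Lazily allocate a surface id (and the hardware surface behind it)
 * for surface-domain BOs that do not have one yet.
 */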
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* allocate a surface id for this surface now */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo);
                if (ret)
                        return ret;
        }
        return 0;
}

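/*
 * Evict everything from the surface (TTM_PL_PRIV) and VRAM resource
 * managers, respectively.
 */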
int qxl_surf_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}