/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "qxl_object.h"
24 #include <trace/events/dma_fence.h>
/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */

/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

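/*
 * One suballocation stream per release type; the array index matches
 * cur_idx in qxl_alloc_release_reserved() (0 = drawable,
 * 1 = surface cmd, 2 = cursor cmd).
 */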
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

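	/*
	 * The fence only signals once the device has processed the
	 * release, so instead of passively sleeping we nudge the device
	 * (qxl_io_notify_oom) and pump garbage collection ourselves,
	 * rechecking the fence between attempts.
	 */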
retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence,
				       "failed to wait on release %llu after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

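/*
 * Only .wait is overridden beyond the two name callbacks: the stock
 * wait would merely sleep, while here the waiter itself has to drive
 * garbage collection (see qxl_fence_wait() above) for the fence to
 * make progress.
 */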
static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

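	/*
	 * Preload outside the spinlock: idr_alloc() below must not
	 * sleep, hence GFP_NOWAIT, and idr_preload() sets memory aside
	 * for it beforehand.
	 */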
	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

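	/*
	 * If base.ops is set the fence was initialized and emitted in
	 * qxl_release_fence_buffer_objects(): signal it and let the
	 * final dma_fence_put() free the embedded struct.  Otherwise no
	 * fence exists and the release is freed directly.
	 */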
	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	/* only add each bo once */
	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

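/*
 * Make sure a bo backing a release is resident and has a shared-fence
 * slot reserved, so qxl_release_fence_buffer_objects() cannot fail
 * later when it attaches the fence.
 */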
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

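/*
 * Reserve (ww-lock) and validate every bo on the release, backing the
 * whole set off again if any single validation fails.
 */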
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release it's the release itself,
	   and since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object on the release it's the release itself,
	   and since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

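/*
 * A surface destroy release is suballocated from the same bo as the
 * matching create command, directly behind it (offset + 64), so the
 * destroy path never needs a fresh bo allocation of its own.
 */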
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
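	/*
	 * Suballocate from the current per-type bo; once a page's worth
	 * of slots has been handed out, drop it and start a fresh bo.
	 */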
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

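/*
 * Called with all bos on the release reserved: initialize and emit the
 * fence, attach it to every bo's reservation object, put the bos back
 * on the LRU and end the ww-mutex transaction.
 */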
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object on the release it's the release itself,
	   and since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	glob = bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}