/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */
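
/*
 * The QXL device exposes shared-memory rings: the command and cursor
 * rings, which the guest produces into and the device consumes, and
 * the release ring, which flows the other way.  Each ring consists of
 * a qxl_ring_header (prod/cons indices plus notify thresholds)
 * followed by a power-of-two number of fixed-size elements.  The
 * guest kicks the device with an outb() to the ring's notify port;
 * waiters on the guest side sleep on the ring's wait queue until the
 * device makes progress.
 */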

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

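/*
 * On-device ring layout: the qxl_ring_header is followed directly by
 * the element array.  This struct just overlays the memory whose
 * header pointer is handed to qxl_ring_create().
 */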
struct ring {
        struct qxl_ring_header      header;
        uint8_t                     elements[];
};

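/*
 * Guest-side bookkeeping for one ring: a pointer to the shared ring,
 * its geometry, the io port used to notify the device, the wait queue
 * to sleep on when the ring is full, and a lock serializing index
 * updates.
 */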
struct qxl_ring {
        struct ring            *ring;
        int                     element_size;
        int                     n_elements;
        int                     prod_notify;
        wait_queue_head_t      *push_event;
        spinlock_t              lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
        kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
        ring->ring->header.notify_on_prod = ring->n_elements;
}

struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
                int element_size,
                int n_elements,
                int prod_notify,
                bool set_prod_notify,
                wait_queue_head_t *push_event)
{
        struct qxl_ring *ring;

        ring = kmalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ring = (struct ring *)header;
        ring->element_size = element_size;
        ring->n_elements = n_elements;
        ring->prod_notify = prod_notify;
        ring->push_event = push_event;
        if (set_prod_notify)
                qxl_ring_init_hdr(ring);
        spin_lock_init(&ring->lock);
        return ring;
}

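/*
 * Returns nonzero when the ring has room for another element.  When
 * it is full, arm notify_on_cons so the device will tell us as soon
 * as it consumes an entry.
 */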
static int qxl_check_header(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod - header->cons < header->num_items;
        if (ret == 0)
                header->notify_on_cons = header->cons + 1;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod == header->cons;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

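/*
 * Copy new_elt into the next free slot and advance the producer
 * index.  If the ring is full, wait (busy-spinning when we cannot
 * sleep) until the device consumes an entry, then notify the device
 * with an outb() if it asked for a doorbell at this producer
 * position.
 */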
int qxl_ring_push(struct qxl_ring *ring,
                  const void *new_elt, bool interruptible)
{
        struct qxl_ring_header *header = &(ring->ring->header);
        uint8_t *elt;
        int idx, ret;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->prod - header->cons == header->num_items) {
                /* ring full: ask the device to notify us on the next consume */
                header->notify_on_cons = header->cons + 1;
                mb();
                spin_unlock_irqrestore(&ring->lock, flags);
                if (!drm_can_sleep()) {
                        while (!qxl_check_header(ring))
                                udelay(1);
                } else {
                        if (interruptible) {
                                ret = wait_event_interruptible(*ring->push_event,
                                                               qxl_check_header(ring));
                                if (ret)
                                        return ret;
                        } else {
                                wait_event(*ring->push_event,
                                           qxl_check_header(ring));
                        }
                }
                spin_lock_irqsave(&ring->lock, flags);
        }

        /* n_elements is a power of two, so the mask wraps the index */
        idx = header->prod & (ring->n_elements - 1);
        elt = ring->ring->elements + idx * ring->element_size;

        memcpy((void *)elt, new_elt, ring->element_size);

        header->prod++;

        mb();

        if (header->prod == header->notify_on_prod)
                outb(0, ring->prod_notify);

        spin_unlock_irqrestore(&ring->lock, flags);
        return 0;
}

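/*
 * Pop one element into *element.  Returns false when the ring is
 * empty, after arming notify_on_prod so the device will notify us
 * when it produces the next entry.
 */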
static bool qxl_ring_pop(struct qxl_ring *ring,
                         void *element)
{
        volatile struct qxl_ring_header *header = &(ring->ring->header);
        volatile uint8_t *ring_elt;
        int idx;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->cons == header->prod) {
                header->notify_on_prod = header->cons + 1;
                spin_unlock_irqrestore(&ring->lock, flags);
                return false;
        }

        idx = header->cons & (ring->n_elements - 1);
        ring_elt = ring->ring->elements + idx * ring->element_size;

        memcpy(element, (void *)ring_elt, ring->element_size);

        header->cons++;

        spin_unlock_irqrestore(&ring->lock, flags);
        return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                              uint32_t type, bool interruptible)
{
        struct qxl_command cmd;
        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

        return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                             uint32_t type, bool interruptible)
{
        struct qxl_command cmd;
        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

        return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
        if (!qxl_check_idle(qdev->release_ring)) {
                schedule_work(&qdev->gc_work);
                if (flush)
                        flush_work(&qdev->gc_work);
                return true;
        }
        return false;
}

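/*
 * Drain the release ring.  Each id popped from the ring heads a chain
 * of releases linked through info->next; free every release in the
 * chain and return how many were reclaimed.
 */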
int qxl_garbage_collect(struct qxl_device *qdev)
{
        struct qxl_release *release;
        uint64_t id, next_id;
        int i = 0;
        union qxl_release_info *info;

        while (qxl_ring_pop(qdev->release_ring, &id)) {
                DRM_DEBUG_DRIVER("popped %lld\n", id);
                while (id) {
                        release = qxl_release_from_id_locked(qdev, id);
                        if (release == NULL)
                                break;

                        info = qxl_release_map(qdev, release);
                        next_id = info->next;
                        qxl_release_unmap(qdev, release, info);

                        DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
                                         next_id);

                        switch (release->type) {
                        case QXL_RELEASE_DRAWABLE:
                        case QXL_RELEASE_SURFACE_CMD:
                        case QXL_RELEASE_CURSOR_CMD:
                                break;
                        default:
                                DRM_ERROR("unexpected release type\n");
                                break;
                        }
                        id = next_id;

                        qxl_release_free(qdev, release);
                        ++i;
                }
        }

        DRM_DEBUG_DRIVER("%d\n", i);

        return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev,
                          struct qxl_release *release,
                          unsigned long size,
                          struct qxl_bo **_bo)
{
        struct qxl_bo *bo;
        int ret;

        ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
                            false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
        if (ret) {
                DRM_ERROR("failed to allocate VRAM BO\n");
                return ret;
        }
        ret = qxl_release_list_add(release, bo);
        if (ret)
                goto out_unref;

        *_bo = bo;
        return 0;
out_unref:
        qxl_bo_unref(&bo);
        return ret;
}

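/*
 * Issue an asynchronous io command: write val to the port, then wait
 * up to five seconds for the device to acknowledge it by advancing
 * irq_received_io_cmd.  If a previous command is still in flight,
 * wait for that first.  A timeout is treated as the "hw" having gone
 * away and is not reported as an error.
 */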
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
        int irq_num;
        long addr = qdev->io_base + port;
        int ret;

        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
        if (qdev->last_sent_io_cmd > irq_num) {
                if (intr)
                        ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                               atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                else
                        ret = wait_event_timeout(qdev->io_cmd_event,
                                                 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                /* 0 means timeout; just bail, the "hw" has gone away */
                if (ret <= 0)
                        goto out;
                irq_num = atomic_read(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
        if (intr)
                ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
        else
                ret = wait_event_timeout(qdev->io_cmd_event,
                                         atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
        if (ret > 0)
                ret = 0;
        mutex_unlock(&qdev->async_io_mutex);
        return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
        int ret;

restart:
        ret = wait_for_io_cmd_user(qdev, val, port, false);
        if (ret == -ERESTARTSYS)
                goto restart;
}

int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
                        const struct qxl_rect *area)
{
        int surface_id;
        uint32_t surface_width, surface_height;
        int ret;

        if (!surf->hw_surf_alloc)
                DRM_ERROR("got io update area with no hw surface\n");

        if (surf->is_primary)
                surface_id = 0;
        else
                surface_id = surf->surface_id;
        surface_width = surf->surf.width;
        surface_height = surf->surf.height;

        if (area->left < 0 || area->top < 0 ||
            area->right > surface_width || area->bottom > surface_height) {
                qxl_io_log(qdev, "%s: not doing area update for "
                           "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
                           area->top, area->right, area->bottom, surface_width, surface_height);
                return -EINVAL;
        }
        mutex_lock(&qdev->update_area_mutex);
        qdev->ram_header->update_area = *area;
        qdev->ram_header->update_surface = surface_id;
        ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
        mutex_unlock(&qdev->update_area_mutex);
        return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}

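/*
 * Program the primary surface parameters into the shared ram header
 * and ask the device to create it.  When the bo has a shadow, the
 * shadow provides the actual scanout memory.
 */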
void qxl_io_create_primary(struct qxl_device *qdev,
                           unsigned offset, struct qxl_bo *bo)
{
        struct qxl_surface_create *create;

        DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
        create = &qdev->ram_header->create_surface;
        create->format = bo->surf.format;
        create->width = bo->surf.width;
        create->height = bo->surf.height;
        create->stride = bo->surf.stride;
        if (bo->shadow) {
                create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
        } else {
                create->mem = qxl_bo_physical_address(qdev, bo, offset);
        }

        DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

        create->flags = QXL_SURF_FLAG_KEEP_DATA;
        create->type = QXL_SURF_TYPE_PRIMARY;

        wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
        DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
        wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

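/*
 * Format a message into the device's log buffer and kick QXL_IO_LOG
 * so the host emits it; this path avoids printk (see the note below).
 */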
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
        va_end(args);
        /*
         * Do not use DRM logging here: it calls printk, which can
         * call back into qxl for rendering (qxl_fb).
         */
        outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
        qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
                   qdev->monitors_config ?
                   qdev->monitors_config->count : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].width : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].height : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].x : -1,
                   qdev->monitors_config && qdev->monitors_config->count ?
                   qdev->monitors_config->heads[0].y : -1
                   );

        wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

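/*
 * Allocate a surface id from the idr.  Ids above the device limit are
 * handed back immediately and a couple of surfaces are reaped to make
 * room before retrying.
 */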
int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf)
{
        uint32_t handle;
        int idr_ret;
        int count = 0;
again:
        idr_preload(GFP_ATOMIC);
        spin_lock(&qdev->surf_id_idr_lock);
        idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->surf_id_idr_lock);
        idr_preload_end();
        if (idr_ret < 0)
                return idr_ret;
        handle = idr_ret;

        if (handle >= qdev->rom->n_surfaces) {
                count++;
                spin_lock(&qdev->surf_id_idr_lock);
                idr_remove(&qdev->surf_id_idr, handle);
                spin_unlock(&qdev->surf_id_idr_lock);
                qxl_reap_surface_id(qdev, 2);
                goto again;
        }
        surf->surface_id = handle;

        spin_lock(&qdev->surf_id_idr_lock);
        qdev->last_alloced_surf_id = handle;
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
                            uint32_t surface_id)
{
        spin_lock(&qdev->surf_id_idr_lock);
        idr_remove(&qdev->surf_id_idr, surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
}

int qxl_hw_surface_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf,
                         struct ttm_mem_reg *new_mem)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;

        if (surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
                                                 NULL,
                                                 &release);
        if (ret)
                return ret;

        ret = qxl_release_reserve_list(release, true);
        if (ret)
                return ret;

        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
        cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
        cmd->u.surface_create.format = surf->surf.format;
        cmd->u.surface_create.width = surf->surf.width;
        cmd->u.surface_create.height = surf->surf.height;
        cmd->u.surface_create.stride = surf->surf.stride;
        if (new_mem) {
                int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
                struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

                /* TODO - need to hold one of the locks to read tbo.offset */
                cmd->u.surface_create.data = slot->high_bits;

                cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
        } else {
                cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
        }
        cmd->surface_id = surf->surface_id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        surf->surf_create = release;

        /*
         * No need to add a release to the fence for this surface bo,
         * since it is only released when we ask to destroy the surface
         * and it would never signal otherwise.
         */
        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
        qxl_release_fence_buffer_objects(release);

        surf->hw_surf_alloc = true;
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}

int qxl_hw_surface_dealloc(struct qxl_device *qdev,
                           struct qxl_bo *surf)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;
        int id;

        if (!surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
                                                 surf->surf_create,
                                                 &release);
        if (ret)
                return ret;

        surf->surf_create = NULL;
        /* remove the surface from the idr, but not the surface id yet */
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        surf->hw_surf_alloc = false;

        id = surf->surface_id;
        surf->surface_id = 0;

        release->surface_release_id = id;
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_DESTROY;
        cmd->surface_id = id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

        qxl_release_fence_buffer_objects(release);

        return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
        struct qxl_rect rect;
        int ret;

        /*
         * If we are evicting, we need to make sure the surface is up
         * to date.
         */
        rect.left = 0;
        rect.right = surf->surf.width;
        rect.top = 0;
        rect.bottom = surf->surf.height;
retry:
        ret = qxl_io_update_area(qdev, surf, &rect);
        if (ret == -ERESTARTSYS)
                goto retry;
        return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        /* no need to update area if we are just freeing the surface normally */
        if (do_update_area)
                qxl_update_surface(qdev, surf);

        /* nuke the surface id at the hw */
        qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        mutex_lock(&qdev->surf_evict_mutex);
        qxl_surface_evict_locked(qdev, surf, do_update_area);
        mutex_unlock(&qdev->surf_evict_mutex);
}

static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
        int ret;

        ret = qxl_bo_reserve(surf, false);
        if (ret)
                return ret;

        if (stall)
                mutex_unlock(&qdev->surf_evict_mutex);

        ret = ttm_bo_wait(&surf->tbo, true, !stall);

        if (stall)
                mutex_lock(&qdev->surf_evict_mutex);
        if (ret) {
                qxl_bo_unreserve(surf);
                return ret;
        }

        qxl_surface_evict_locked(qdev, surf, true);
        qxl_bo_unreserve(surf);
        return 0;
}

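/*
 * Walk the surface idr, starting just past the most recently
 * allocated id, and evict surfaces until max_to_reap have been
 * reclaimed.  If a no-wait pass frees nothing, retry once while
 * stalling on the bos.
 */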
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
        int num_reaped = 0;
        int i, ret;
        bool stall = false;
        int start = 0;

        mutex_lock(&qdev->surf_evict_mutex);
again:

        spin_lock(&qdev->surf_id_idr_lock);
        start = qdev->last_alloced_surf_id + 1;
        spin_unlock(&qdev->surf_id_idr_lock);

        for (i = start; i < start + qdev->rom->n_surfaces; i++) {
                void *objptr;
                int surfid = i % qdev->rom->n_surfaces;

                /*
                 * This avoids the case where the object is in the idr
                 * but has been evicted half way: it makes the idr
                 * lookup atomic with the eviction.
                 */
                spin_lock(&qdev->surf_id_idr_lock);
                objptr = idr_find(&qdev->surf_id_idr, surfid);
                spin_unlock(&qdev->surf_id_idr_lock);

                if (!objptr)
                        continue;

                ret = qxl_reap_surf(qdev, objptr, stall);
                if (ret == 0)
                        num_reaped++;
                if (num_reaped >= max_to_reap)
                        break;
        }
        if (num_reaped == 0 && stall == false) {
                stall = true;
                goto again;
        }

        mutex_unlock(&qdev->surf_evict_mutex);
        if (num_reaped) {
                usleep_range(500, 1000);
                qxl_queue_garbage_collect(qdev, true);
        }

        return 0;
}