/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */
/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"
#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)
typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
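/*
 * Each descriptor describes one physically contiguous chunk of a blit:
 * mem_addr holds the bus address returned by dma_map_page(), dev_addr the
 * frame-buffer offset, size the byte count, and next the bus address of
 * the next descriptor. A 'next' value carrying the VIA_DMA_DPR_EC flag
 * (as set up in via_map_blit_for_device() below) terminates the chain.
 */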
/*
 * Unmap a DMA mapping.
 */
static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}
/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, and build and map the
 * descriptor chain as well.
 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
 * 'next' field without syncing calls when the descriptor is already mapped.
 */
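/*
 * Chain layout sketch: descriptors are filled in submission order, but each
 * new descriptor's 'next' points at the previously mapped one, so
 * vsg->chain_start ends up referencing the descriptor mapped last. The
 * hardware walks the chain from there back to the first descriptor, whose
 * 'next' still carries the VIA_DMA_DPR_EC end-of-chain marker.
 */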
static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;
				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built, as long as the state enum is
 * consistent with the actual state of the used resources.
 */
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		fallthrough;
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		fallthrough;
	case dr_via_pages_locked:
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
					    (vsg->direction == DMA_FROM_DEVICE));
		fallthrough;
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		fallthrough;
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}
/*
 * Fire a blit engine.
 */
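/*
 * Note: the per-engine register banks appear to be spaced 0x10 apart for
 * MAR/DAR/BCR/DPR and 0x04 apart for CSR/MR, hence the differing engine
 * multipliers below. The trailing register read flushes the posted writes.
 */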
static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
/*
 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
 * occur here if the calling user does not have access to the submitted address.
 */
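/*
 * Note: for blits from the device (vsg->direction == DMA_FROM_DEVICE) the
 * pages must be pinned writable, hence the FOLL_WRITE flag below; for blits
 * to the device, read access is sufficient.
 */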
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr, vsg->num_pages,
			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0, vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */
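/*
 * Sizing sketch: with 4 KiB pages and 16-byte descriptors (four uint32_t
 * fields), each descriptor page holds 256 descriptors, so even a large
 * blit needs only a handful of independently allocated pages.
 */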
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}
static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */
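/*
 * Locking note: when called from the IRQ handler (from_irq != 0), interrupts
 * are already disabled, so a plain spin_lock() suffices; other callers must
 * use the irqsave variant, which is why both paths exist below.
 */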
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	  ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		wake_up(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
/*
 * Check whether this blit is still active, performing necessary locking.
 */
static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));
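	/*
	 * Reading the unsigned arithmetic above: a handle counts as active
	 * when it has been issued (cur_blit_handle - handle is a small
	 * distance, <= 1 << 23) but not yet completed (done_blit_handle -
	 * handle wraps around to a large value because the handle is still
	 * ahead of done_blit_handle). The 1 << 23 threshold assumes far
	 * fewer than 2^23 blits are ever outstanding at once.
	 */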
	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}

	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}
/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */
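/*
 * Note: with mod_timer(..., jiffies + 1) below, the engine is re-polled
 * roughly once per tick (1/HZ seconds) for as long as it stays busy.
 */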
static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);
	}
}
/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */
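/*
 * Note: blit_lock is deliberately dropped around via_free_sg_info() in the
 * loop below, presumably because the teardown (unmapping and unpinning
 * pages) may sleep and therefore must not run under a spinlock.
 */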
static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		wake_up(&blitq->busy_queue);

		via_free_sg_info(pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
/*
 * Init all blit engines. Currently we use two, but some hardware has 4.
 */
void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	drm_via_blitq_t *blitq;

	pci_set_master(pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}
/*
 * Build all info and do all mappings required for a blit.
 */
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}
	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */
	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}
	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}
	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DOS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}
	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}
	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif
	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(pdev, xfer, vsg, 1);

	return 0;
}
/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}
/*
 * Hand back a free slot if we changed our mind.
 */
static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	wake_up(&blitq->busy_queue);
}
/*
 * Grab a free slot. Build blit info and queue a blit.
 */
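/*
 * Note: engine selection below is fixed by transfer direction. Engine 0
 * handles blits to the frame buffer and engine 1 blits from it, so the two
 * directions queue independently.
 */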
static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */
int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}
/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */
int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}