// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

#include "vmwgfx_bo.h"
#include <linux/highmem.h>

/*
 * Template that implements find_first_diff() for a generic
 * unsigned integer type. @size and return value are in bytes.
 */
#define VMW_FIND_FIRST_DIFF(_type)                       \
static size_t vmw_find_first_diff_ ## _type              \
        (const _type * dst, const _type * src, size_t size)\
{                                                        \
        size_t i;                                        \
                                                         \
        for (i = 0; i < size; i += sizeof(_type)) {      \
                if (*dst++ != *src++)                    \
                        break;                           \
        }                                                \
                                                         \
        return i;                                        \
}
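
/*
 * For reference, a sketch of what the template above expands to for u32
 * (modulo whitespace):
 *
 *	static size_t vmw_find_first_diff_u32(const u32 *dst,
 *					      const u32 *src, size_t size)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < size; i += sizeof(u32)) {
 *			if (*dst++ != *src++)
 *				break;
 *		}
 *		return i;
 *	}
 *
 * Since @size is in bytes, the loop compares size / 4 words and the
 * return value is already a byte offset.
 */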


/*
 * Template that implements find_last_diff() for a generic
 * unsigned integer type. Pointers point to the item following the
 * *end* of the area to be examined. @size and return value are in
 * bytes.
 */
#define VMW_FIND_LAST_DIFF(_type)                                       \
static ssize_t vmw_find_last_diff_ ## _type(                            \
        const _type * dst, const _type * src, size_t size)              \
{                                                                       \
        while (size) {                                                  \
                if (*--dst != *--src)                                   \
                        break;                                          \
                                                                        \
                size -= sizeof(_type);                                  \
        }                                                               \
        return size;                                                    \
}
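
/*
 * Illustration of the past-the-end calling convention above
 * (hypothetical buffers):
 *
 *	u8 a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *	u8 b[8] = { 0, 1, 9, 3, 4, 5, 6, 7 };
 *	size_t n = vmw_find_last_diff_u8(a + 8, b + 8, 8);
 *
 * Here n == 3: the return value is the byte offset just past the last
 * differing item, or 0 if the areas are identical.
 */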


/*
 * Instantiate find diff functions for the relevant unsigned integer
 * widths, assuming that wider integer accesses are faster (even
 * accounting for the extra alignment handling) up to the architecture's
 * native width, which is assumed to be 32 bits unless CONFIG_64BIT is
 * defined.
 */
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif


/* We use size-aligned accesses. This computes (addr - align(addr)). */
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
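
/*
 * Example: for a pointer at address 0x1003, SPILL(ptr, u32) is
 * 0x1003 & 3 == 3, i.e. the pointer sits three bytes past the previous
 * u32 boundary, so sizeof(u32) - 3 == 1 byte must be handled at byte
 * granularity before u32-wide finds can start.
 */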


/*
 * Template to compute find_first_diff() for a certain integer type,
 * including a byte-granularity find over the unaligned head, and
 * adjustment of the parameters for a tail find or an
 * increased-resolution find using an unsigned integer find of smaller
 * width. If the find is complete and the resolution is sufficient, the
 * macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_FIRST_DIFF(_type)                                  \
do {                                                                    \
        unsigned int spill = SPILL(dst, _type);                         \
        size_t diff_offs;                                               \
                                                                        \
        if (spill && spill == SPILL(src, _type) &&                      \
            sizeof(_type) - spill <= size) {                            \
                spill = sizeof(_type) - spill;                          \
                diff_offs = vmw_find_first_diff_u8(dst, src, spill);    \
                if (diff_offs < spill)                                  \
                        return round_down(offset + diff_offs, granularity); \
                                                                        \
                dst += spill;                                           \
                src += spill;                                           \
                size -= spill;                                          \
                offset += spill;                                        \
                spill = 0;                                              \
        }                                                               \
        if (!spill && !SPILL(src, _type)) {                             \
                size_t to_copy = size & ~(sizeof(_type) - 1);           \
                                                                        \
                diff_offs = vmw_find_first_diff_ ## _type               \
                        ((_type *) dst, (_type *) src, to_copy);        \
                if (diff_offs >= size || granularity == sizeof(_type))  \
                        return (offset + diff_offs);                    \
                                                                        \
                dst += diff_offs;                                       \
                src += diff_offs;                                       \
                size -= diff_offs;                                      \
                offset += diff_offs;                                    \
        }                                                               \
} while (0)
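
/*
 * Concrete walk-through of the template above for _type == u32,
 * assuming dst == 0x1003, src == 0x2003 and size == 10: both pointers
 * spill 3 bytes, so the single head byte up to the next u32 boundary
 * is checked with vmw_find_first_diff_u8(). If it matches, the
 * pointers are now u32-aligned and 8 of the remaining 9 bytes are
 * compared u32-wise; the final byte falls through to the narrower
 * finds below.
 */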


/**
 * vmw_find_first_diff - find the first difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the first difference was
 * encountered in bytes. If no difference was found, the function returns
 * a value >= @size.
 */
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        size_t offset = 0;

        /*
         * Try finding with large integers if alignment allows, or we can
         * fix it. Fall through if we need better resolution or alignment
         * was bad.
         */
#ifdef CONFIG_64BIT
        VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
        VMW_TRY_FIND_FIRST_DIFF(u32);
        VMW_TRY_FIND_FIRST_DIFF(u16);

        return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
                          granularity);
}
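
/*
 * Example: with granularity 4 (32-bit pixels), two aligned buffers
 * whose first mismatch is at byte 6 make vmw_find_first_diff() return
 * round_down(6, 4) == 4, i.e. the byte offset of the pixel containing
 * the first difference.
 */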


/*
 * Template to compute find_last_diff() for a certain integer type,
 * including a byte-granularity find over the unaligned tail, and
 * adjustment of the parameters for a head find or an
 * increased-resolution find using an unsigned integer find of smaller
 * width. If the find is complete and the resolution is sufficient, the
 * macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_LAST_DIFF(_type)                                   \
do {                                                                    \
        unsigned int spill = SPILL(dst, _type);                         \
        ssize_t location;                                               \
        ssize_t diff_offs;                                              \
                                                                        \
        if (spill && spill <= size && spill == SPILL(src, _type)) {     \
                diff_offs = vmw_find_last_diff_u8(dst, src, spill);     \
                if (diff_offs) {                                        \
                        location = size - spill + diff_offs - 1;        \
                        return round_down(location, granularity);       \
                }                                                       \
                                                                        \
                dst -= spill;                                           \
                src -= spill;                                           \
                size -= spill;                                          \
                spill = 0;                                              \
        }                                                               \
        if (!spill && !SPILL(src, _type)) {                             \
                size_t to_copy = round_down(size, sizeof(_type));       \
                                                                        \
                diff_offs = vmw_find_last_diff_ ## _type                \
                        ((_type *) dst, (_type *) src, to_copy);        \
                location = size - to_copy + diff_offs - sizeof(_type);  \
                if (location < 0 || granularity == sizeof(_type))       \
                        return location;                                \
                                                                        \
                dst -= to_copy - diff_offs;                             \
                src -= to_copy - diff_offs;                             \
                size -= to_copy - diff_offs;                            \
        }                                                               \
} while (0)


/**
 * vmw_find_last_diff - find the last difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the last difference was
 * encountered in bytes, or a negative value if no difference was found.
 */
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        dst += size;
        src += size;

#ifdef CONFIG_64BIT
        VMW_TRY_FIND_LAST_DIFF(u64);
#endif
        VMW_TRY_FIND_LAST_DIFF(u32);
        VMW_TRY_FIND_LAST_DIFF(u16);

        return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
                          granularity);
}
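
/*
 * Example: for two aligned 16-byte buffers whose only difference is at
 * byte 6, vmw_find_last_diff() with granularity 4 returns
 * round_down(6, 4) == 4, while identical buffers yield a negative
 * value.
 */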


/**
 * vmw_memcpy - A wrapper around kernel memcpy that allows it to be
 * plugged into a struct vmw_diff_cpy.
 *
 * @diff: The struct vmw_diff_cpy closure argument (unused).
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
        memcpy(dest, src, n);
}


/**
 * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @diff_offs: The offset from @diff->line_offset where the difference was
 * found.
 */
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
        size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
        struct drm_rect *rect = &diff->rect;

        rect->x1 = min_t(int, rect->x1, offs);
        rect->x2 = max_t(int, rect->x2, offs + 1);
        rect->y1 = min_t(int, rect->y1, diff->line);
        rect->y2 = max_t(int, rect->y2, diff->line + 1);
}

/**
 * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * In order to correctly track the modified content, the field @diff->line
 * must be pre-loaded with the current line number, the field
 * @diff->line_offset must be pre-loaded with the line offset in bytes where
 * the copy starts, and finally the field @diff->cpp must be pre-loaded with
 * the number of bytes per unit in the horizontal direction of the area we're
 * examining, typically bytes per pixel. This determines the granularity of
 * the difference-computing operations: a higher cpp generally leads to
 * faster execution at the cost of bounding-box width precision.
 */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
                     size_t n)
{
        ssize_t csize, byte_len;

        if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
                return;

        /* TODO: Possibly use a single vmw_find_first_diff per line? */
        csize = vmw_find_first_diff(dest, src, n, diff->cpp);
        if (csize < n) {
                vmw_adjust_rect(diff, csize);
                byte_len = diff->cpp;

                /*
                 * Starting from where the first difference was found, find
                 * the location of the last difference, and then copy.
                 */
                diff->line_offset += csize;
                dest += csize;
                src += csize;
                n -= csize;
                csize = vmw_find_last_diff(dest, src, n, diff->cpp);
                if (csize >= 0) {
                        byte_len += csize;
                        vmw_adjust_rect(diff, csize);
                }
                memcpy(dest, src, byte_len);
        }
        diff->line_offset += n;
}
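
/*
 * A minimal usage sketch: callers typically set up the closure with
 * VMW_CPU_BLIT_DIFF_INITIALIZER from vmwgfx_drv.h and feed the copy one
 * line at a time (dst_line, src_line and line_bytes are hypothetical):
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *
 *	diff.line = 0;
 *	diff.line_offset = 0;
 *	diff.do_cpy(&diff, dst_line, src_line, line_bytes);
 *
 * Afterwards, diff.rect bounds the content that actually changed, in
 * pixel and line units.
 */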

/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
 *
 * @mapped_dst: Already mapped destination page index in @dst_pages.
 * @dst_addr: Kernel virtual address of the mapped destination page.
 * @dst_pages: Array of destination bo pages.
 * @dst_num_pages: Number of destination bo pages.
 * @dst_prot: Destination bo page protection.
 * @mapped_src: Already mapped source page index in @src_pages.
 * @src_addr: Kernel virtual address of the mapped source page.
 * @src_pages: Array of source bo pages.
 * @src_num_pages: Number of source bo pages.
 * @src_prot: Source bo page protection.
 * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
 */
struct vmw_bo_blit_line_data {
        u32 mapped_dst;
        u8 *dst_addr;
        struct page **dst_pages;
        u32 dst_num_pages;
        pgprot_t dst_prot;
        u32 mapped_src;
        u8 *src_addr;
        struct page **src_pages;
        u32 src_num_pages;
        pgprot_t src_prot;
        struct vmw_diff_cpy *diff;
};

/**
 * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
 *
 * @d: Blit data as described above.
 * @dst_offset: Destination copy start offset from start of bo.
 * @src_offset: Source copy start offset from start of bo.
 * @bytes_to_copy: Number of bytes to copy in this line.
 */
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
                                u32 dst_offset,
                                u32 src_offset,
                                u32 bytes_to_copy)
{
        struct vmw_diff_cpy *diff = d->diff;

        while (bytes_to_copy) {
                u32 copy_size = bytes_to_copy;
                u32 dst_page = dst_offset >> PAGE_SHIFT;
                u32 src_page = src_offset >> PAGE_SHIFT;
                u32 dst_page_offset = dst_offset & ~PAGE_MASK;
                u32 src_page_offset = src_offset & ~PAGE_MASK;
                bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
                bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
                                                 unmap_dst);

                copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
                copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

                if (unmap_src) {
                        kunmap_atomic(d->src_addr);
                        d->src_addr = NULL;
                }

                if (unmap_dst) {
                        kunmap_atomic(d->dst_addr);
                        d->dst_addr = NULL;
                }

                if (!d->dst_addr) {
                        if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
                                return -EINVAL;

                        d->dst_addr =
                                kmap_atomic_prot(d->dst_pages[dst_page],
                                                 d->dst_prot);
                        if (!d->dst_addr)
                                return -ENOMEM;

                        d->mapped_dst = dst_page;
                }

                if (!d->src_addr) {
                        if (WARN_ON_ONCE(src_page >= d->src_num_pages))
                                return -EINVAL;

                        d->src_addr =
                                kmap_atomic_prot(d->src_pages[src_page],
                                                 d->src_prot);
                        if (!d->src_addr)
                                return -ENOMEM;

                        d->mapped_src = src_page;
                }
                diff->do_cpy(diff, d->dst_addr + dst_page_offset,
                             d->src_addr + src_page_offset, copy_size);

                bytes_to_copy -= copy_size;
                dst_offset += copy_size;
                src_offset += copy_size;
        }

        return 0;
}
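
/*
 * Example of the chunking above, assuming PAGE_SIZE == 4096: copying
 * 100 bytes from src_offset == 8180 to dst_offset == 4090 proceeds in
 * three chunks of 6, 6 and 88 bytes, since each chunk may extend at
 * most to the nearer of the destination and source page boundaries.
 */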

static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
{
        struct vmw_private *vmw =
                container_of(bo->tbo.bdev, struct vmw_private, bdev);
        void *ptr = NULL;
        int ret;

        if (bo->tbo.base.import_attach) {
                ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
                if (ret) {
                        drm_dbg_driver(&vmw->drm,
                                       "Wasn't able to map external bo!\n");
                        goto out;
                }
                ptr = map->vaddr;
        } else {
                ptr = vmw_bo_map_and_cache(bo);
        }

out:
        return ptr;
}

static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
        if (bo->tbo.base.import_attach)
                dma_buf_vunmap(bo->tbo.base.dma_buf, map);
        else
                vmw_bo_unmap(bo);
}

static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
                                u32 dst_stride, struct vmw_bo *src,
                                u32 src_offset, u32 src_stride,
                                u32 width_in_bytes, u32 height,
                                struct vmw_diff_cpy *diff)
{
        struct vmw_private *vmw =
                container_of(dst->tbo.bdev, struct vmw_private, bdev);
        size_t dst_size = dst->tbo.resource->size;
        size_t src_size = src->tbo.resource->size;
        struct iosys_map dst_map = {0};
        struct iosys_map src_map = {0};
        int ret, i;
        int x_in_bytes;
        u8 *vsrc;
        u8 *vdst;

        vsrc = map_external(src, &src_map);
        if (!vsrc) {
                drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
                ret = -ENOMEM;
                goto out;
        }

        vdst = map_external(dst, &dst_map);
        if (!vdst) {
                drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
                ret = -ENOMEM;
                goto out;
        }

        vsrc += src_offset;
        vdst += dst_offset;
        if (src_stride == dst_stride) {
                dst_size -= dst_offset;
                src_size -= src_offset;
                memcpy(vdst, vsrc,
                       min(dst_stride * height, min(dst_size, src_size)));
        } else {
                WARN_ON(dst_stride < width_in_bytes);
                for (i = 0; i < height; ++i) {
                        memcpy(vdst, vsrc, width_in_bytes);
                        vsrc += src_stride;
                        vdst += dst_stride;
                }
        }

        x_in_bytes = (dst_offset % dst_stride);
        diff->rect.x1 = x_in_bytes / diff->cpp;
        diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
        diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
        diff->rect.y2 = diff->rect.y1 + height;

        ret = 0;
out:
        unmap_external(src, &src_map);
        unmap_external(dst, &dst_map);

        return ret;
}
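
/*
 * Example of the dirty-rect arithmetic above: with dst_stride == 4096,
 * dst_offset == 8292 and cpp == 4, x_in_bytes == 8292 % 4096 == 100,
 * giving x1 == 100 / 4 == 25 and y1 == 8192 / 4096 == 2, i.e. the blit
 * starts at pixel (25, 2) of the destination.
 */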

/**
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @vmw_dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @vmw_src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit in bytes.
 * @h: Height of blit in lines.
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * Return: Zero on success. Negative error value on failure. Will print out
 * kernel warnings on caller bugs.
 *
 * Performs a CPU blit from one buffer object to another, avoiding a full
 * bo vmap, which could exhaust or fragment vmalloc space.
 * On supported architectures (x86), we use kmap_atomic, which avoids
 * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
 * reference already set-up mappings.
 *
 * Neither of the buffer objects may be placed in PCI memory
 * (Fixed memory in TTM terminology) when using this function.
 */
int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
                    u32 dst_offset, u32 dst_stride,
                    struct vmw_bo *vmw_src,
                    u32 src_offset, u32 src_stride,
                    u32 w, u32 h,
                    struct vmw_diff_cpy *diff)
{
        struct ttm_buffer_object *src = &vmw_src->tbo;
        struct ttm_buffer_object *dst = &vmw_dst->tbo;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        u32 j, initial_line = dst_offset / dst_stride;
        struct vmw_bo_blit_line_data d = {0};
        int ret = 0;
        struct page **dst_pages = NULL;
        struct page **src_pages = NULL;
        bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
        bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

        if (WARN_ON(dst == src))
                return -EINVAL;

        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->pin_count))
                dma_resv_assert_held(dst->base.resv);
        if (!(src->pin_count))
                dma_resv_assert_held(src->base.resv);

        if (!ttm_tt_is_populated(dst->ttm)) {
                ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
                if (ret)
                        return ret;
        }

        if (!ttm_tt_is_populated(src->ttm)) {
                ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
                if (ret)
                        return ret;
        }

        if (src_external || dst_external)
                return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
                                            vmw_src, src_offset, src_stride,
                                            w, h, diff);

        if (!src->ttm->pages && src->ttm->sg) {
                src_pages = kvmalloc_array(src->ttm->num_pages,
                                           sizeof(struct page *), GFP_KERNEL);
                if (!src_pages)
                        return -ENOMEM;
                ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
                                                 src->ttm->num_pages);
                if (ret)
                        goto out;
        }
        if (!dst->ttm->pages && dst->ttm->sg) {
                dst_pages = kvmalloc_array(dst->ttm->num_pages,
                                           sizeof(struct page *), GFP_KERNEL);
                if (!dst_pages) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
                                                 dst->ttm->num_pages);
                if (ret)
                        goto out;
        }

        d.mapped_dst = 0;
        d.mapped_src = 0;
        d.dst_addr = NULL;
        d.src_addr = NULL;
        d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
        d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
        d.dst_num_pages = PFN_UP(dst->resource->size);
        d.src_num_pages = PFN_UP(src->resource->size);
        d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
        d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
        d.diff = diff;

        for (j = 0; j < h; ++j) {
                diff->line = j + initial_line;
                diff->line_offset = dst_offset % dst_stride;
                ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
                if (ret)
                        goto out;

                dst_offset += dst_stride;
                src_offset += src_stride;
        }
out:
        if (d.src_addr)
                kunmap_atomic(d.src_addr);
        if (d.dst_addr)
                kunmap_atomic(d.dst_addr);
        kvfree(src_pages);
        kvfree(dst_pages);

        return ret;
}
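
/*
 * Caller-side sketch (hypothetical buffer objects; both must be pinned
 * or reserved as noted above): blit a 64x64 region of 32-bit pixels and
 * pick up the modified bounding box:
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, 0, 64 * 4,
 *			      src_bo, 0, 64 * 4,
 *			      64 * 4, 64, &diff);
 *	if (ret == 0)
 *		drm_rect_debug_print("dirty: ", &diff.rect, false);
 *
 * Passing a diff set up with VMW_CPU_BLIT_INITIALIZER instead performs
 * a plain copy without difference tracking.
 */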