/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

/* .fpfn and .lpfn are left zeroed below: no address-range restriction. */
static const struct ttm_place vram_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

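/*
 * Note on the tables above (explanatory, following TTM semantics): each
 * struct ttm_placement names a preferred list (placement/num_placement)
 * and a fallback list (busy_placement/num_busy_placement) that TTM uses
 * when the preferred domains are contended. vmw_vram_gmr_placement, for
 * example, prefers VRAM or a GMR slot, but under memory pressure TTM
 * settles for a GMR slot alone.
 */
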
/* Per buffer-object TTM backend state for vmwgfx. */
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which iteration should start
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

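/*
 * Usage sketch (illustrative only): a caller walks all device addresses
 * of a mapped buffer by pairing vmw_piter_start() with the
 * vmw_piter_next()/vmw_piter_dma_addr() wrappers from vmwgfx_drv.h:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		... use addr ...
 *	}
 *
 * The first vmw_piter_next() call performs the initial advance noted
 * above.
 */
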
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it violates the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

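/*
 * Illustration of the constraint documented above (hypothetical code, not
 * used by this driver): if CPU access were needed while such a mapping is
 * live, the DMA API would require bracketing it with syncs:
 *
 *	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents,
 *			    DMA_BIDIRECTIONAL);
 *	... CPU reads and writes the pages ...
 *	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
 *			       DMA_BIDIRECTIONAL);
 *
 * vmwgfx instead restricts this path to configurations where these syncs
 * are known to be no-ops or mere write-buffer flushes.
 */
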
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	/* Count the number of contiguous DMA regions. */
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

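/*
 * Worked example of the region counting above (illustrative addresses):
 * pages mapped at 0x10000, 0x11000 and 0x14000 with 4 KiB pages give
 * num_regions == 2; the first two pages are contiguous, while the third
 * starts a new region. Each contiguous region can then be described to
 * the device as a single base/length range.
 */
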
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}

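/*
 * Hypothetical caller sketch for the reservation requirement above
 * (error handling trimmed):
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (likely(ret == 0)) {
 *		ret = vmw_bo_map_dma(bo);
 *		ttm_bo_unreserve(bo);
 *	}
 */
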
/**
 * vmw_bo_unmap_dma - Tear down any device DMA mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * struct ttm_buffer_object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object must
 * not be freed by the caller.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit of the number of
		 * slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the buffer is being evicted (unused here).
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_resource_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
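
/*
 * For context (sketch, arguments elided): vmw_bo_driver is registered
 * with TTM at device initialization time, roughly
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, ..., &vmw_bo_driver, ...);
 *
 * after which TTM invokes the callbacks above for buffer objects created
 * on this device.
 */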