// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation- and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: New dirty status to apply if @dirty_set is true.
 * @dirty_set: Whether @dirty carries a dirty status change to apply.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[0];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
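
/*
 * Example (illustrative sketch only, not part of the driver): callers can
 * carve small per-command metadata out of the context allocator instead of
 * using kmalloc(); everything handed out here is reclaimed in one go by
 * vmw_validation_unref_lists(), vmw_validation_revert() or
 * vmw_validation_done(). The struct name below is an assumption made for
 * the sketch:
 *
 *	struct my_cmd_meta *meta;
 *
 *	meta = vmw_validation_mem_alloc(ctx, sizeof(*meta));
 *	if (!meta)
 *		return -ENOMEM;
 */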

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}

out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
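
/*
 * Example (illustrative sketch only): the @p_node output points at the
 * node's trailing caller-private area of @priv_size bytes, so per-resource
 * state can live alongside the validation metadata and be reclaimed with it.
 * The struct and field names below are assumptions made for the sketch:
 *
 *	struct my_res_state *state;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*state),
 *					  0, (void **)&state, &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		state->needs_setup = true;
 */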

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->cpu_blit) {
			struct ttm_operation_ctx ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources- and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * object and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
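
/*
 * Example (illustrative sketch only, not part of the driver): a typical
 * command-submission path pairs vmw_validation_prepare() with
 * vmw_validation_done() on success or vmw_validation_revert() on failure.
 * Names such as my_submit() and the DECLARE_VAL_CONTEXT() arguments below
 * are assumptions made for the sketch:
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *	struct vmw_fence_obj *fence;
 *	int ret;
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (ret)
 *		goto out_unref;
 *
 *	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
 *	if (ret)
 *		goto out_unref;
 *
 *	ret = my_submit(dev_priv, &fence);
 *	if (ret) {
 *		vmw_validation_revert(&val_ctx);
 *		return ret;
 *	}
 *
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 *
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */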

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;

	return 0;
}
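
/*
 * Example (illustrative sketch only): the no-sleep guarantee matters when the
 * buffer object must be looked up and added while holding a spinlock, where
 * vmw_validation_mem_alloc() is not allowed to sleep. The lock and helper
 * names below are assumptions made for the sketch:
 *
 *	ret = vmw_validation_preload_bo(ctx);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&lookup_lock);		// hypothetical lock
 *	vbo = my_bo_lookup_locked(key);		// hypothetical helper
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	spin_unlock(&lookup_lock);
 */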

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_res().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
 * sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;

	return 0;
}