1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include "xe_migrate.h"
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
11 #include <drm/drm_managed.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
15 #include <generated/xe_wa_oob.h>
17 #include "instructions/xe_mi_commands.h"
18 #include "regs/xe_gpu_commands.h"
19 #include "regs/xe_gtt_defs.h"
20 #include "tests/xe_test.h"
21 #include "xe_assert.h"
24 #include "xe_exec_queue.h"
27 #include "xe_hw_engine.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sched_job.h"
40 * struct xe_migrate - migrate context.
43 /** @q: Default exec queue used for migration */
44 struct xe_exec_queue *q;
45 /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
47 /** @job_mutex: Timeline mutex for @q. */
48 struct mutex job_mutex;
49 /** @pt_bo: Page-table buffer object. */
51 /** @batch_base_ofs: VM offset of the migration batch buffer */
53 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
54 u64 usm_batch_base_ofs;
55 /** @cleared_mem_ofs: VM offset of the NULL-mapped region used as a source of cleared memory. */
58 * @fence: dma-fence representing the last migration job batch.
59 * Protected by @job_mutex.
61 struct dma_fence *fence;
63 * @vm_update_sa: For integrated, used to suballocate page-tables
66 struct drm_suballoc_manager vm_update_sa;
67 /** @min_chunk_size: For dgfx, minimum chunk size used when splitting VRAM copies and clears */
71 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
72 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
73 #define NUM_KERNEL_PDE 17
74 #define NUM_PT_SLOTS 32
75 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
76 #define MAX_NUM_PTE 512
79 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
80 * legal value accepted. Since that instruction field is always stored in
81 * (val-2) format, this translates to 0x400 dwords for the true maximum length
82 * of the instruction. Subtracting the instruction header (1 dword) and
83 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
85 #define MAX_PTE_PER_SDI 0x1FE
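/*
 * Illustrative sketch (not part of the driver): given the budget described
 * above, a run of PTE writes is split into MI_STORE_DATA_IMM chunks of at
 * most MAX_PTE_PER_SDI qwords, each chunk costing a 3-dword header plus two
 * dwords per PTE. The helper name is hypothetical; the same arithmetic is
 * used by pte_update_size() and emit_pte() below.
 */
static inline u32 example_sdi_cmd_dwords(u32 num_ptes)
{
	u32 chunks = DIV_ROUND_UP(num_ptes, MAX_PTE_PER_SDI);

	/* 3 header dwords per chunk, 2 dwords (one qword) per PTE. */
	return chunks * 3 + num_ptes * 2;
}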
88 * xe_tile_migrate_engine() - Get this tile's migrate engine.
91 * Returns the default migrate engine of this tile.
92 * TODO: Perhaps this function is slightly misplaced, and even unneeded?
94 * Return: The default migrate engine
96 struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
98 return tile->migrate->q;
101 static void xe_migrate_fini(struct drm_device *dev, void *arg)
103 struct xe_migrate *m = arg;
105 xe_vm_lock(m->q->vm, false);
106 xe_bo_unpin(m->pt_bo);
107 xe_vm_unlock(m->q->vm);
109 dma_fence_put(m->fence);
111 drm_suballoc_manager_fini(&m->vm_update_sa);
112 mutex_destroy(&m->job_mutex);
113 xe_vm_close_and_put(m->q->vm);
114 xe_exec_queue_put(m->q);
117 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
119 XE_WARN_ON(slot >= NUM_PT_SLOTS);
121 /* First slot is reserved for mapping of PT bo and bb, start from 1 */
122 return (slot + 1ULL) << xe_pt_shift(level + 1);
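/*
 * Worked example (assuming the usual 4 KiB / 512-entry page-table geometry,
 * i.e. xe_pt_shift(1) == 21): xe_migrate_vm_addr(0, 0) returns 1ULL << 21,
 * so the level-0 PT in slot 0 is mapped at 2 MiB, matching
 * LEVEL0_PAGE_TABLE_ENCODE_SIZE above.
 */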
125 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
128 * Remove the DPA to get the correct offset into the identity table for this address.
131 addr -= xe->mem.vram.dpa_base;
132 return addr + (256ULL << xe_pt_shift(2));
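/*
 * Worked example (numbers assumed for illustration): with
 * xe->mem.vram.dpa_base == 0x100000000 and addr == 0x100001000, the function
 * above returns (256ULL << xe_pt_shift(2)) + 0x1000, i.e. 4 KiB into the
 * 256 GiB identity window that xe_migrate_prepare_vm() sets up below.
 */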
135 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
138 struct xe_device *xe = tile_to_xe(tile);
139 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
141 u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
142 u32 map_ofs, level, i;
143 struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
146 /* Can't bump NUM_PT_SLOTS too high */
147 BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
148 /* Must be a multiple of 64K to support all platforms */
149 BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
150 /* And one slot reserved for the 4KiB page table updates */
151 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
153 /* Need to be sure everything fits in the first PT, or create more */
154 xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
156 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
157 num_entries * XE_PAGE_SIZE,
159 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
164 entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
165 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
167 map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
169 /* Map the entire BO in our level 0 pt */
170 for (i = 0, level = 0; i < num_entries; level++) {
171 entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
174 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
176 if (vm->flags & XE_VM_FLAG_64K)
183 /* Write out batch too */
184 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
185 for (i = 0; i < batch->size;
186 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
188 entry = vm->pt_ops->pte_encode_bo(batch, i,
191 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
195 if (xe->info.has_usm) {
196 xe_tile_assert(tile, batch->size == SZ_1M);
198 batch = tile->primary_gt->usm.bb_pool->bo;
199 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
200 xe_tile_assert(tile, batch->size == SZ_512K);
202 for (i = 0; i < batch->size;
203 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
205 entry = vm->pt_ops->pte_encode_bo(batch, i,
208 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
214 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
216 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
218 if (xe->info.has_usm) {
219 batch = tile->primary_gt->usm.bb_pool->bo;
220 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
221 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
225 for (level = 1; level < num_level; level++) {
228 if (vm->flags & XE_VM_FLAG_64K && level == 1)
231 entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
232 XE_PAGE_SIZE, pat_index);
233 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
237 /* Write PDE's that point to our BO. */
238 for (i = 0; i < num_entries - num_level; i++) {
239 entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
242 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
243 (i + 1) * 8, u64, entry);
246 /* Set up a 1GiB NULL mapping at 255GiB offset. */
248 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
249 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
251 m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
253 /* Identity map the entire vram at 256GiB offset */
258 ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
259 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
263 * Use 1GB pages; it doesn't matter that the physical amount of
264 * vram may be smaller, as long as the excess is never accessed.
266 for (pos = xe->mem.vram.dpa_base;
267 pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
268 pos += SZ_1G, ofs += 8)
269 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
273 * Example layout created above, with root level = 3:
274 * [PT0...PT7]: kernel PT's for copy/clear; 64 KiB or 4 KiB PTEs
275 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
276 * [PT9...PT28]: Userspace PT's for VM_BIND, 4 KiB PTE's
277 * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
279 * This makes the lowest part of the VM point to the pagetables.
280 * Hence the lowest 2M in the VM points to itself; with a few writes and
281 * flushes, other parts of the VM can be used for copying and clearing.
284 * For performance, the kernel reserves PDE's, so about 20 are left
285 * for async VM updates.
287 * To make this easier to work with, each scratch PT is put in slot (1 + PT #)
288 * everywhere, which allows lockless updates to scratch pages by using
289 * the different addresses in the VM.
291 #define NUM_VMUSA_UNIT_PER_PAGE 32
292 #define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
293 #define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
294 drm_suballoc_manager_init(&m->vm_update_sa,
295 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
296 NUM_VMUSA_UNIT_PER_PAGE, 0);
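/*
 * Worked example of the suballocator sizing above, with assumed numbers:
 * XE_PAGE_SIZE == SZ_4K gives VM_SA_UPDATE_UNIT_SIZE == 128 bytes and
 * NUM_VMUSA_WRITES_PER_UNIT == 16 qwords per unit, i.e. 32 units per page.
 * With root level 3 (num_level == 3), map_ofs / XE_PAGE_SIZE == 32 - 3 == 29
 * usable PT pages; subtracting NUM_KERNEL_PDE == 17 reserved pages leaves
 * (29 - 17) * 32 == 384 suballocation units for pipelined VM updates.
 */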
303 * Due to workaround 16017236439, odd-instance hardware copy engines are
304 * faster than even-instance ones.
305 * This function returns a mask covering all fast copy engines plus the
306 * reserved copy engine, to be used as the logical mask for the migrate engine.
307 * Including the reserved copy engine is required to avoid deadlocks where
308 * migrate jobs servicing faults get stuck behind the job that faulted.
310 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
312 u32 logical_mask = 0;
313 struct xe_hw_engine *hwe;
314 enum xe_hw_engine_id id;
316 for_each_hw_engine(hwe, gt, id) {
317 if (hwe->class != XE_ENGINE_CLASS_COPY)
320 if (!XE_WA(gt, 16017236439) ||
321 xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
322 logical_mask |= BIT(hwe->logical_instance);
329 * xe_migrate_init() - Initialize a migrate context
330 * @tile: Back-pointer to the tile we're initializing for.
332 * Return: Pointer to a migrate context on success. Error pointer on error.
334 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
336 struct xe_device *xe = tile_to_xe(tile);
337 struct xe_gt *primary_gt = tile->primary_gt;
338 struct xe_migrate *m;
342 m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
344 return ERR_PTR(-ENOMEM);
348 /* Special layout, prepared below.. */
349 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
350 XE_VM_FLAG_SET_TILE_ID(tile));
354 xe_vm_lock(vm, false);
355 err = xe_migrate_prepare_vm(tile, m, vm);
358 xe_vm_close_and_put(vm);
362 if (xe->info.has_usm) {
363 struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
364 XE_ENGINE_CLASS_COPY,
365 primary_gt->usm.reserved_bcs_instance,
367 u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
369 if (!hwe || !logical_mask)
370 return ERR_PTR(-EINVAL);
372 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
373 EXEC_QUEUE_FLAG_KERNEL |
374 EXEC_QUEUE_FLAG_PERMANENT |
375 EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
377 m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
378 XE_ENGINE_CLASS_COPY,
379 EXEC_QUEUE_FLAG_KERNEL |
380 EXEC_QUEUE_FLAG_PERMANENT);
383 xe_vm_close_and_put(vm);
384 return ERR_CAST(m->q);
387 mutex_init(&m->job_mutex);
389 err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
394 if (xe_device_has_flat_ccs(xe))
395 /* min chunk size corresponds to 4K of CCS Metadata */
396 m->min_chunk_size = SZ_4K * SZ_64K /
397 xe_device_ccs_bytes(xe, SZ_64K);
399 /* Somewhat arbitrary, to avoid a huge number of blits */
400 m->min_chunk_size = SZ_64K;
401 m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
402 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
403 (unsigned long long)m->min_chunk_size);
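/*
 * Worked example (CCS ratio assumed for illustration): if the device uses one
 * byte of CCS metadata per 256 bytes of main memory, then
 * xe_device_ccs_bytes(xe, SZ_64K) == 256 and the computation above yields
 * SZ_4K * SZ_64K / 256 == SZ_1M, i.e. each chunk carries exactly 4 KiB of
 * CCS metadata.
 */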
409 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
411 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
412 return MAX_CCS_LIMITED_TRANSFER;
414 return MAX_PREEMPTDISABLE_TRANSFER;
417 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
419 struct xe_device *xe = tile_to_xe(m->tile);
420 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
422 if (mem_type_is_vram(cur->mem_type)) {
424 * For VRAM we want to blit in chunks with sizes aligned to
425 * min_chunk_size in order for the offset to CCS metadata to be
426 * page-aligned. If it's the last chunk it may be smaller.
428 * Another constraint is that we need to limit the blit to
429 * the VRAM block size, unless the size is smaller than min_chunk_size.
432 u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
434 size = min_t(u64, size, chunk);
435 if (size > m->min_chunk_size)
436 size = round_down(size, m->min_chunk_size);
442 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
444 /* If the chunk is not fragmented, allow identity map. */
445 return cur->size >= size;
448 static u32 pte_update_size(struct xe_migrate *m,
450 struct ttm_resource *res,
451 struct xe_res_cursor *cur,
452 u64 *L0, u64 *L0_ofs, u32 *L0_pt,
453 u32 cmd_size, u32 pt_ofs, u32 avail_pts)
458 if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
459 /* Offset into identity map. */
460 *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
461 cur->start + vram_region_gpu_offset(res));
464 /* Clip L0 to available size */
465 u64 size = min(*L0, (u64)avail_pts * SZ_2M);
466 u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
469 *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
471 /* MI_STORE_DATA_IMM */
472 cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
475 cmds += num_4k_pages * 2;
477 /* Each chunk has a single blit command */
484 static void emit_pte(struct xe_migrate *m,
485 struct xe_bb *bb, u32 at_pt,
486 bool is_vram, bool is_comp_pte,
487 struct xe_res_cursor *cur,
488 u32 size, struct ttm_resource *res)
490 struct xe_device *xe = tile_to_xe(m->tile);
491 struct xe_vm *vm = m->q->vm;
494 u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
497 /* Indirect access needs a compression-enabled, uncached PAT index */
498 if (GRAPHICS_VERx100(xe) >= 2000)
499 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
500 xe->pat.idx[XE_CACHE_WB];
502 pat_index = xe->pat.idx[XE_CACHE_WB];
504 ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
507 u32 chunk = min(MAX_PTE_PER_SDI, ptes);
509 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
510 bb->cs[bb->len++] = ofs;
511 bb->cs[bb->len++] = 0;
521 addr = xe_res_dma(cur) & PAGE_MASK;
523 if (vm->flags & XE_VM_FLAG_64K) {
524 u64 va = cur_ofs * XE_PAGE_SIZE / 8;
526 xe_assert(xe, (va & (SZ_64K - 1)) ==
527 (addr & (SZ_64K - 1)));
529 flags |= XE_PTE_PS64;
532 addr += vram_region_gpu_offset(res);
536 addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
539 bb->cs[bb->len++] = lower_32_bits(addr);
540 bb->cs[bb->len++] = upper_32_bits(addr);
542 xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
548 #define EMIT_COPY_CCS_DW 5
549 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
550 u64 dst_ofs, bool dst_is_indirect,
551 u64 src_ofs, bool src_is_indirect,
554 struct xe_device *xe = gt_to_xe(gt);
555 u32 *cs = bb->cs + bb->len;
561 if (GRAPHICS_VERx100(xe) >= 2000) {
562 num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
563 xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
565 ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
566 mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
569 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
570 NUM_CCS_BYTES_PER_BLOCK);
571 xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
573 ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
574 mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
577 *cs++ = XY_CTRL_SURF_COPY_BLT |
578 (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
579 (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
581 *cs++ = lower_32_bits(src_ofs);
582 *cs++ = upper_32_bits(src_ofs) | mocs;
583 *cs++ = lower_32_bits(dst_ofs);
584 *cs++ = upper_32_bits(dst_ofs) | mocs;
586 bb->len = cs - bb->cs;
589 #define EMIT_COPY_DW 10
590 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
591 u64 src_ofs, u64 dst_ofs, unsigned int size,
594 struct xe_device *xe = gt_to_xe(gt);
598 xe_gt_assert(gt, size / pitch <= S16_MAX);
599 xe_gt_assert(gt, pitch / 4 <= S16_MAX);
600 xe_gt_assert(gt, pitch <= U16_MAX);
602 if (GRAPHICS_VER(xe) >= 20)
603 mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
605 if (GRAPHICS_VERx100(xe) >= 1250)
606 tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
608 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
609 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
610 bb->cs[bb->len++] = 0;
611 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
612 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
613 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
614 bb->cs[bb->len++] = 0;
615 bb->cs[bb->len++] = pitch | mocs;
616 bb->cs[bb->len++] = lower_32_bits(src_ofs);
617 bb->cs[bb->len++] = upper_32_bits(src_ofs);
620 static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
621 enum dma_resv_usage usage)
623 return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
626 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
628 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
631 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
633 u64 src_ofs, bool src_is_indirect,
634 u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
635 u64 ccs_ofs, bool copy_ccs)
637 struct xe_gt *gt = m->tile->primary_gt;
640 if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
642 * If the src is already in vram, then it should already
643 * have been cleared by us, or has been populated by the
644 * user. Make sure we copy the CCS aux state as-is.
646 * Otherwise if the bo doesn't have any CCS metadata attached,
647 * we still need to clear it for security reasons.
649 u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;
651 emit_copy_ccs(gt, bb,
653 ccs_src_ofs, src_is_indirect, dst_size);
655 flush_flags = MI_FLUSH_DW_CCS;
656 } else if (copy_ccs) {
657 if (!src_is_indirect)
659 else if (!dst_is_indirect)
662 xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
664 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
665 src_is_indirect, dst_size);
667 flush_flags = MI_FLUSH_DW_CCS;
674 * xe_migrate_copy() - Copy content of TTM resources.
675 * @m: The migration context.
676 * @src_bo: The buffer object @src is currently bound to.
677 * @dst_bo: If copying between resources created for the same bo, set this to
678 * the same value as @src_bo. If copying between buffer objects, set it to
679 * the buffer object @dst is currently bound to.
680 * @src: The source TTM resource.
681 * @dst: The dst TTM resource.
682 * @copy_only_ccs: If true, copy only CCS metadata
684 * Copies the contents of @src to @dst: On flat CCS devices,
685 * the CCS metadata is copied as well if needed, or if not present,
686 * the CCS metadata of @dst is cleared for security reasons.
688 * Return: Pointer to a dma_fence representing the last copy batch, or
689 * an error pointer on failure. If there is a failure, any copy operation
690 * started by the function call has been synced.
692 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
693 struct xe_bo *src_bo,
694 struct xe_bo *dst_bo,
695 struct ttm_resource *src,
696 struct ttm_resource *dst,
699 struct xe_gt *gt = m->tile->primary_gt;
700 struct xe_device *xe = gt_to_xe(gt);
701 struct dma_fence *fence = NULL;
702 u64 size = src_bo->size;
703 struct xe_res_cursor src_it, dst_it, ccs_it;
704 u64 src_L0_ofs, dst_L0_ofs;
705 u32 src_L0_pt, dst_L0_pt;
709 bool src_is_pltt = src->mem_type == XE_PL_TT;
710 bool dst_is_pltt = dst->mem_type == XE_PL_TT;
711 bool src_is_vram = mem_type_is_vram(src->mem_type);
712 bool dst_is_vram = mem_type_is_vram(dst->mem_type);
713 bool copy_ccs = xe_device_has_flat_ccs(xe) &&
714 xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
715 bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
717 /* Copying CCS between two different BOs is not supported yet. */
718 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
719 return ERR_PTR(-EINVAL);
721 if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
722 return ERR_PTR(-EINVAL);
725 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
727 xe_res_first(src, 0, size, &src_it);
729 xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
731 xe_res_first(dst, 0, size, &dst_it);
734 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
735 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
739 u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
740 struct xe_sched_job *job;
744 u64 ccs_ofs, ccs_size;
747 bool usm = xe->info.has_usm;
748 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
750 src_L0 = xe_migrate_res_sizes(m, &src_it);
751 dst_L0 = xe_migrate_res_sizes(m, &dst_it);
753 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
754 pass++, src_L0, dst_L0);
756 src_L0 = min(src_L0, dst_L0);
758 batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
759 &src_L0_ofs, &src_L0_pt, 0, 0,
762 batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
763 &dst_L0_ofs, &dst_L0_pt, 0,
764 avail_pts, avail_pts);
766 if (copy_system_ccs) {
767 ccs_size = xe_device_ccs_bytes(xe, src_L0);
768 batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
769 &ccs_ofs, &ccs_pt, 0,
772 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
775 /* Add copy commands size here */
776 batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
777 ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
779 bb = xe_bb_new(gt, batch_size, usm);
785 if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
786 xe_res_next(&src_it, src_L0);
788 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
789 &src_it, src_L0, src);
791 if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
792 xe_res_next(&dst_it, src_L0);
794 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
795 &dst_it, src_L0, dst);
798 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
800 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
801 update_idx = bb->len;
804 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
806 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
807 IS_DGFX(xe) ? src_is_vram : src_is_pltt,
809 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
810 src_L0, ccs_ofs, copy_ccs);
812 mutex_lock(&m->job_mutex);
813 job = xe_bb_create_migration_job(m->q, bb,
814 xe_migrate_batch_base(m, usm),
821 xe_sched_job_add_migrate_flush(job, flush_flags);
823 err = job_add_deps(job, src_bo->ttm.base.resv,
824 DMA_RESV_USAGE_BOOKKEEP);
825 if (!err && src_bo != dst_bo)
826 err = job_add_deps(job, dst_bo->ttm.base.resv,
827 DMA_RESV_USAGE_BOOKKEEP);
832 xe_sched_job_arm(job);
833 dma_fence_put(fence);
834 fence = dma_fence_get(&job->drm.s_fence->finished);
835 xe_sched_job_push(job);
837 dma_fence_put(m->fence);
838 m->fence = dma_fence_get(fence);
840 mutex_unlock(&m->job_mutex);
842 xe_bb_free(bb, fence);
847 xe_sched_job_put(job);
849 mutex_unlock(&m->job_mutex);
850 xe_bb_free(bb, NULL);
853 /* Sync partial copy if any. FIXME: under job_mutex? */
855 dma_fence_wait(fence, false);
856 dma_fence_put(fence);
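/*
 * Hypothetical usage sketch (not taken from the driver): how a caller might
 * drive xe_migrate_copy() for a buffer-object move and wait for completion.
 * The wrapper name and the synchronous wait are assumptions for illustration
 * only; real callers typically install the returned fence in a dma_resv
 * rather than blocking.
 */
static int example_migrate_bo_copy(struct xe_migrate *m, struct xe_bo *bo,
				   struct ttm_resource *src,
				   struct ttm_resource *dst)
{
	struct dma_fence *fence;

	/* Copy main data and, where applicable, CCS metadata. */
	fence = xe_migrate_copy(m, bo, bo, src, dst, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Wait for the last copy batch before tearing down @src. */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return 0;
}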
865 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
868 struct xe_device *xe = gt_to_xe(gt);
869 u32 *cs = bb->cs + bb->len;
870 u32 len = PVC_MEM_SET_CMD_LEN_DW;
872 *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
874 *cs++ = (size / pitch) - 1;
876 *cs++ = lower_32_bits(src_ofs);
877 *cs++ = upper_32_bits(src_ofs);
878 if (GRAPHICS_VERx100(xe) >= 2000)
879 *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
881 *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
883 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
888 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
889 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
891 struct xe_device *xe = gt_to_xe(gt);
892 u32 *cs = bb->cs + bb->len;
893 u32 len = XY_FAST_COLOR_BLT_DW;
895 if (GRAPHICS_VERx100(xe) < 1250)
898 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
900 if (GRAPHICS_VERx100(xe) >= 2000)
901 *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
904 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
907 *cs++ = (size / pitch) << 16 | pitch / 4;
908 *cs++ = lower_32_bits(src_ofs);
909 *cs++ = upper_32_bits(src_ofs);
910 *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
924 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
929 static bool has_service_copy_support(struct xe_gt *gt)
932 * What we care about is whether the architecture was designed with
933 * service copy functionality (specifically the new MEM_SET / MEM_COPY
934 * instructions) so check the architectural engine list rather than the
935 * actual list since these instructions are usable on BCS0 even if
936 * all of the actual service copy engines (BCS1-BCS8) have been fused off.
939 return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
943 static u32 emit_clear_cmd_len(struct xe_gt *gt)
945 if (has_service_copy_support(gt))
946 return PVC_MEM_SET_CMD_LEN_DW;
948 return XY_FAST_COLOR_BLT_DW;
951 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
952 u32 size, u32 pitch, bool is_vram)
954 if (has_service_copy_support(gt))
955 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
957 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
962 * xe_migrate_clear() - Clear content of TTM resources.
963 * @m: The migration context.
964 * @bo: The buffer object @dst is currently bound to.
965 * @dst: The dst TTM resource to be cleared.
967 * Clear the contents of @dst to zero. On flat CCS devices,
968 * the CCS metadata is cleared to zero as well on VRAM destinations.
969 * TODO: Eliminate the @bo argument.
971 * Return: Pointer to a dma_fence representing the last clear batch, or
972 * an error pointer on failure. If there is a failure, any clear operation
973 * started by the function call has been synced.
975 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
977 struct ttm_resource *dst)
979 bool clear_vram = mem_type_is_vram(dst->mem_type);
980 struct xe_gt *gt = m->tile->primary_gt;
981 struct xe_device *xe = gt_to_xe(gt);
982 bool clear_system_ccs = xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe);
983 struct dma_fence *fence = NULL;
985 struct xe_res_cursor src_it;
986 struct ttm_resource *src = dst;
990 xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
992 xe_res_first(src, 0, bo->size, &src_it);
999 struct xe_sched_job *job;
1001 u32 batch_size, update_idx;
1003 bool usm = xe->info.has_usm;
1004 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1006 clear_L0 = xe_migrate_res_sizes(m, &src_it);
1008 /* Calculate final sizes and batch size.. */
1010 pte_update_size(m, clear_vram, src, &src_it,
1011 &clear_L0, &clear_L0_ofs, &clear_L0_pt,
1012 clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
1015 if (xe_device_has_flat_ccs(xe))
1016 batch_size += EMIT_COPY_CCS_DW;
1018 /* Clear commands */
1020 if (WARN_ON_ONCE(!clear_L0))
1023 bb = xe_bb_new(gt, batch_size, usm);
1030 /* Preemption is enabled again by the ring ops. */
1031 if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1032 xe_res_next(&src_it, clear_L0);
1034 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
1035 &src_it, clear_L0, dst);
1037 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1038 update_idx = bb->len;
1040 if (!clear_system_ccs)
1041 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1043 if (xe_device_has_flat_ccs(xe)) {
1044 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1045 m->cleared_mem_ofs, false, clear_L0);
1046 flush_flags = MI_FLUSH_DW_CCS;
1049 mutex_lock(&m->job_mutex);
1050 job = xe_bb_create_migration_job(m->q, bb,
1051 xe_migrate_batch_base(m, usm),
1058 xe_sched_job_add_migrate_flush(job, flush_flags);
1061 * There can't be anything userspace related at this
1062 * point, so we just need to respect any potential move
1063 * fences, which are always tracked as
1064 * DMA_RESV_USAGE_KERNEL.
1066 err = job_add_deps(job, bo->ttm.base.resv,
1067 DMA_RESV_USAGE_KERNEL);
1072 xe_sched_job_arm(job);
1073 dma_fence_put(fence);
1074 fence = dma_fence_get(&job->drm.s_fence->finished);
1075 xe_sched_job_push(job);
1077 dma_fence_put(m->fence);
1078 m->fence = dma_fence_get(fence);
1080 mutex_unlock(&m->job_mutex);
1082 xe_bb_free(bb, fence);
1086 xe_sched_job_put(job);
1088 mutex_unlock(&m->job_mutex);
1089 xe_bb_free(bb, NULL);
1091 /* Sync partial copies if any. FIXME: job_mutex? */
1093 dma_fence_wait(m->fence, false);
1094 dma_fence_put(fence);
1097 return ERR_PTR(err);
1100 if (clear_system_ccs)
1101 bo->ccs_cleared = true;
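/*
 * Hypothetical usage sketch (not taken from the driver): clearing a freshly
 * allocated resource with xe_migrate_clear() and waiting for the result. The
 * wrapper name is an assumption; real callers usually attach the returned
 * fence to the object's reservation instead of waiting inline.
 */
static int example_migrate_clear_bo(struct xe_migrate *m, struct xe_bo *bo,
				    struct ttm_resource *dst)
{
	struct dma_fence *fence = xe_migrate_clear(m, bo, dst);

	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return 0;
}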
1106 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1107 const struct xe_vm_pgtable_update *update,
1108 struct xe_migrate_pt_update *pt_update)
1110 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1112 u32 ofs = update->ofs, size = update->qwords;
1115 * If we have 512 entries (max), we would populate it ourselves,
1116 * and update the PDE above it to the new pointer.
1117 * The only time this can happen is when we have to update the top
1118 * PDE. This requires a BO that is almost vm->size big.
1120 * This shouldn't be possible in practice, but might change when 16K
1121 * pages are used. Hence the assert.
1123 xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1125 ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1126 xe_bo_addr(update->pt_bo, 0,
1130 u64 addr = ppgtt_ofs + ofs * 8;
1132 chunk = min(size, MAX_PTE_PER_SDI);
1134 /* Ensure populatefn can do memset64 by aligning bb->cs */
1136 bb->cs[bb->len++] = MI_NOOP;
1138 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1139 bb->cs[bb->len++] = lower_32_bits(addr);
1140 bb->cs[bb->len++] = upper_32_bits(addr);
1141 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
1144 bb->len += chunk * 2;
1150 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1152 return xe_vm_get(m->q->vm);
1155 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1156 struct migrate_test_params {
1157 struct xe_test_priv base;
1161 #define to_migrate_test_params(_priv) \
1162 container_of(_priv, struct migrate_test_params, base)
1165 static struct dma_fence *
1166 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1167 struct xe_vm *vm, struct xe_bo *bo,
1168 const struct xe_vm_pgtable_update *updates,
1169 u32 num_updates, bool wait_vm,
1170 struct xe_migrate_pt_update *pt_update)
1172 XE_TEST_DECLARE(struct migrate_test_params *test =
1173 to_migrate_test_params
1174 (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1175 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1176 struct dma_fence *fence;
1180 if (XE_TEST_ONLY(test && test->force_gpu))
1181 return ERR_PTR(-ETIME);
1183 if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
1184 DMA_RESV_USAGE_KERNEL))
1185 return ERR_PTR(-ETIME);
1187 if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
1188 DMA_RESV_USAGE_BOOKKEEP))
1189 return ERR_PTR(-ETIME);
1191 if (ops->pre_commit) {
1192 pt_update->job = NULL;
1193 err = ops->pre_commit(pt_update);
1195 return ERR_PTR(err);
1197 for (i = 0; i < num_updates; i++) {
1198 const struct xe_vm_pgtable_update *update = &updates[i];
1200 ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
1201 update->ofs, update->qwords, update);
1205 trace_xe_vm_cpu_bind(vm);
1206 xe_device_wmb(vm->xe);
1209 fence = dma_fence_get_stub();
1214 static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
1215 struct xe_sync_entry *syncs, u32 num_syncs)
1217 struct dma_fence *fence;
1220 for (i = 0; i < num_syncs; i++) {
1221 fence = syncs[i].fence;
1223 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1228 fence = xe_exec_queue_last_fence_get(q, vm);
1229 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1230 dma_fence_put(fence);
1233 dma_fence_put(fence);
1240 * xe_migrate_update_pgtables() - Pipelined page-table update
1241 * @m: The migrate context.
1242 * @vm: The vm we'll be updating.
1243 * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
1244 * @q: The exec queue to be used for the update or NULL if the default
1245 * migration engine is to be used.
1246 * @updates: An array of update descriptors.
1247 * @num_updates: Number of descriptors in @updates.
1248 * @syncs: Array of xe_sync_entry to await before updating. Note that waits
1249 * will block the engine timeline.
1250 * @num_syncs: Number of entries in @syncs.
1251 * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
1252 * pointers to callback functions and, if subclassed, private arguments to
1255 * Perform a pipelined page-table update. The update descriptors are typically
1256 * built under the same lock critical section as a call to this function. If
1257 * using the default engine for the updates, they will be performed in the
1258 * order they grab the job_mutex. If different engines are used, external
1259 * synchronization is needed for overlapping updates to maintain page-table
1260 * consistency. Note that the meaning of "overlapping" is that the updates
1261 * touch the same page-table, which might be a higher-level page-directory.
1262 * If no pipelining is needed, then updates may be performed by the cpu.
1264 * Return: A dma_fence that, when signaled, indicates the update completion.
1267 xe_migrate_update_pgtables(struct xe_migrate *m,
1270 struct xe_exec_queue *q,
1271 const struct xe_vm_pgtable_update *updates,
1273 struct xe_sync_entry *syncs, u32 num_syncs,
1274 struct xe_migrate_pt_update *pt_update)
1276 const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1277 struct xe_tile *tile = m->tile;
1278 struct xe_gt *gt = tile->primary_gt;
1279 struct xe_device *xe = tile_to_xe(tile);
1280 struct xe_sched_job *job;
1281 struct dma_fence *fence;
1282 struct drm_suballoc *sa_bo = NULL;
1283 struct xe_vma *vma = pt_update->vma;
1285 u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
1288 bool usm = !q && xe->info.has_usm;
1289 bool first_munmap_rebind = vma &&
1290 vma->gpuva.flags & XE_VMA_FIRST_REBIND;
1291 struct xe_exec_queue *q_override = !q ? m->q : q;
1292 u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1294 /* Use the CPU if there are no in-syncs and the engine is idle */
1295 if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
1296 fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
1298 first_munmap_rebind,
1300 if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
1304 /* fixed + PTE entries */
1308 batch_size = 6 + num_updates * 2;
1310 for (i = 0; i < num_updates; i++) {
1311 u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
1313 /* align noop + MI_STORE_DATA_IMM cmd prefix */
1314 batch_size += 4 * num_cmds + updates[i].qwords * 2;
1318 * XXX: Create temp bo to copy from, if batch_size becomes too big?
1320 * Worst case: Sum(2 * (each lower level page size) + (top level page size))
1321 * This should be reasonably bounded.
1323 xe_tile_assert(tile, batch_size < SZ_128K);
1325 bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1327 return ERR_CAST(bb);
1329 /* For sysmem PTEs, we need to map them into our hole. */
1331 ppgtt_ofs = NUM_KERNEL_PDE - 1;
1333 xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
1335 sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
1336 GFP_KERNEL, true, 0);
1337 if (IS_ERR(sa_bo)) {
1338 err = PTR_ERR(sa_bo);
1342 ppgtt_ofs = NUM_KERNEL_PDE +
1343 (drm_suballoc_soffset(sa_bo) /
1344 NUM_VMUSA_UNIT_PER_PAGE);
1345 page_ofs = (drm_suballoc_soffset(sa_bo) %
1346 NUM_VMUSA_UNIT_PER_PAGE) *
1347 VM_SA_UPDATE_UNIT_SIZE;
1350 /* Map our PT's to gtt */
1351 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
1352 bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1353 bb->cs[bb->len++] = 0; /* upper_32_bits */
1355 for (i = 0; i < num_updates; i++) {
1356 struct xe_bo *pt_bo = updates[i].pt_bo;
1358 xe_tile_assert(tile, pt_bo->size == SZ_4K);
1360 addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
1361 bb->cs[bb->len++] = lower_32_bits(addr);
1362 bb->cs[bb->len++] = upper_32_bits(addr);
1365 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1366 update_idx = bb->len;
1368 addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1369 (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1370 for (i = 0; i < num_updates; i++)
1371 write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
1372 &updates[i], pt_update);
1374 /* phys pages, no preamble required */
1375 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1376 update_idx = bb->len;
1378 for (i = 0; i < num_updates; i++)
1379 write_pgtable(tile, bb, 0, &updates[i], pt_update);
1383 mutex_lock(&m->job_mutex);
1385 job = xe_bb_create_migration_job(q ?: m->q, bb,
1386 xe_migrate_batch_base(m, usm),
1393 /* Wait on BO move */
1395 err = job_add_deps(job, bo->ttm.base.resv,
1396 DMA_RESV_USAGE_KERNEL);
1402 * Munmap-style VM unbind: we need to wait for all jobs to complete /
1403 * trigger preempts before moving forward.
1405 if (first_munmap_rebind) {
1406 err = job_add_deps(job, xe_vm_resv(vm),
1407 DMA_RESV_USAGE_BOOKKEEP);
1412 err = xe_sched_job_last_fence_add_dep(job, vm);
1413 for (i = 0; !err && i < num_syncs; i++)
1414 err = xe_sync_entry_add_deps(&syncs[i], job);
1419 if (ops->pre_commit) {
1420 pt_update->job = job;
1421 err = ops->pre_commit(pt_update);
1425 xe_sched_job_arm(job);
1426 fence = dma_fence_get(&job->drm.s_fence->finished);
1427 xe_sched_job_push(job);
1430 mutex_unlock(&m->job_mutex);
1432 xe_bb_free(bb, fence);
1433 drm_suballoc_free(sa_bo, fence);
1438 xe_sched_job_put(job);
1441 mutex_unlock(&m->job_mutex);
1442 xe_bb_free(bb, NULL);
1444 drm_suballoc_free(sa_bo, NULL);
1445 return ERR_PTR(err);
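/*
 * Hypothetical usage sketch (not taken from the driver): consuming the fence
 * returned by xe_migrate_update_pgtables(). The helper name is a placeholder
 * and the descriptor array and @pt_update are assumed to have been built by
 * the caller; only the fence-handling pattern is illustrated.
 */
static int example_commit_pt_updates(struct xe_migrate *m, struct xe_vm *vm,
				     const struct xe_vm_pgtable_update *updates,
				     u32 num_updates,
				     struct xe_migrate_pt_update *pt_update)
{
	struct dma_fence *fence;

	/* NULL @bo/@q/@syncs: no BO wait, default engine, no in-fences. */
	fence = xe_migrate_update_pgtables(m, vm, NULL, NULL, updates,
					   num_updates, NULL, 0, pt_update);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Signals once the page-table writes have landed. */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return 0;
}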
1449 * xe_migrate_wait() - Complete all operations using the xe_migrate context
1450 * @m: Migrate context to wait for.
1452 * Waits until the GPU no longer uses the migrate context's default engine
1453 * or its page-table objects. FIXME: What about separate page-table update engines?
1456 void xe_migrate_wait(struct xe_migrate *m)
1459 dma_fence_wait(m->fence, false);
1462 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1463 #include "tests/xe_migrate.c"