// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "xe_bo.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"

/* Return: number of dwords saved/restored/required or a negative error code on failure */
static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
				      u64 addr, u32 ndwords)
{
	u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) |
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords),
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

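/*
 * Per the PF2GUC_SAVE_RESTORE_VF ABI, the blocking CT send above returns the
 * response data0, i.e. the number of dwords saved or restored. Issuing a SAVE
 * with an empty buffer (addr = 0, ndwords = 0) only queries the size that a
 * full save would need, which is how the helper below obtains it.
 */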
/* Return: size of the state in dwords or a negative error code on failure */
static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
{
	int ret;

	ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);
	return ret ?: -ENODATA;
}

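/*
 * The save/restore helpers below bounce the VF state through a short-lived
 * system-memory BO mapped into GGTT, as the GuC accesses the buffer by its
 * GGTT address.
 */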
/* Return: number of state dwords saved or a negative error code on failure */
static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
				     void *buff, size_t size)
{
	const int ndwords = size / sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	xe_gt_assert(gt, size % sizeof(u32) == 0);
	xe_gt_assert(gt, size == ndwords * sizeof(u32));

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
					 xe_bo_ggtt_addr(bo), ndwords);
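	/*
	 * Decode the GuC reply: zero dwords means no data; more dwords than
	 * the buffer holds indicates a protocol error; otherwise copy out
	 * exactly what the GuC reported.
	 */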
	if (!ret)
		ret = -ENODATA;
	else if (ret > ndwords)
		ret = -EPROTO;
	else if (ret > 0)
		xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));

	xe_bo_unpin_map_no_vm(bo);
	return ret;
}

/* Return: number of state dwords restored or a negative error code on failure */
static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
					const void *buff, size_t size)
{
	const int ndwords = size / sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	xe_gt_assert(gt, size % sizeof(u32) == 0);
	xe_gt_assert(gt, size == ndwords * sizeof(u32));

	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);

	ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
					 xe_bo_ggtt_addr(bo), ndwords);
	if (!ret)
		ret = -ENODATA;
	else if (ret > ndwords)
		ret = -EPROTO;

	xe_bo_unpin_map_no_vm(bo);
	return ret;
}

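/*
 * Per-VF snapshots are stored in gt->sriov.pf.vfs[] and all accesses to them
 * are serialized by the per-GT snapshot_lock; the small helpers below wrap
 * the lookups and assert the expected context.
 */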
static bool pf_migration_supported(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	return gt->sriov.pf.migration.supported;
}

static struct mutex *pf_migration_mutex(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	return &gt->sriov.pf.migration.snapshot_lock;
}

static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
							      unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(pf_migration_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].snapshot;
}

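/*
 * pf_snapshot_index() recovers the VF number from a snapshot pointer: the
 * snapshot is embedded in struct xe_gt_sriov_metadata, so container_of()
 * yields the metadata entry and pointer arithmetic against the vfs[] array
 * base gives its index.
 */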
static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
	return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
}

static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
	struct xe_device *xe = gt_to_xe(gt);

	drmm_kfree(&xe->drm, snapshot->guc.buff);
	snapshot->guc.buff = NULL;
	snapshot->guc.size = 0;
}

static int pf_alloc_guc_state(struct xe_gt *gt,
			      struct xe_gt_sriov_state_snapshot *snapshot,
			      size_t size)
{
	struct xe_device *xe = gt_to_xe(gt);
	void *p;

	pf_free_guc_state(gt, snapshot);

	if (!size)
		return -ENODATA;

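	/*
	 * Reject sizes that are not a whole number of dwords and, as a
	 * sanity cap, anything larger than SZ_2M, presumably well above
	 * any real GuC VF state.
	 */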
	if (size % sizeof(u32))
		return -EINVAL;

	if (size > SZ_2M)
		return -EFBIG;

	p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	snapshot->guc.buff = p;
	snapshot->guc.size = size;
	return 0;
}

static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);

		xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
					vfid, snapshot->guc.size / sizeof(u32));
		print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
				     snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
	}
}

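/*
 * Saving is a three-step sequence: query the required size from the GuC,
 * (re)allocate the snapshot buffer, then ask the GuC to write the state into
 * it. The GuC may report fewer dwords on the actual save than the earlier
 * query, so the snapshot size is trimmed to what was written.
 */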
static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
	size_t size;
	int ret;

	ret = pf_send_guc_query_vf_state_size(gt, vfid);
	if (ret < 0)
		goto fail;
	size = ret * sizeof(u32);
	xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);

	ret = pf_alloc_guc_state(gt, snapshot, size);
	if (ret < 0)
		goto fail;

	ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
	if (ret < 0)
		goto fail;
	size = ret * sizeof(u32);
	xe_gt_assert(gt, size);
	xe_gt_assert(gt, size <= snapshot->guc.size);
	snapshot->guc.size = size;

	pf_dump_guc_state(gt, snapshot);
	return 0;

fail:
	xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
	pf_free_guc_state(gt, snapshot);
	return ret;
}

/**
 * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	err = pf_save_vf_guc_state(gt, vfid);
	mutex_unlock(pf_migration_mutex(gt));

	return err;
}

static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
	int ret;

	if (!snapshot->guc.size)
		return -ENODATA;

	xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
				snapshot->guc.size / sizeof(u32), vfid);
	ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
	if (ret < 0)
		goto fail;

	xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
	return 0;

fail:
	xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
	return ret;
}

/**
 * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	int ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	ret = pf_restore_vf_guc_state(gt, vfid);
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}

#ifdef CONFIG_DEBUG_FS
/**
 * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @buf: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @pos: the current position in the buffer
 *
 * This function is for PF only.
 *
 * This function reads up to @count bytes from the saved VF GuC state buffer
 * at offset @pos into the user space address starting at @buf.
 *
 * Return: the number of bytes read or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
						char __user *buf, size_t count, loff_t *pos)
{
	struct xe_gt_sriov_state_snapshot *snapshot;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	snapshot = pf_pick_vf_snapshot(gt, vfid);
	if (snapshot->guc.size)
		ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
					      snapshot->guc.size);
	else
		ret = -ENODATA;
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}

/**
 * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @buf: the user space buffer with GuC VF state
 * @size: the size of GuC VF state (in bytes)
 *
 * This function is for PF only.
 *
 * This function reads @size bytes of the VF GuC state stored at user space
 * address @buf and writes it into an internal VF state buffer.
 *
 * Return: the number of bytes used or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
						 const char __user *buf, size_t size)
{
	struct xe_gt_sriov_state_snapshot *snapshot;
	loff_t pos = 0;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	snapshot = pf_pick_vf_snapshot(gt, vfid);
	ret = pf_alloc_guc_state(gt, snapshot, size);
	if (!ret) {
		ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
		if (ret < 0)
			pf_free_guc_state(gt, snapshot);
		else
			pf_dump_guc_state(gt, snapshot);
	}
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}
#endif /* CONFIG_DEBUG_FS */

static bool pf_check_migration_support(struct xe_gt *gt)
{
	/* GuC 70.25 with save/restore v2 is required */
	xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));

	/* XXX: for now this is for feature enabling only */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
}

/**
 * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
 * @gt: the &xe_gt
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	gt->sriov.pf.migration.supported = pf_check_migration_support(gt);

	if (!pf_migration_supported(gt))
		return 0;

	err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
	if (err)
		return err;

	return 0;
}
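
/*
 * A rough sketch of the intended flow (hypothetical call sites; the actual
 * integration lives outside this file):
 *
 *	xe_gt_sriov_pf_migration_init(gt);			// at PF init
 *	...
 *	xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);	// on the source
 *	// transfer the blob, e.g. via the debugfs read/write helpers above
 *	xe_gt_sriov_pf_migration_restore_guc_state(gt, vfid);	// on the target
 */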