// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_mmio.h"

static void pf_init_versions(struct xe_gt *gt)
{
        BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
        BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);

        /* base versions may differ between platforms */
        gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
        gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;

        /* latest version is the same for all platforms */
        gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
        gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
}

/* Return: 0 on success or a negative error code on failure. */
static int pf_negotiate_version(struct xe_gt *gt,
                                u32 wanted_major, u32 wanted_minor,
                                u32 *major, u32 *minor)
{
        struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
        struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;

        xe_gt_assert(gt, base.major);
        xe_gt_assert(gt, base.major <= latest.major);
        xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));

        /* VF doesn't care - return our latest */
        if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
            wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
                *major = latest.major;
                *minor = latest.minor;
                return 0;
        }

        /* VF wants newer than ours - return our latest */
        if (wanted_major > latest.major) {
                *major = latest.major;
                *minor = latest.minor;
                return 0;
        }

        /* VF wants older than min required - reject */
        if (wanted_major < base.major ||
            (wanted_major == base.major && wanted_minor < base.minor)) {
                return -EPERM;
        }

        /* previous major - still in the supported range, but not handled yet */
        if (wanted_major < latest.major) {
                /* XXX: we are not prepared for multi-versions yet */
                xe_gt_assert(gt, base.major == latest.major);
                return -ENOPKG;
        }

        /* same major - return common minor */
        *major = wanted_major;
        *minor = min_t(u32, latest.minor, wanted_minor);
        return 0;
}
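
/*
 * Illustrative outcomes of the negotiation above, assuming hypothetical
 * version bounds of base 1.0 and latest 1.9 (the real bounds come from
 * the GUC_RELAY_VERSION_* definitions in the relay ABI header):
 *
 *      VF wants        PF replies
 *      any.any         1.9     (our latest)
 *      2.7             1.9     (our latest, we can't go higher)
 *      1.42            1.9     (same major, common minor)
 *      1.3             1.3     (same major, common minor)
 *      0.9             -EPERM  (below base, rejected)
 */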

static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
        xe_gt_sriov_pf_assert_vfid(gt, vfid);
        xe_gt_assert(gt, major || minor);

        gt->sriov.pf.vfs[vfid].version.major = major;
        gt->sriov.pf.vfs[vfid].version.minor = minor;
}

static void pf_disconnect(struct xe_gt *gt, u32 vfid)
{
        xe_gt_sriov_pf_assert_vfid(gt, vfid);

        gt->sriov.pf.vfs[vfid].version.major = 0;
        gt->sriov.pf.vfs[vfid].version.minor = 0;
}
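
/*
 * Version 0.0 doubles as the "not connected" marker, which is why
 * pf_connect() asserts major || minor and pf_disconnect() simply zeroes
 * both fields.
 */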

static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
        xe_gt_sriov_pf_assert_vfid(gt, vfid);

        return major == gt->sriov.pf.vfs[vfid].version.major &&
               minor <= gt->sriov.pf.vfs[vfid].version.minor;
}
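
/*
 * Minor versions are treated as backward compatible here: a VF that
 * negotiated e.g. 1.2 also satisfies a check for (1, 0), while any major
 * mismatch (including the disconnected 0.0 state) fails it.
 */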

static const struct xe_reg tgl_runtime_regs[] = {
        RPM_CONFIG0,                    /* _MMIO(0x0d00) */
        MIRROR_FUSE3,                   /* _MMIO(0x9118) */
        XELP_EU_ENABLE,                 /* _MMIO(0x9134) */
        XELP_GT_SLICE_ENABLE,           /* _MMIO(0x9138) */
        XELP_GT_GEOMETRY_DSS_ENABLE,    /* _MMIO(0x913c) */
        GT_VEBOX_VDBOX_DISABLE,         /* _MMIO(0x9140) */
        CTC_MODE,                       /* _MMIO(0xa26c) */
        HUC_KERNEL_LOAD_INFO,           /* _MMIO(0xc1dc) */
        TIMESTAMP_OVERRIDE,             /* _MMIO(0x44074) */
};

static const struct xe_reg ats_m_runtime_regs[] = {
        RPM_CONFIG0,                    /* _MMIO(0x0d00) */
        MIRROR_FUSE3,                   /* _MMIO(0x9118) */
        MIRROR_FUSE1,                   /* _MMIO(0x911c) */
        XELP_EU_ENABLE,                 /* _MMIO(0x9134) */
        XELP_GT_GEOMETRY_DSS_ENABLE,    /* _MMIO(0x913c) */
        GT_VEBOX_VDBOX_DISABLE,         /* _MMIO(0x9140) */
        XEHP_GT_COMPUTE_DSS_ENABLE,     /* _MMIO(0x9144) */
        CTC_MODE,                       /* _MMIO(0xa26c) */
        HUC_KERNEL_LOAD_INFO,           /* _MMIO(0xc1dc) */
        TIMESTAMP_OVERRIDE,             /* _MMIO(0x44074) */
};

static const struct xe_reg pvc_runtime_regs[] = {
        RPM_CONFIG0,                    /* _MMIO(0x0d00) */
        MIRROR_FUSE3,                   /* _MMIO(0x9118) */
        XELP_EU_ENABLE,                 /* _MMIO(0x9134) */
        XELP_GT_GEOMETRY_DSS_ENABLE,    /* _MMIO(0x913c) */
        GT_VEBOX_VDBOX_DISABLE,         /* _MMIO(0x9140) */
        XEHP_GT_COMPUTE_DSS_ENABLE,     /* _MMIO(0x9144) */
        XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
        CTC_MODE,                       /* _MMIO(0xa26c) */
        HUC_KERNEL_LOAD_INFO,           /* _MMIO(0xc1dc) */
        TIMESTAMP_OVERRIDE,             /* _MMIO(0x44074) */
};

static const struct xe_reg ver_1270_runtime_regs[] = {
        RPM_CONFIG0,                    /* _MMIO(0x0d00) */
        XEHP_FUSE4,                     /* _MMIO(0x9114) */
        MIRROR_FUSE3,                   /* _MMIO(0x9118) */
        MIRROR_FUSE1,                   /* _MMIO(0x911c) */
        XELP_EU_ENABLE,                 /* _MMIO(0x9134) */
        XELP_GT_GEOMETRY_DSS_ENABLE,    /* _MMIO(0x913c) */
        GT_VEBOX_VDBOX_DISABLE,         /* _MMIO(0x9140) */
        XEHP_GT_COMPUTE_DSS_ENABLE,     /* _MMIO(0x9144) */
        XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
        CTC_MODE,                       /* _MMIO(0xa26c) */
        HUC_KERNEL_LOAD_INFO,           /* _MMIO(0xc1dc) */
        TIMESTAMP_OVERRIDE,             /* _MMIO(0x44074) */
};

static const struct xe_reg ver_2000_runtime_regs[] = {
        RPM_CONFIG0,                    /* _MMIO(0x0d00) */
        XEHP_FUSE4,                     /* _MMIO(0x9114) */
        MIRROR_FUSE3,                   /* _MMIO(0x9118) */
        MIRROR_FUSE1,                   /* _MMIO(0x911c) */
        XELP_EU_ENABLE,                 /* _MMIO(0x9134) */
        XELP_GT_GEOMETRY_DSS_ENABLE,    /* _MMIO(0x913c) */
        GT_VEBOX_VDBOX_DISABLE,         /* _MMIO(0x9140) */
        XEHP_GT_COMPUTE_DSS_ENABLE,     /* _MMIO(0x9144) */
        XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
        XE2_GT_COMPUTE_DSS_2,           /* _MMIO(0x914c) */
        XE2_GT_GEOMETRY_DSS_1,          /* _MMIO(0x9150) */
        XE2_GT_GEOMETRY_DSS_2,          /* _MMIO(0x9154) */
        CTC_MODE,                       /* _MMIO(0xa26c) */
        HUC_KERNEL_LOAD_INFO,           /* _MMIO(0xc1dc) */
        TIMESTAMP_OVERRIDE,             /* _MMIO(0x44074) */
};

static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
        const struct xe_reg *regs;

        if (GRAPHICS_VERx100(xe) >= 2000) {
                *count = ARRAY_SIZE(ver_2000_runtime_regs);
                regs = ver_2000_runtime_regs;
        } else if (GRAPHICS_VERx100(xe) >= 1270) {
                *count = ARRAY_SIZE(ver_1270_runtime_regs);
                regs = ver_1270_runtime_regs;
        } else if (GRAPHICS_VERx100(xe) == 1260) {
                *count = ARRAY_SIZE(pvc_runtime_regs);
                regs = pvc_runtime_regs;
        } else if (GRAPHICS_VERx100(xe) == 1255) {
                *count = ARRAY_SIZE(ats_m_runtime_regs);
                regs = ats_m_runtime_regs;
        } else if (GRAPHICS_VERx100(xe) == 1200) {
                *count = ARRAY_SIZE(tgl_runtime_regs);
                regs = tgl_runtime_regs;
        } else {
                regs = ERR_PTR(-ENOPKG);
                *count = 0;
        }

        return regs;
}
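
/*
 * Selection sketch: a hypothetical GRAPHICS_VERx100() of 1271 lands in the
 * ">= 1270" bucket above, while anything below 1200 yields
 * ERR_PTR(-ENOPKG), which in turn makes pf_alloc_runtime_info() and the
 * whole PF service initialization fail with that code.
 */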

static int pf_alloc_runtime_info(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);
        const struct xe_reg *regs;
        unsigned int size;
        u32 *values;

        xe_gt_assert(gt, IS_SRIOV_PF(xe));
        xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
        xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
        xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);

        regs = pick_runtime_regs(xe, &size);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        if (unlikely(!size))
                return 0;

        values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
        if (!values)
                return -ENOMEM;

        gt->sriov.pf.service.runtime.size = size;
        gt->sriov.pf.service.runtime.regs = regs;
        gt->sriov.pf.service.runtime.values = values;

        return 0;
}
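
/*
 * Note: the values[] buffer is drm-managed (drmm_kcalloc()), so it is
 * released automatically when the underlying DRM device is released, while
 * regs points at one of the static const tables above and needs no
 * allocation at all.
 */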

static void read_many(struct xe_gt *gt, unsigned int count,
                      const struct xe_reg *regs, u32 *values)
{
        while (count--)
                *values++ = xe_mmio_read32(&gt->mmio, *regs++);
}

static void pf_prepare_runtime_info(struct xe_gt *gt)
{
        const struct xe_reg *regs;
        unsigned int size;
        u32 *values;

        if (!gt->sriov.pf.service.runtime.size)
                return;

        size = gt->sriov.pf.service.runtime.size;
        regs = gt->sriov.pf.service.runtime.regs;
        values = gt->sriov.pf.service.runtime.values;

        read_many(gt, size, regs, values);

        if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
                struct drm_printer p = xe_gt_info_printer(gt);

                xe_gt_sriov_pf_service_print_runtime(gt, &p);
        }
}

/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
        int err;

        pf_init_versions(gt);

        err = pf_alloc_runtime_info(gt);
        if (unlikely(err))
                goto failed;

        return 0;
failed:
        xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
        return err;
}

/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
        pf_prepare_runtime_info(gt);
}

/**
 * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * Resets the VF/PF ABI version negotiated by the VF driver. After that
 * point, the VF driver must perform a new version handshake to continue
 * using the PF services.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
{
        pf_disconnect(gt, vfid);
}

/* Return: 0 on success or a negative error code on failure. */
static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
                                u32 wanted_major, u32 wanted_minor,
                                u32 *major, u32 *minor)
{
        int err;

        xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
                                vfid, wanted_major, wanted_minor);

        err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);

        if (err < 0) {
                xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
                                   vfid, wanted_major, wanted_minor, ERR_PTR(err));
                pf_disconnect(gt, vfid);
        } else {
                xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
                                vfid, *major, *minor);
                pf_connect(gt, vfid, *major, *minor);
        }

        return err;
}
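
/*
 * Propagating err above matters: on a failed negotiation *major and *minor
 * are left unset, and the caller must turn the error into a failure reply
 * rather than build a success response from uninitialized values.
 */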

/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
                                    const u32 *request, u32 len, u32 *response, u32 size)
{
        u32 wanted_major, wanted_minor;
        u32 major, minor;
        u32 mbz;
        int err;

        if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
                return -EMSGSIZE;

        mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
        if (unlikely(mbz))
                return -EPFNOSUPPORT;

        wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
        wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);

        err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
        if (err < 0)
                return err;

        xe_gt_assert(gt, major || minor);
        xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);

        response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
                      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
        response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
                      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);

        return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
}
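
/*
 * Handshake wire format sketch, as implied by the VF2PF_HANDSHAKE_* masks
 * used above (the exact bit layout lives in the relay ABI header); both
 * directions are two dwords long:
 *
 *      request[0]  = HXG header (REQUEST + action), remaining bits MBZ
 *      request[1]  = wanted MAJOR | wanted MINOR
 *      response[0] = HXG header (ORIGIN_HOST + RESPONSE_SUCCESS)
 *      response[1] = granted MAJOR | granted MINOR
 */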

struct reg_data {
        u32 offset;
        u32 value;
} __packed;
static_assert(hxg_sizeof(struct reg_data) == 2);
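
/*
 * hxg_sizeof() counts in dwords, so the static_assert above pins each
 * reg_data entry to exactly two dwords of response payload.
 */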

/* Return: number of entries copied or negative error code on failure. */
static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
                                    struct reg_data *data, u32 *remaining)
{
        struct xe_gt_sriov_pf_service_runtime_regs *runtime;
        unsigned int count, i;
        u32 addr;

        xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

        runtime = &gt->sriov.pf.service.runtime;

        if (start > runtime->size)
                return -ERANGE;

        count = min_t(u32, runtime->size - start, limit);

        for (i = 0; i < count; ++i, ++data) {
                addr = runtime->regs[start + i].addr;
                data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
                data->value = runtime->values[start + i];
        }

        *remaining = runtime->size - start - count;
        return count;
}
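
/*
 * Worked example: with runtime->size == 10, start == 4 and limit == 4, the
 * loop above copies entries 4..7 (count == 4) and reports remaining == 2,
 * so the VF would continue with start == 8.
 */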

/* Return: length of the response message or a negative error code on failure. */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
                                        const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
        const u32 chunk_size = hxg_sizeof(struct reg_data);
        struct reg_data *reg_data_buf;
        u32 limit, start, max_chunks;
        u32 remaining = 0;
        int ret;

        if (!pf_is_negotiated(gt, origin, 1, 0))
                return -EACCES;
        if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
                return -EMSGSIZE;
        if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
                return -EPROTO;
        if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
                return -EINVAL;

        limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
        start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

        resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
        max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
        limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
        reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

        ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
        if (ret < 0)
                return ret;

        response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
                      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
        response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

        return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}
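
/*
 * Chunking arithmetic sketch: assuming a hypothetical resp_size of 32
 * dwords and a 2-dword response header (the MSG_MIN_LEN value implied by
 * the two header dwords written above), max_chunks = (32 - 2) / 2 = 15
 * reg_data entries fit in a single response; a VF2PF_QUERY_RUNTIME_NO_LIMIT
 * request is clamped to that.
 */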

/**
 * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
 * @gt: the &xe_gt that provides the service
 * @origin: VF number that is requesting the service
 * @msg: request message
 * @msg_len: length of the request message (in dwords)
 * @response: placeholder for the response message
 * @resp_size: length of the response message buffer (in dwords)
 *
 * This function processes a `Relay Message`_ request from the VF.
 *
 * Return: length of the response message or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
                                           const u32 *msg, u32 msg_len,
                                           u32 *response, u32 resp_size)
{
        u32 action, data __maybe_unused;
        int ret;

        xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
        xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);

        action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
        data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
        xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
                                action, data, origin);

        switch (action) {
        case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
                ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
                break;
        case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
                ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}
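
/*
 * Unknown actions fall through to -EOPNOTSUPP above; turning that error
 * into an actual failure reply for the VF is presumably left to the
 * calling relay layer (see xe_guc_relay).
 */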

/**
 * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
        const struct xe_reg *regs;
        unsigned int size;
        u32 *values;

        xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

        size = gt->sriov.pf.service.runtime.size;
        regs = gt->sriov.pf.service.runtime.regs;
        values = gt->sriov.pf.service.runtime.values;

        for (; size--; regs++, values++) {
                drm_printf(p, "reg[%#x] = %#x\n",
                           xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
        }

        return 0;
}

/**
 * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
{
        struct xe_device *xe = gt_to_xe(gt);
        unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
        struct xe_gt_sriov_pf_service_version *version;

        xe_gt_assert(gt, IS_SRIOV_PF(xe));

        for (n = 1; n <= total_vfs; n++) {
                version = &gt->sriov.pf.vfs[n].version;
                if (!version->major && !version->minor)
                        continue;

                drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
        }

        return 0;
}

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_gt_sriov_pf_service_test.c"
#endif