1 // SPDX-License-Identifier: MIT
3 * Copyright © 2022 Intel Corporation
8 #include <drm/drm_managed.h>
10 #include "abi/gsc_pxp_commands_abi.h"
11 #include "regs/xe_gsc_regs.h"
12 #include "regs/xe_guc_regs.h"
13 #include "xe_assert.h"
15 #include "xe_device.h"
16 #include "xe_force_wake.h"
17 #include "xe_gsc_submit.h"
/*
 * Pointer-conversion helpers: recover the enclosing GT / device / GuC from
 * an embedded struct xe_huc.  NOTE(review): the return-type lines and
 * braces appear truncated in this excerpt — verify against the full file.
 */
25 huc_to_gt(struct xe_huc *huc)
/* The HuC is embedded in the GT's uC container, so walk up with container_of(). */
27 	return container_of(huc, struct xe_gt, uc.huc);
30 static struct xe_device *
31 huc_to_xe(struct xe_huc *huc)
/* Device is reached via the owning GT. */
33 	return gt_to_xe(huc_to_gt(huc));
36 static struct xe_guc *
37 huc_to_guc(struct xe_huc *huc)
/* HuC and GuC are siblings inside the same struct xe_uc container. */
39 	return &container_of(huc, struct xe_uc, huc)->guc;
/*
 * drmm (DRM-managed) release action: unpin and unmap the GSC packet BO
 * allocated in huc_alloc_gsc_pkt().  Registered via drmm_add_action_or_reset()
 * so it runs automatically on device teardown.
 */
42 static void free_gsc_pkt(struct drm_device *drm, void *arg)
44 	struct xe_huc *huc = arg;
46 	xe_bo_unpin_map_no_vm(huc->gsc_pkt);
/* Size of each half (input / output) of the GSC auth packet buffer. */
50 #define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
/*
 * Allocate the buffer object used to exchange the PXP HuC-auth message with
 * the GSC.  The BO is pinned, mapped and GGTT-bound; its lifetime is tied to
 * the drm_device via the free_gsc_pkt drmm action.
 * NOTE(review): the bo error check and huc->gsc_pkt assignment appear to be
 * in lines missing from this excerpt.
 */
51 static int huc_alloc_gsc_pkt(struct xe_huc *huc)
53 	struct xe_gt *gt = huc_to_gt(huc);
54 	struct xe_device *xe = gt_to_xe(gt);
58 	/* we use a single object for both input and output */
59 	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
60 				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
62 				  XE_BO_CREATE_SYSTEM_BIT |
63 				  XE_BO_CREATE_GGTT_BIT);
/* Register managed cleanup; on registration failure, free immediately. */
69 	err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
71 		free_gsc_pkt(&xe->drm, huc);
/*
 * One-time HuC init: mark the firmware type, skip GTs that don't host the
 * HuC, initialize the firmware object, and allocate the GSC auth packet when
 * the firmware carries GSC headers.  On success the firmware is marked
 * LOADABLE.  NOTE(review): several error-path / return lines are missing
 * from this excerpt.
 */
78 int xe_huc_init(struct xe_huc *huc)
80 	struct xe_gt *gt = huc_to_gt(huc);
81 	struct xe_tile *tile = gt_to_tile(gt);
82 	struct xe_device *xe = gt_to_xe(gt);
85 	huc->fw.type = XE_UC_FW_TYPE_HUC;
87 	/* On platforms with a media GT the HuC is only available there */
88 	if (tile->media_gt && (gt != tile->media_gt)) {
89 		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
93 	ret = xe_uc_fw_init(&huc->fw);
/* Nothing more to do if the firmware is disabled (e.g. not present). */
97 	if (!xe_uc_fw_is_enabled(&huc->fw))
/* GSC-packaged firmware is authenticated via the GSC and needs a packet BO. */
100 	if (huc->fw.has_gsc_headers) {
101 		ret = huc_alloc_gsc_pkt(huc);
106 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
111 	drm_err(&xe->drm, "HuC init failed with %d", ret);
/*
 * Post-hwconfig init step: on discrete GPUs, move the firmware BO into VRAM
 * now that the memory layout is known.  No-op on integrated parts or when
 * the firmware isn't loadable.
 */
115 int xe_huc_init_post_hwconfig(struct xe_huc *huc)
117 	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
118 	struct xe_device *xe = huc_to_xe(huc);
/* Only discrete devices keep firmware in VRAM. */
121 	if (!IS_DGFX(huc_to_xe(huc)))
124 	if (!xe_uc_fw_is_loadable(&huc->fw))
127 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
/*
 * DMA-upload the HuC firmware image to the hardware.  Skipped when the
 * firmware is not in a loadable state.
 */
134 int xe_huc_upload(struct xe_huc *huc)
136 	if (!xe_uc_fw_is_loadable(&huc->fw))
138 	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
/* Typed field accessors for the PXP 4.3 HuC-auth message in an iosys_map. */
141 #define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
142 	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
143 #define huc_auth_msg_rd(xe_, map_, offset_, field_) \
144 	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)
/*
 * Write a PXP43_CMDID_NEW_HUC_AUTH request at @wr_offset, pointing the GSC
 * at the HuC image located at @huc_offset/@huc_size in GGTT.  Returns the
 * offset just past the emitted message.
 */
146 static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
147 				 u32 wr_offset, u32 huc_offset, u32 huc_size)
/* Zero the whole input struct first so unset fields are well-defined. */
149 	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));
151 	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
152 	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
153 	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
/* buffer_len counts only the payload, excluding the command header. */
154 	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
155 			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
156 	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
157 	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);
159 	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
/*
 * Authenticate the HuC by submitting a PXP auth request to the GSC through
 * the GSCCS.  The packet BO holds the input message at offset 0 and the
 * GSC's reply at PXP43_HUC_AUTH_INOUT_SIZE.  Submission is retried while
 * the GSC reports the request as still pending (-EBUSY).
 * NOTE(review): local declarations, the retry-counter init, and some error
 * returns appear to be in lines missing from this excerpt.
 */
162 static int huc_auth_via_gsccs(struct xe_huc *huc)
164 	struct xe_gt *gt = huc_to_gt(huc);
165 	struct xe_device *xe = gt_to_xe(gt);
166 	struct xe_bo *pkt = huc->gsc_pkt;
177 	ggtt_offset = xe_bo_ggtt_addr(pkt);
/* Build GSC HECI header (PXP mailbox address) followed by the auth message. */
179 	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
180 				       sizeof(struct pxp43_new_huc_auth_in));
181 	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
182 					  xe_bo_ggtt_addr(huc->fw.bo),
/* Input at offset 0, output half immediately after it in the same BO. */
185 		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
186 					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
187 					       PXP43_HUC_AUTH_INOUT_SIZE);
/* If the GSC flagged the request as pending, resubmit and retry. */
191 		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
192 						    PXP43_HUC_AUTH_INOUT_SIZE)) {
196 	} while (--retry && err == -EBUSY);
199 		drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
/* Validate the GSC reply header before reading the status out of it. */
203 	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
204 				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
206 		drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
211 	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
212 	 * authenticated. If the same error is ever returned with HuC not loaded
213 	 * we'll still catch it when we check the authentication bit later.
215 	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
216 	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
217 		drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
/*
 * Per-auth-path descriptor: human-readable name, the MMIO status register to
 * poll, and the bit that indicates successful authentication.
 * NOTE(review): the struct member declarations are in lines missing from
 * this excerpt.
 */
224 static const struct {
228 } huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
229 	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
230 				  HUC_KERNEL_LOAD_INFO,
231 				  HUC_LOAD_SUCCESSFUL },
232 	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
233 				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
234 				  HECI1_FWSTS5_HUC_AUTH_DONE },
/*
 * Check the hardware status register for the given auth path and report
 * whether its "authenticated" bit is set.
 */
237 bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
239 	struct xe_gt *gt = huc_to_gt(huc);
241 	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
/*
 * Trigger HuC authentication via the requested path (GuC or GSC), then poll
 * the corresponding status register until the hardware confirms the HuC is
 * verified.  Firmware status is advanced to RUNNING on success and demoted
 * to LOAD_FAIL on any failure.  NOTE(review): some returns, the switch
 * statement header, and default case are in lines missing from this excerpt.
 */
244 int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
246 	struct xe_device *xe = huc_to_xe(huc);
247 	struct xe_gt *gt = huc_to_gt(huc);
248 	struct xe_guc *guc = huc_to_guc(huc);
251 	if (!xe_uc_fw_is_loadable(&huc->fw))
254 	/* On newer platforms the HuC survives reset, so no need to re-auth */
255 	if (xe_huc_is_authenticated(huc, type)) {
256 		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
/* Can't authenticate firmware that hasn't been uploaded yet. */
260 	if (!xe_uc_fw_is_loaded(&huc->fw))
264 	case XE_HUC_AUTH_VIA_GUC:
/* GuC path: hand it the GGTT address of the RSA signature blob. */
265 		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
266 				      xe_uc_fw_rsa_offset(&huc->fw));
268 	case XE_HUC_AUTH_VIA_GSC:
269 		ret = huc_auth_via_gsccs(huc);
276 		drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
277 			huc_auth_modes[type].name, ret);
/* Poll the auth-done bit; 100000 here is the wait timeout argument. */
281 	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
282 			     huc_auth_modes[type].val, 100000, NULL, false);
284 		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
288 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
289 	drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);
294 	drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
295 		huc_auth_modes[type].name, ret);
296 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
/*
 * Reset the firmware state back to LOADABLE (e.g. around a GT reset) so the
 * image will be re-uploaded/re-authenticated on the next load.
 */
301 void xe_huc_sanitize(struct xe_huc *huc)
303 	if (!xe_uc_fw_is_loadable(&huc->fw))
305 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
/*
 * Debugfs-style dump: print firmware info and, when the firmware is enabled,
 * the raw HuC status register (read under GT forcewake).
 */
308 void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
310 	struct xe_gt *gt = huc_to_gt(huc);
313 	xe_uc_fw_print(&huc->fw, p);
315 	if (!xe_uc_fw_is_enabled(&huc->fw))
/* Forcewake must be held for the MMIO read below. */
318 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
322 	drm_printf(p, "\nHuC status: 0x%08x\n",
323 		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
325 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);