// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */
6 #include "gt/intel_engine_pm.h"
7 #include "gt/intel_gpu_commands.h"
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_ring.h"
11 #include "intel_gsc_fw.h"
13 #define GSC_FW_STATUS_REG _MMIO(0x116C40)
14 #define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
15 #define GSC_FW_CURRENT_STATE_RESET 0
16 #define GSC_FW_PROXY_STATE_NORMAL 5
17 #define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
19 static bool gsc_is_in_reset(struct intel_uncore *uncore)
21 u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
23 return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
24 GSC_FW_CURRENT_STATE_RESET;
27 bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
29 struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
30 u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
32 return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
33 GSC_FW_PROXY_STATE_NORMAL;
36 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
38 struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
39 u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
41 return fw_status & GSC_FW_INIT_COMPLETE_BIT;
44 static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
46 u32 offset = i915_ggtt_offset(gsc->local);
49 cs = intel_ring_begin(rq, 4);
54 *cs++ = lower_32_bits(offset);
55 *cs++ = upper_32_bits(offset);
56 *cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
58 intel_ring_advance(rq, cs);
63 static int gsc_fw_load(struct intel_gsc_uc *gsc)
65 struct intel_context *ce = gsc->ce;
66 struct i915_request *rq;
72 rq = i915_request_create(ce);
76 if (ce->engine->emit_init_breadcrumb) {
77 err = ce->engine->emit_init_breadcrumb(rq);
82 err = emit_gsc_fw_load(rq, gsc);
86 err = ce->engine->emit_flush(rq, 0);
92 i915_request_set_error_once(rq, err);
96 if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
102 gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
108 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
110 struct intel_gt *gt = gsc_uc_to_gt(gsc);
111 struct drm_i915_private *i915 = gt->i915;
112 struct drm_i915_gem_object *obj;
118 obj = gsc->local->obj;
120 if (obj->base.size < gsc->fw.size)
124 * Wa_22016122933: For MTL the shared memory needs to be mapped
125 * as WC on CPU side and UC (PAT index 2) on GPU side
127 if (IS_METEORLAKE(i915))
128 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
130 dst = i915_gem_object_pin_map_unlocked(obj,
131 i915_coherent_map_type(i915, obj, true));
135 src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
136 i915_coherent_map_type(i915, gsc->fw.obj, true));
138 i915_gem_object_unpin_map(obj);
142 memset(dst, 0, obj->base.size);
143 memcpy(dst, src, gsc->fw.size);
146 * Wa_22016122933: Making sure the data in dst is
147 * visible to GSC right away
149 intel_guc_write_barrier(>->uc.guc);
151 i915_gem_object_unpin_map(gsc->fw.obj);
152 i915_gem_object_unpin_map(obj);
157 static int gsc_fw_wait(struct intel_gt *gt)
159 return intel_wait_for_register(gt->uncore,
161 GSC_FW_INIT_COMPLETE_BIT,
162 GSC_FW_INIT_COMPLETE_BIT,
166 int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
168 struct intel_gt *gt = gsc_uc_to_gt(gsc);
169 struct intel_uc_fw *gsc_fw = &gsc->fw;
172 /* check current fw status */
173 if (intel_gsc_uc_fw_init_done(gsc)) {
174 if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
175 intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
179 if (!intel_uc_fw_is_loadable(gsc_fw))
182 /* FW blob is ok, so clean the status */
183 intel_uc_fw_sanitize(&gsc->fw);
185 if (!gsc_is_in_reset(gt->uncore))
188 err = gsc_fw_load_prepare(gsc);
193 * GSC is only killed by an FLR, so we need to trigger one on unload to
194 * make sure we stop it. This is because we assign a chunk of memory to
195 * the GSC as part of the FW load , so we need to make sure it stops
196 * using it when we release it to the system on driver unload. Note that
197 * this is not a problem of the unload per-se, because the GSC will not
198 * touch that memory unless there are requests for it coming from the
199 * driver; therefore, no accesses will happen while i915 is not loaded,
200 * but if we re-load the driver then the GSC might wake up and try to
201 * access that old memory location again.
202 * Given that an FLR is a very disruptive action (see the FLR function
203 * for details), we want to do it as the last action before releasing
204 * the access to the MMIO bar, which means we need to do it as part of
205 * the primary uncore cleanup.
206 * An alternative approach to the FLR would be to use a memory location
207 * that survives driver unload, like e.g. stolen memory, and keep the
208 * GSC loaded across reloads. However, this requires us to make sure we
209 * preserve that memory location on unload and then determine and
210 * reserve its offset on each subsequent load, which is not trivial, so
211 * it is easier to just kill everything and start fresh.
213 intel_uncore_set_flr_on_fini(>->i915->uncore);
215 err = gsc_fw_load(gsc);
219 err = gsc_fw_wait(gt);
223 /* FW is not fully operational until we enable SW proxy */
224 intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
226 gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
231 return intel_uc_fw_mark_load_failed(gsc_fw, err);