// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */

#include <linux/workqueue.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_context.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_regs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt-in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions (an illustrative
 * snippet follows below).
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * buffers that use it.
 *
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
 */

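/*
 * For reference, a userspace-side sketch of the opt-in described above
 * (adapted from the uapi kernel-doc; illustrative only, error handling
 * omitted, not used by this file). Note that a protected context must also
 * be created non-recoverable, so two setparam extensions are chained:
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&p_protected,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_norecover,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */
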
bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}

bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}

bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}

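/*
 * KCR_INIT is a masked register: the upper 16 bits of the written value
 * select which of the lower 16 bits take effect, so _MASKED_BIT_ENABLE()/
 * _MASKED_BIT_DISABLE() can toggle KCR_INIT_ALLOW_DISPLAY_ME_WRITES without
 * a read-modify-write cycle.
 */
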
static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);

	intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}

static void kcr_pxp_enable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, true);
}

static void kcr_pxp_disable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, false);
}

static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}

static void destroy_vcs_context(struct intel_pxp *pxp)
{
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
}

static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);

	if (pxp->ctrl_gt->type == GT_MEDIA)
		pxp->kcr_base = MTL_KCR_BASE;
	else
		pxp->kcr_base = GEN12_KCR_BASE;

	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		ret = intel_pxp_gsccs_init(pxp);
	else
		ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;

	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");

	return;

out_context:
	destroy_vcs_context(pxp);
}

static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
		return to_gt(i915);

	return NULL;
}

static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
		return NULL;

	/*
	 * For MTL onwards, the PXP-controller-GT needs to have a valid GSC
	 * engine on the media GT. NOTE: if we have a media-tile with a
	 * GSC-engine, the VDBOX is already present so skip that check. We
	 * also have to ensure the GSC and HuC firmware are coming online.
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;

	/*
	 * Otherwise we rely on the mei-pxp module, but only on legacy
	 * platforms prior to separate media GTs and only if a VDBOX is
	 * present.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
		return to_gt(i915);

	return NULL;
}

int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	if (intel_gt_is_wedged(to_gt(i915)))
		return -ENOTCONN;

	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it if PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	/*
	 * At this point, we will either enable full featured PXP capabilities
	 * including session and object management, or we will init the backend
	 * tee channel for internal users such as HuC loading by GSC.
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;

	/* init common info used by all feature-mode usages */
	i915->pxp->ctrl_gt = gt;
	mutex_init(&i915->pxp->tee_mutex);

	/*
	 * If the full PXP feature is not available but HuC is loaded by GSC
	 * on pre-MTL such as DG2, we can skip the init of the full PXP
	 * session/object management and just init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);

	return 0;
}

void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;

	i915->pxp->arb_is_valid = false;

	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	else
		intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}

void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
{
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);
}

static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
	spin_lock_irq(gt->irq_lock);
	intel_pxp_mark_termination_in_progress(pxp);
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
}

static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
	mutex_unlock(&pxp->tee_mutex);

	return bound;
}

int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
	else
		return 250;
}

static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	int timeout;

	if (!pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
	intel_pxp_mark_termination_in_progress(pxp);
	intel_pxp_terminate(pxp, false);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}

static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	int timeout;

	if (pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
	/*
	 * The arb-session is currently inactive and we are doing a reset and
	 * restart due to a runtime event. Use the worker that was designed
	 * for this.
	 */
	pxp_queue_termination(pxp);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
			timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

void intel_pxp_end(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_lock(&pxp->arb_mutex);

	if (__pxp_global_teardown_final(pxp))
		drm_dbg(&i915->drm, "PXP end timed out\n");

	mutex_unlock(&pxp->arb_mutex);

	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static bool pxp_required_fw_failed(struct intel_pxp *pxp)
{
	if (__intel_uc_fw_status(&pxp->ctrl_gt->uc.huc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0) &&
	    __intel_uc_fw_status(&pxp->ctrl_gt->uc.gsc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;

	return false;
}

static bool pxp_fw_dependencies_completed(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return intel_pxp_gsccs_is_ready_for_sessions(pxp);

	return pxp_component_bound(pxp);
}

/*
 * this helper is used by both intel_pxp_start and by
 * the GET_PARAM IOCTL that user space calls. Thus, the
 * return values here should match the UAPI spec.
 */
int intel_pxp_get_readiness_status(struct intel_pxp *pxp, int timeout_ms)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (pxp_required_fw_failed(pxp))
		return -ENODEV;

	if (pxp->platform_cfg_is_bad)
		return -ENODEV;

	if (timeout_ms) {
		if (wait_for(pxp_fw_dependencies_completed(pxp), timeout_ms))
			return 2;
	} else if (!pxp_fw_dependencies_completed(pxp)) {
		return 2;
	}
	return 1;
}

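/*
 * For reference, the readiness values above surface to userspace through
 * GET_PARAM as I915_PARAM_PXP_STATUS: 1 means PXP is ready, 2 means the
 * non-i915 dependencies (firmware/component binding) should be ready soon
 * and the query may be retried, and a negative errno means PXP is
 * unavailable. An illustrative userspace-side sketch (not part of this
 * file; error handling omitted, create_protected_context() is a
 * hypothetical helper):
 *
 *	int status = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PXP_STATUS,
 *		.value = &status,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && status == 1)
 *		create_protected_context(drm_fd);
 */
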
/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
#define PXP_READINESS_TIMEOUT 250

int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;

	ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
	if (ret < 0) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
		return ret;
	} else if (ret > 1) {
		return -EIO; /* per UAPI spec, user may retry later */
	}

	mutex_lock(&pxp->arb_mutex);

	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;

	/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}

void intel_pxp_init_hw(struct intel_pxp *pxp)
{
	kcr_pxp_enable(pxp);
	intel_pxp_irq_enable(pxp);
}

void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
	kcr_pxp_disable(pxp);
	intel_pxp_irq_disable(pxp);
}

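/*
 * Note on intel_pxp_key_check() below: as of this writing it is used on the
 * execbuf path (assign=true, so a first-time-used object is stamped with
 * the current key instance) and on the display flip path (assign=false).
 * A -ENOEXEC return means the object was encrypted with a key that is no
 * longer valid; per the DOC section above, execbuf submissions are then
 * rejected while flips are converted to black frames.
 */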
int intel_pxp_key_check(struct intel_pxp *pxp,
			struct drm_i915_gem_object *obj,
			bool assign)
{
	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;

	GEM_BUG_ON(!pxp->key_instance);

	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;

	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
		}

		spin_unlock_irq(&i915->gem.contexts.lock);

		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quickly as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
			intel_context_ban(ce, NULL);
		i915_gem_context_unlock_engines(ctx);

		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref.
		 */
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}