1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2015 Linaro Ltd.
6 #include <linux/arm-smccc.h>
7 #include <linux/bitfield.h>
8 #include <linux/bits.h>
9 #include <linux/cleanup.h>
10 #include <linux/clk.h>
11 #include <linux/completion.h>
12 #include <linux/cpumask.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/export.h>
16 #include <linux/firmware/qcom/qcom_scm.h>
17 #include <linux/firmware/qcom/qcom_tzmem.h>
18 #include <linux/init.h>
19 #include <linux/interconnect.h>
20 #include <linux/interrupt.h>
21 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/of_irq.h>
25 #include <linux/of_platform.h>
26 #include <linux/of_reserved_mem.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset-controller.h>
29 #include <linux/sizes.h>
30 #include <linux/types.h>
33 #include "qcom_tzmem.h"
35 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
36 module_param(download_mode, bool, 0);
41 struct clk *iface_clk;
43 struct icc_path *path;
44 struct completion waitq_comp;
45 struct reset_controller_dev reset;
47 /* control access to the interconnect path */
48 struct mutex scm_bw_lock;
53 struct qcom_tzmem_pool *mempool;
56 struct qcom_scm_current_perm_info {
64 struct qcom_scm_mem_map_info {
70 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
71 * @result: Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
72 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
73 * @data: Response data. The type of this data is given in @resp_type.
75 struct qcom_scm_qseecom_resp {
81 enum qcom_scm_qseecom_result {
82 QSEECOM_RESULT_SUCCESS = 0,
83 QSEECOM_RESULT_INCOMPLETE = 1,
84 QSEECOM_RESULT_BLOCKED_ON_LISTENER = 2,
85 QSEECOM_RESULT_FAILURE = 0xFFFFFFFF,
88 enum qcom_scm_qseecom_resp_type {
89 QSEECOM_SCM_RES_APP_ID = 0xEE01,
90 QSEECOM_SCM_RES_QSEOS_LISTENER_ID = 0xEE02,
93 enum qcom_scm_qseecom_tz_owner {
94 QSEECOM_TZ_OWNER_SIP = 2,
95 QSEECOM_TZ_OWNER_TZ_APPS = 48,
96 QSEECOM_TZ_OWNER_QSEE_OS = 50
99 enum qcom_scm_qseecom_tz_svc {
100 QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER = 0,
101 QSEECOM_TZ_SVC_APP_MGR = 1,
102 QSEECOM_TZ_SVC_INFO = 6,
105 enum qcom_scm_qseecom_tz_cmd_app {
106 QSEECOM_TZ_CMD_APP_SEND = 1,
107 QSEECOM_TZ_CMD_APP_LOOKUP = 3,
110 enum qcom_scm_qseecom_tz_cmd_info {
111 QSEECOM_TZ_CMD_INFO_VERSION = 3,
114 #define QSEECOM_MAX_APP_NAME_SIZE 64
116 /* Each bit configures cold/warm boot address for one of the 4 CPUs */
117 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
118 0, BIT(0), BIT(3), BIT(5)
120 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
121 BIT(2), BIT(1), BIT(4), BIT(6)
124 #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
126 #define QCOM_DLOAD_MASK GENMASK(5, 4)
127 #define QCOM_DLOAD_NODUMP 0
128 #define QCOM_DLOAD_FULLDUMP 1
130 static const char * const qcom_scm_convention_names[] = {
131 [SMC_CONVENTION_UNKNOWN] = "unknown",
132 [SMC_CONVENTION_ARM_32] = "smc arm 32",
133 [SMC_CONVENTION_ARM_64] = "smc arm 64",
134 [SMC_CONVENTION_LEGACY] = "smc legacy",
137 static struct qcom_scm *__scm;
139 static int qcom_scm_clk_enable(void)
143 ret = clk_prepare_enable(__scm->core_clk);
147 ret = clk_prepare_enable(__scm->iface_clk);
151 ret = clk_prepare_enable(__scm->bus_clk);
158 clk_disable_unprepare(__scm->iface_clk);
160 clk_disable_unprepare(__scm->core_clk);
165 static void qcom_scm_clk_disable(void)
167 clk_disable_unprepare(__scm->core_clk);
168 clk_disable_unprepare(__scm->iface_clk);
169 clk_disable_unprepare(__scm->bus_clk);
172 static int qcom_scm_bw_enable(void)
179 mutex_lock(&__scm->scm_bw_lock);
180 if (!__scm->scm_vote_count) {
181 ret = icc_set_bw(__scm->path, 0, UINT_MAX);
183 dev_err(__scm->dev, "failed to set bandwidth request\n");
187 __scm->scm_vote_count++;
189 mutex_unlock(&__scm->scm_bw_lock);
194 static void qcom_scm_bw_disable(void)
199 mutex_lock(&__scm->scm_bw_lock);
200 if (__scm->scm_vote_count-- == 1)
201 icc_set_bw(__scm->path, 0, 0);
202 mutex_unlock(&__scm->scm_bw_lock);
205 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
206 static DEFINE_SPINLOCK(scm_query_lock);
208 struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
210 return __scm->mempool;
213 static enum qcom_scm_convention __get_convention(void)
216 struct qcom_scm_desc desc = {
217 .svc = QCOM_SCM_SVC_INFO,
218 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
219 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
220 QCOM_SCM_INFO_IS_CALL_AVAIL) |
221 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
222 .arginfo = QCOM_SCM_ARGS(1),
223 .owner = ARM_SMCCC_OWNER_SIP,
225 struct qcom_scm_res res;
226 enum qcom_scm_convention probed_convention;
230 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
231 return qcom_scm_convention;
234 * Per the "SMC calling convention specification", the 64-bit calling
235 * convention can only be used when the client is 64-bit, otherwise
236 * the system will encounter undefined behaviour.
238 #if IS_ENABLED(CONFIG_ARM64)
240 * A device isn't required as there is only one argument - no device is
241 * needed to dma_map_single data to the secure world.
243 probed_convention = SMC_CONVENTION_ARM_64;
244 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
245 if (!ret && res.result[0] == 1)
249 * Some SC7180 firmwares didn't implement the
250 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the ARM_64
251 * calling convention on such firmware. Luckily we don't make any
252 * early calls into the firmware on these SoCs so the device pointer
253 * will be valid here to check if the compatible matches.
255 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
261 probed_convention = SMC_CONVENTION_ARM_32;
262 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
263 if (!ret && res.result[0] == 1)
266 probed_convention = SMC_CONVENTION_LEGACY;
268 spin_lock_irqsave(&scm_query_lock, flags);
269 if (probed_convention != qcom_scm_convention) {
270 qcom_scm_convention = probed_convention;
271 pr_info("qcom_scm: convention: %s%s\n",
272 qcom_scm_convention_names[qcom_scm_convention],
273 forced ? " (forced)" : "");
275 spin_unlock_irqrestore(&scm_query_lock, flags);
277 return qcom_scm_convention;
281 * qcom_scm_call() - Invoke a syscall in the secure world
283 * @desc: Descriptor structure containing arguments and return values
284 * @res: Structure containing results from SMC/HVC call
286 * Sends a command to the SCM and waits for the command to finish processing.
287 * This should *only* be called in pre-emptible context.
289 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
290 struct qcom_scm_res *res)
293 switch (__get_convention()) {
294 case SMC_CONVENTION_ARM_32:
295 case SMC_CONVENTION_ARM_64:
296 return scm_smc_call(dev, desc, res, false);
297 case SMC_CONVENTION_LEGACY:
298 return scm_legacy_call(dev, desc, res);
300 pr_err("Unknown current SCM calling convention.\n");
306 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
308 * @desc: Descriptor structure containing arguments and return values
309 * @res: Structure containing results from SMC/HVC call
311 * Sends a command to the SCM and waits for the command to finish processing.
312 * This can be called in atomic context.
314 static int qcom_scm_call_atomic(struct device *dev,
315 const struct qcom_scm_desc *desc,
316 struct qcom_scm_res *res)
318 switch (__get_convention()) {
319 case SMC_CONVENTION_ARM_32:
320 case SMC_CONVENTION_ARM_64:
321 return scm_smc_call(dev, desc, res, true);
322 case SMC_CONVENTION_LEGACY:
323 return scm_legacy_call_atomic(dev, desc, res);
325 pr_err("Unknown current SCM calling convention.\n");
330 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
334 struct qcom_scm_desc desc = {
335 .svc = QCOM_SCM_SVC_INFO,
336 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
337 .owner = ARM_SMCCC_OWNER_SIP,
339 struct qcom_scm_res res;
341 desc.arginfo = QCOM_SCM_ARGS(1);
342 switch (__get_convention()) {
343 case SMC_CONVENTION_ARM_32:
344 case SMC_CONVENTION_ARM_64:
345 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
346 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
348 case SMC_CONVENTION_LEGACY:
349 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
352 pr_err("Unknown SMC convention being used\n");
356 ret = qcom_scm_call(dev, &desc, &res);
358 return ret ? false : !!res.result[0];
361 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
364 unsigned int flags = 0;
365 struct qcom_scm_desc desc = {
366 .svc = QCOM_SCM_SVC_BOOT,
367 .cmd = QCOM_SCM_BOOT_SET_ADDR,
368 .arginfo = QCOM_SCM_ARGS(2),
369 .owner = ARM_SMCCC_OWNER_SIP,
372 for_each_present_cpu(cpu) {
373 if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
375 flags |= cpu_bits[cpu];
378 desc.args[0] = flags;
379 desc.args[1] = virt_to_phys(entry);
381 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
384 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
386 struct qcom_scm_desc desc = {
387 .svc = QCOM_SCM_SVC_BOOT,
388 .cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
389 .owner = ARM_SMCCC_OWNER_SIP,
390 .arginfo = QCOM_SCM_ARGS(6),
393 /* Apply to all CPUs in all affinity levels */
394 ~0ULL, ~0ULL, ~0ULL, ~0ULL,
399 /* Need a device for DMA of the additional arguments */
400 if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
403 return qcom_scm_call(__scm->dev, &desc, NULL);
407 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
408 * @entry: Entry point function for the cpus
410 * Set the Linux entry point for the SCM to transfer control to when coming
411 * out of a power down. CPU power down may be executed from cpuidle or hotplug.
413 int qcom_scm_set_warm_boot_addr(void *entry)
415 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
416 /* Fallback to old SCM call */
417 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
420 EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
423 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
424 * @entry: Entry point function for the cpus
426 int qcom_scm_set_cold_boot_addr(void *entry)
428 if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
429 /* Fallback to old SCM call */
430 return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
433 EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
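/*
 * Illustrative usage sketch (not part of this driver): a cpuidle or hotplug
 * path would typically hand its resume entry point to the firmware once at
 * init time. "my_cpu_resume" is a made-up symbol standing in for the real
 * low-level entry point.
 *
 *	extern void my_cpu_resume(void);
 *
 *	static int my_register_boot_addr(void)
 *	{
 *		int ret = qcom_scm_set_warm_boot_addr(my_cpu_resume);
 *
 *		if (ret)
 *			pr_warn("qcom_scm: warm boot address not set: %d\n", ret);
 *
 *		return ret;
 *	}
 */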
436 * qcom_scm_cpu_power_down() - Power down the cpu
437 * @flags: Flags to flush cache
439 * This is the endpoint for powering down the cpu. If there was a pending
440 * interrupt, control returns from this function; otherwise, the cpu jumps to
441 * the warm boot entry point set for this cpu upon reset.
443 void qcom_scm_cpu_power_down(u32 flags)
445 struct qcom_scm_desc desc = {
446 .svc = QCOM_SCM_SVC_BOOT,
447 .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
448 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
449 .arginfo = QCOM_SCM_ARGS(1),
450 .owner = ARM_SMCCC_OWNER_SIP,
453 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
455 EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
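/*
 * Illustrative sketch (not part of this driver): a platform idle driver would
 * call this as the last step of its enter hook, picking the cache flush flag
 * based on whether the L2 is retained. The flag names below are assumed to
 * come from <linux/firmware/qcom/qcom_scm.h>; verify against the header in use.
 *
 *	static void my_enter_power_collapse(bool l2_off)
 *	{
 *		qcom_scm_cpu_power_down(l2_off ? QCOM_SCM_CPU_PWR_DOWN_L2_OFF :
 *						 QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *	}
 */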
457 int qcom_scm_set_remote_state(u32 state, u32 id)
459 struct qcom_scm_desc desc = {
460 .svc = QCOM_SCM_SVC_BOOT,
461 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
462 .arginfo = QCOM_SCM_ARGS(2),
465 .owner = ARM_SMCCC_OWNER_SIP,
467 struct qcom_scm_res res;
470 ret = qcom_scm_call(__scm->dev, &desc, &res);
472 return ret ? : res.result[0];
474 EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
476 static int qcom_scm_disable_sdi(void)
479 struct qcom_scm_desc desc = {
480 .svc = QCOM_SCM_SVC_BOOT,
481 .cmd = QCOM_SCM_BOOT_SDI_CONFIG,
482 .args[0] = 1, /* Disable watchdog debug */
483 .args[1] = 0, /* Disable SDI */
484 .arginfo = QCOM_SCM_ARGS(2),
485 .owner = ARM_SMCCC_OWNER_SIP,
487 struct qcom_scm_res res;
489 ret = qcom_scm_clk_enable();
492 ret = qcom_scm_call(__scm->dev, &desc, &res);
494 qcom_scm_clk_disable();
496 return ret ? : res.result[0];
499 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
501 struct qcom_scm_desc desc = {
502 .svc = QCOM_SCM_SVC_BOOT,
503 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
504 .arginfo = QCOM_SCM_ARGS(2),
505 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
506 .owner = ARM_SMCCC_OWNER_SIP,
509 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
511 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
514 static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
520 ret = qcom_scm_io_readl(addr, &old);
524 new = (old & ~mask) | (val & mask);
526 return qcom_scm_io_writel(addr, new);
529 static void qcom_scm_set_download_mode(bool enable)
531 u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
534 if (__scm->dload_mode_addr) {
535 ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
536 FIELD_PREP(QCOM_DLOAD_MASK, val));
537 } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
538 QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
539 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
542 "No available mechanism for setting download mode\n");
546 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
550 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
551 * state machine for a given peripheral, using the
553 * @peripheral: peripheral id
554 * @metadata: pointer to memory containing ELF header, program header table
555 * and optional blob of data used for authenticating the metadata
556 * and the rest of the firmware
557 * @size: size of the metadata
558 * @ctx: optional metadata context
560 * Return: 0 on success.
562 * Upon successful return, the PAS metadata context (@ctx) will be used to
563 * track the metadata allocation; the caller needs to release it by invoking
564 * qcom_scm_pas_metadata_release().
566 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
567 struct qcom_scm_pas_metadata *ctx)
569 dma_addr_t mdata_phys;
572 struct qcom_scm_desc desc = {
573 .svc = QCOM_SCM_SVC_PIL,
574 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
575 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
576 .args[0] = peripheral,
577 .owner = ARM_SMCCC_OWNER_SIP,
579 struct qcom_scm_res res;
582 * During the scm call, memory protection will be enabled for the metadata
583 * blob, so make sure it's physically contiguous, 4K aligned and
584 * non-cacheable to avoid XPU violations.
586 * For PIL calls the hypervisor creates SHM Bridges for the blob
587 * buffers on behalf of Linux so we must not do it ourselves hence
588 * not using the TZMem allocator here.
590 * If we pass a buffer that is already part of an SHM Bridge to this
591 * call, it will fail.
593 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
598 memcpy(mdata_buf, metadata, size);
600 ret = qcom_scm_clk_enable();
604 ret = qcom_scm_bw_enable();
608 desc.args[1] = mdata_phys;
610 ret = qcom_scm_call(__scm->dev, &desc, &res);
611 qcom_scm_bw_disable();
614 qcom_scm_clk_disable();
617 if (ret < 0 || !ctx) {
618 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
620 ctx->ptr = mdata_buf;
621 ctx->phys = mdata_phys;
625 return ret ? : res.result[0];
627 EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
630 * qcom_scm_pas_metadata_release() - release metadata context
631 * @ctx: metadata context
633 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
638 dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
644 EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
647 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
648 * for firmware loading
649 * @peripheral: peripheral id
650 * @addr: start address of memory area to prepare
651 * @size: size of the memory area to prepare
653 * Returns 0 on success.
655 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
658 struct qcom_scm_desc desc = {
659 .svc = QCOM_SCM_SVC_PIL,
660 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
661 .arginfo = QCOM_SCM_ARGS(3),
662 .args[0] = peripheral,
665 .owner = ARM_SMCCC_OWNER_SIP,
667 struct qcom_scm_res res;
669 ret = qcom_scm_clk_enable();
673 ret = qcom_scm_bw_enable();
677 ret = qcom_scm_call(__scm->dev, &desc, &res);
678 qcom_scm_bw_disable();
681 qcom_scm_clk_disable();
683 return ret ? : res.result[0];
685 EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
688 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
689 * and reset the remote processor
690 * @peripheral: peripheral id
692 * Return 0 on success.
694 int qcom_scm_pas_auth_and_reset(u32 peripheral)
697 struct qcom_scm_desc desc = {
698 .svc = QCOM_SCM_SVC_PIL,
699 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
700 .arginfo = QCOM_SCM_ARGS(1),
701 .args[0] = peripheral,
702 .owner = ARM_SMCCC_OWNER_SIP,
704 struct qcom_scm_res res;
706 ret = qcom_scm_clk_enable();
710 ret = qcom_scm_bw_enable();
714 ret = qcom_scm_call(__scm->dev, &desc, &res);
715 qcom_scm_bw_disable();
718 qcom_scm_clk_disable();
720 return ret ? : res.result[0];
722 EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
725 * qcom_scm_pas_shutdown() - Shut down the remote processor
726 * @peripheral: peripheral id
728 * Returns 0 on success.
730 int qcom_scm_pas_shutdown(u32 peripheral)
733 struct qcom_scm_desc desc = {
734 .svc = QCOM_SCM_SVC_PIL,
735 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
736 .arginfo = QCOM_SCM_ARGS(1),
737 .args[0] = peripheral,
738 .owner = ARM_SMCCC_OWNER_SIP,
740 struct qcom_scm_res res;
742 ret = qcom_scm_clk_enable();
746 ret = qcom_scm_bw_enable();
750 ret = qcom_scm_call(__scm->dev, &desc, &res);
751 qcom_scm_bw_disable();
754 qcom_scm_clk_disable();
756 return ret ? : res.result[0];
758 EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
761 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
762 * available for the given peripheral
763 * @peripheral: peripheral id
765 * Returns true if PAS is supported for this peripheral, otherwise false.
767 bool qcom_scm_pas_supported(u32 peripheral)
770 struct qcom_scm_desc desc = {
771 .svc = QCOM_SCM_SVC_PIL,
772 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
773 .arginfo = QCOM_SCM_ARGS(1),
774 .args[0] = peripheral,
775 .owner = ARM_SMCCC_OWNER_SIP,
777 struct qcom_scm_res res;
779 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
780 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
783 ret = qcom_scm_call(__scm->dev, &desc, &res);
785 return ret ? false : !!res.result[0];
787 EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
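/*
 * Illustrative PAS flow sketch (not part of this driver): a remoteproc driver
 * typically strings the calls above together when booting a peripheral. The
 * identifiers "pas_id", "fw_metadata", "metadata_size", "mem_phys" and
 * "mem_size" are placeholders for values the caller already owns.
 *
 *	struct qcom_scm_pas_metadata pas_ctx = {};
 *	int ret;
 *
 *	if (!qcom_scm_pas_supported(pas_id))
 *		return -ENXIO;
 *
 *	ret = qcom_scm_pas_init_image(pas_id, fw_metadata, metadata_size, &pas_ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	if (!ret)
 *		ret = qcom_scm_pas_auth_and_reset(pas_id);
 *
 *	qcom_scm_pas_metadata_release(&pas_ctx);
 *	return ret;
 *
 * Tearing the peripheral down later is a single qcom_scm_pas_shutdown(pas_id)
 * call.
 */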
789 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
791 struct qcom_scm_desc desc = {
792 .svc = QCOM_SCM_SVC_PIL,
793 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
794 .arginfo = QCOM_SCM_ARGS(2),
797 .owner = ARM_SMCCC_OWNER_SIP,
799 struct qcom_scm_res res;
802 ret = qcom_scm_call(__scm->dev, &desc, &res);
804 return ret ? : res.result[0];
807 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
813 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
816 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
822 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
825 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
826 .assert = qcom_scm_pas_reset_assert,
827 .deassert = qcom_scm_pas_reset_deassert,
830 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
832 struct qcom_scm_desc desc = {
833 .svc = QCOM_SCM_SVC_IO,
834 .cmd = QCOM_SCM_IO_READ,
835 .arginfo = QCOM_SCM_ARGS(1),
837 .owner = ARM_SMCCC_OWNER_SIP,
839 struct qcom_scm_res res;
843 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
845 *val = res.result[0];
847 return ret < 0 ? ret : 0;
849 EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
851 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
853 struct qcom_scm_desc desc = {
854 .svc = QCOM_SCM_SVC_IO,
855 .cmd = QCOM_SCM_IO_WRITE,
856 .arginfo = QCOM_SCM_ARGS(2),
859 .owner = ARM_SMCCC_OWNER_SIP,
862 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
864 EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
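/*
 * Illustrative sketch (not part of this driver): callers use the two helpers
 * above for registers that are only accessible through the secure monitor,
 * e.g. a read-modify-write of a hypothetical register at "my_reg_phys":
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = qcom_scm_io_readl(my_reg_phys, &val);
 *	if (ret)
 *		return ret;
 *
 *	val |= BIT(0);
 *	return qcom_scm_io_writel(my_reg_phys, val);
 */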
867 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
868 * supports restore security config interface.
870 * Return true if restore-cfg interface is supported, false if not.
872 bool qcom_scm_restore_sec_cfg_available(void)
874 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
875 QCOM_SCM_MP_RESTORE_SEC_CFG);
877 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
879 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
881 struct qcom_scm_desc desc = {
882 .svc = QCOM_SCM_SVC_MP,
883 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
884 .arginfo = QCOM_SCM_ARGS(2),
885 .args[0] = device_id,
887 .owner = ARM_SMCCC_OWNER_SIP,
889 struct qcom_scm_res res;
892 ret = qcom_scm_call(__scm->dev, &desc, &res);
894 return ret ? : res.result[0];
896 EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
898 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
900 struct qcom_scm_desc desc = {
901 .svc = QCOM_SCM_SVC_MP,
902 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
903 .arginfo = QCOM_SCM_ARGS(1),
905 .owner = ARM_SMCCC_OWNER_SIP,
907 struct qcom_scm_res res;
910 ret = qcom_scm_call(__scm->dev, &desc, &res);
913 *size = res.result[0];
915 return ret ? : res.result[1];
917 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
919 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
921 struct qcom_scm_desc desc = {
922 .svc = QCOM_SCM_SVC_MP,
923 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
924 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
929 .owner = ARM_SMCCC_OWNER_SIP,
933 ret = qcom_scm_call(__scm->dev, &desc, NULL);
935 /* the page table has been initialized already, ignore the error */
941 EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
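/*
 * Illustrative sketch (not part of this driver): the two secure page table
 * calls above are normally used as a pair - query the required size, reserve
 * that much physically contiguous memory, then hand it to the firmware. How
 * "paddr" is allocated is up to the caller; only the ordering is shown here.
 *
 *	size_t psize;
 *	int ret;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	if (ret)
 *		return ret;
 *
 *	... allocate "paddr", a physically contiguous buffer of psize bytes ...
 *
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */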
943 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
945 struct qcom_scm_desc desc = {
946 .svc = QCOM_SCM_SVC_MP,
947 .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
948 .arginfo = QCOM_SCM_ARGS(2),
951 .owner = ARM_SMCCC_OWNER_SIP,
954 return qcom_scm_call(__scm->dev, &desc, NULL);
956 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
958 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
959 u32 cp_nonpixel_start,
960 u32 cp_nonpixel_size)
963 struct qcom_scm_desc desc = {
964 .svc = QCOM_SCM_SVC_MP,
965 .cmd = QCOM_SCM_MP_VIDEO_VAR,
966 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
967 QCOM_SCM_VAL, QCOM_SCM_VAL),
970 .args[2] = cp_nonpixel_start,
971 .args[3] = cp_nonpixel_size,
972 .owner = ARM_SMCCC_OWNER_SIP,
974 struct qcom_scm_res res;
976 ret = qcom_scm_call(__scm->dev, &desc, &res);
978 return ret ? : res.result[0];
980 EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
982 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
983 size_t mem_sz, phys_addr_t src, size_t src_sz,
984 phys_addr_t dest, size_t dest_sz)
987 struct qcom_scm_desc desc = {
988 .svc = QCOM_SCM_SVC_MP,
989 .cmd = QCOM_SCM_MP_ASSIGN,
990 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
991 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
992 QCOM_SCM_VAL, QCOM_SCM_VAL),
993 .args[0] = mem_region,
1000 .owner = ARM_SMCCC_OWNER_SIP,
1002 struct qcom_scm_res res;
1004 ret = qcom_scm_call(dev, &desc, &res);
1006 return ret ? : res.result[0];
1010 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
1011 * @mem_addr: mem region whose ownership need to be reassigned
1012 * @mem_sz: size of the region.
1013 * @srcvm: vmid for current set of owners, each set bit in
1014 * flag indicates a unique owner
1015 * @newvm: array having new owners and corresponding permission
1017 * @dest_cnt: number of owners in next set.
1019 * Return negative errno on failure or 0 on success with @srcvm updated.
1021 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1023 const struct qcom_scm_vmperm *newvm,
1024 unsigned int dest_cnt)
1026 struct qcom_scm_current_perm_info *destvm;
1027 struct qcom_scm_mem_map_info *mem_to_map;
1028 phys_addr_t mem_to_map_phys;
1029 phys_addr_t dest_phys;
1030 phys_addr_t ptr_phys;
1031 size_t mem_to_map_sz;
1038 u64 srcvm_bits = *srcvm;
1040 src_sz = hweight64(srcvm_bits) * sizeof(*src);
1041 mem_to_map_sz = sizeof(*mem_to_map);
1042 dest_sz = dest_cnt * sizeof(*destvm);
1043 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1044 ALIGN(dest_sz, SZ_64);
1046 void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1047 ptr_sz, GFP_KERNEL);
1051 ptr_phys = qcom_tzmem_to_phys(ptr);
1053 /* Fill source vmid detail */
1056 for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1057 if (srcvm_bits & BIT(b))
1058 src[i++] = cpu_to_le32(b);
1061 /* Fill details of mem buff to map */
1062 mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1063 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
1064 mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1065 mem_to_map->mem_size = cpu_to_le64(mem_sz);
1068 /* Fill details of next vmid detail */
1069 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1070 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1071 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1072 destvm->vmid = cpu_to_le32(newvm->vmid);
1073 destvm->perm = cpu_to_le32(newvm->perm);
1075 destvm->ctx_size = 0;
1076 next_vm |= BIT(newvm->vmid);
1079 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1080 ptr_phys, src_sz, dest_phys, dest_sz);
1083 "Assign memory protection call failed %d\n", ret);
1090 EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
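/*
 * Illustrative sketch (not part of this driver): reassigning a carved-out
 * region from HLOS to a remote VM. The VMID/permission macros are assumed to
 * come from <linux/firmware/qcom/qcom_scm.h>; "region_phys" and "region_size"
 * are placeholders.
 *
 *	struct qcom_scm_vmperm next[] = {
 *		{ .vmid = QCOM_SCM_VMID_MSS_MSA, .perm = QCOM_SCM_PERM_RW },
 *	};
 *	u64 curr = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(region_phys, region_size, &curr,
 *				  next, ARRAY_SIZE(next));
 *	if (ret)
 *		return ret;
 *
 * On success, curr holds the new owner bitmap and can be passed back in when
 * later returning the region to HLOS.
 */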
1093 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
1095 bool qcom_scm_ocmem_lock_available(void)
1097 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
1098 QCOM_SCM_OCMEM_LOCK_CMD);
1100 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
1103 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
1104 * region to the specified initiator
1106 * @id: tz initiator id
1107 * @offset: OCMEM offset
1109 * @mode: access mode (WIDE/NARROW)
1111 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
1114 struct qcom_scm_desc desc = {
1115 .svc = QCOM_SCM_SVC_OCMEM,
1116 .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1121 .arginfo = QCOM_SCM_ARGS(4),
1124 return qcom_scm_call(__scm->dev, &desc, NULL);
1126 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
1129 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1130 * region from the specified initiator
1132 * @id: tz initiator id
1133 * @offset: OCMEM offset
1136 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1138 struct qcom_scm_desc desc = {
1139 .svc = QCOM_SCM_SVC_OCMEM,
1140 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1144 .arginfo = QCOM_SCM_ARGS(3),
1147 return qcom_scm_call(__scm->dev, &desc, NULL);
1149 EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
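/*
 * Illustrative sketch (not part of this driver): the OCMEM driver brackets a
 * client's use of a region with a lock/unlock pair. "offset", "size" and
 * "mode" (WIDE/NARROW, per the kernel-doc above) are placeholders supplied by
 * the caller.
 *
 *	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size, mode);
 *	... use the region ...
 *	ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
 */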
1152 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1154 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1155 * qcom_scm_ice_set_key() are available.
1157 bool qcom_scm_ice_available(void)
1159 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1160 QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1161 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1162 QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1164 EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
1167 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1168 * @index: the keyslot to invalidate
1170 * The UFSHCI and eMMC standards define a standard way to do this, but it
1171 * doesn't work on these SoCs; only this SCM call does.
1173 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1174 * call doesn't specify which ICE instance the keyslot belongs to.
1176 * Return: 0 on success; -errno on failure.
1178 int qcom_scm_ice_invalidate_key(u32 index)
1180 struct qcom_scm_desc desc = {
1181 .svc = QCOM_SCM_SVC_ES,
1182 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1183 .arginfo = QCOM_SCM_ARGS(1),
1185 .owner = ARM_SMCCC_OWNER_SIP,
1188 return qcom_scm_call(__scm->dev, &desc, NULL);
1190 EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
1193 * qcom_scm_ice_set_key() - Set an inline encryption key
1194 * @index: the keyslot into which to set the key
1195 * @key: the key to program
1196 * @key_size: the size of the key in bytes
1197 * @cipher: the encryption algorithm the key is for
1198 * @data_unit_size: the encryption data unit size, i.e. the size of each
1199 * individual plaintext and ciphertext. Given in 512-byte
1200 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1202 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1203 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1205 * The UFSHCI and eMMC standards define a standard way to do this, but it
1206 * doesn't work on these SoCs; only this SCM call does.
1208 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1209 * call doesn't specify which ICE instance the keyslot belongs to.
1211 * Return: 0 on success; -errno on failure.
1213 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1214 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1216 struct qcom_scm_desc desc = {
1217 .svc = QCOM_SCM_SVC_ES,
1218 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1219 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1220 QCOM_SCM_VAL, QCOM_SCM_VAL,
1223 .args[2] = key_size,
1225 .args[4] = data_unit_size,
1226 .owner = ARM_SMCCC_OWNER_SIP,
1231 void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1236 memcpy(keybuf, key, key_size);
1237 desc.args[1] = qcom_tzmem_to_phys(keybuf);
1239 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1241 memzero_explicit(keybuf, key_size);
1245 EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
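/*
 * Illustrative sketch (not part of this driver): a storage driver programs a
 * keyslot before issuing inline-encrypted I/O and invalidates it on eviction.
 * "slot", "raw_key" and "raw_key_size" are placeholders supplied by the block
 * crypto layer; a data unit size of 8 corresponds to 4096 bytes.
 *
 *	if (!qcom_scm_ice_available())
 *		return -EOPNOTSUPP;
 *
 *	err = qcom_scm_ice_set_key(slot, raw_key, raw_key_size,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	... issue inline-encrypted I/O ...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */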
1248 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1250 * Return true if HDCP is supported, false if not.
1252 bool qcom_scm_hdcp_available(void)
1255 int ret = qcom_scm_clk_enable();
1260 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1261 QCOM_SCM_HDCP_INVOKE);
1263 qcom_scm_clk_disable();
1267 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
1270 * qcom_scm_hdcp_req() - Send HDCP request.
1271 * @req: HDCP request array
1272 * @req_cnt: HDCP request array count
1273 * @resp: response buffer passed to SCM
1275 * Write HDCP register(s) through SCM.
1277 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1280 struct qcom_scm_desc desc = {
1281 .svc = QCOM_SCM_SVC_HDCP,
1282 .cmd = QCOM_SCM_HDCP_INVOKE,
1283 .arginfo = QCOM_SCM_ARGS(10),
1296 .owner = ARM_SMCCC_OWNER_SIP,
1298 struct qcom_scm_res res;
1300 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1303 ret = qcom_scm_clk_enable();
1307 ret = qcom_scm_call(__scm->dev, &desc, &res);
1308 *resp = res.result[0];
1310 qcom_scm_clk_disable();
1314 EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
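/*
 * Illustrative sketch (not part of this driver): an HDCP client batches up to
 * QCOM_SCM_HDCP_MAX_REQ_CNT register writes per call. The register addresses
 * and values here are placeholders.
 *
 *	struct qcom_scm_hdcp_req req[2] = {
 *		{ .addr = my_hdcp_reg0, .val = 0x1 },
 *		{ .addr = my_hdcp_reg1, .val = 0x0 },
 *	};
 *	u32 resp;
 *	int ret;
 *
 *	if (qcom_scm_hdcp_available())
 *		ret = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
 */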
1316 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1318 struct qcom_scm_desc desc = {
1319 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1320 .cmd = QCOM_SCM_SMMU_PT_FORMAT,
1321 .arginfo = QCOM_SCM_ARGS(3),
1324 .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1325 .owner = ARM_SMCCC_OWNER_SIP,
1328 return qcom_scm_call(__scm->dev, &desc, NULL);
1330 EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
1332 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1334 struct qcom_scm_desc desc = {
1335 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1336 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1337 .arginfo = QCOM_SCM_ARGS(2),
1338 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1340 .owner = ARM_SMCCC_OWNER_SIP,
1344 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1346 EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
1348 bool qcom_scm_lmh_dcvsh_available(void)
1350 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1352 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
1354 int qcom_scm_shm_bridge_enable(void)
1356 struct qcom_scm_desc desc = {
1357 .svc = QCOM_SCM_SVC_MP,
1358 .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
1359 .owner = ARM_SMCCC_OWNER_SIP
1362 struct qcom_scm_res res;
1364 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
1365 QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
1368 return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
1370 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
1372 int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
1373 u64 ipfn_and_s_perm_flags, u64 size_and_flags,
1374 u64 ns_vmids, u64 *handle)
1376 struct qcom_scm_desc desc = {
1377 .svc = QCOM_SCM_SVC_MP,
1378 .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
1379 .owner = ARM_SMCCC_OWNER_SIP,
1380 .args[0] = pfn_and_ns_perm_flags,
1381 .args[1] = ipfn_and_s_perm_flags,
1382 .args[2] = size_and_flags,
1383 .args[3] = ns_vmids,
1384 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1385 QCOM_SCM_VAL, QCOM_SCM_VAL),
1388 struct qcom_scm_res res;
1391 ret = qcom_scm_call(__scm->dev, &desc, &res);
1394 *handle = res.result[1];
1396 return ret ?: res.result[0];
1398 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
1400 int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
1402 struct qcom_scm_desc desc = {
1403 .svc = QCOM_SCM_SVC_MP,
1404 .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
1405 .owner = ARM_SMCCC_OWNER_SIP,
1407 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1410 return qcom_scm_call(__scm->dev, &desc, NULL);
1412 EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
1414 int qcom_scm_lmh_profile_change(u32 profile_id)
1416 struct qcom_scm_desc desc = {
1417 .svc = QCOM_SCM_SVC_LMH,
1418 .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1419 .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1420 .args[0] = profile_id,
1421 .owner = ARM_SMCCC_OWNER_SIP,
1424 return qcom_scm_call(__scm->dev, &desc, NULL);
1426 EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
1428 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1429 u64 limit_node, u32 node_id, u64 version)
1431 int ret, payload_size = 5 * sizeof(u32);
1433 struct qcom_scm_desc desc = {
1434 .svc = QCOM_SCM_SVC_LMH,
1435 .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1436 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1437 QCOM_SCM_VAL, QCOM_SCM_VAL),
1438 .args[1] = payload_size,
1439 .args[2] = limit_node,
1442 .owner = ARM_SMCCC_OWNER_SIP,
1445 u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1451 payload_buf[0] = payload_fn;
1453 payload_buf[2] = payload_reg;
1455 payload_buf[4] = payload_val;
1457 desc.args[0] = qcom_tzmem_to_phys(payload_buf);
1459 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1463 EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
1465 int qcom_scm_gpu_init_regs(u32 gpu_req)
1467 struct qcom_scm_desc desc = {
1468 .svc = QCOM_SCM_SVC_GPU,
1469 .cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
1470 .arginfo = QCOM_SCM_ARGS(1),
1472 .owner = ARM_SMCCC_OWNER_SIP,
1475 return qcom_scm_call(__scm->dev, &desc, NULL);
1477 EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
1479 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1481 struct device_node *tcsr;
1482 struct device_node *np = dev->of_node;
1483 struct resource res;
1487 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1491 ret = of_address_to_resource(tcsr, 0, &res);
1496 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1500 *addr = res.start + offset;
1505 #ifdef CONFIG_QCOM_QSEECOM
1507 /* Lock for QSEECOM SCM call executions */
1508 static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1510 static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1511 struct qcom_scm_qseecom_resp *res)
1513 struct qcom_scm_res scm_res = {};
1517 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1518 * require the respective call lock to be held.
1520 lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1522 status = qcom_scm_call(__scm->dev, desc, &scm_res);
1524 res->result = scm_res.result[0];
1525 res->resp_type = scm_res.result[1];
1526 res->data = scm_res.result[2];
1535 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1536 * @desc: SCM call descriptor.
1537 * @res: SCM call response (output).
1539 * Performs the QSEECOM SCM call described by @desc, returning the response in
1542 * Return: Zero on success, nonzero on failure.
1544 static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1545 struct qcom_scm_qseecom_resp *res)
1550 * Note: Multiple QSEECOM SCM calls should not be executed at the same time,
1551 * so lock things here. This needs to be extended to callback/listener
1552 * handling when support for that is implemented.
1555 mutex_lock(&qcom_scm_qseecom_call_lock);
1556 status = __qcom_scm_qseecom_call(desc, res);
1557 mutex_unlock(&qcom_scm_qseecom_call_lock);
1559 dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1560 __func__, desc->owner, desc->svc, desc->cmd, res->result,
1561 res->resp_type, res->data);
1564 dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1569 * TODO: Handle incomplete and blocked calls:
1571 * Incomplete and blocked calls are not supported yet. Some devices
1572 * and/or commands require those, some don't. Let's warn about them
1573 * prominently in case someone attempts these commands with a
1574 * device/command combination that isn't supported yet.
1576 WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1577 WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1583 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1584 * @version: Pointer where the QSEECOM version will be stored.
1586 * Performs the QSEECOM SCM call querying the QSEECOM version currently running in
1589 * Return: Zero on success, nonzero on failure.
1591 static int qcom_scm_qseecom_get_version(u32 *version)
1593 struct qcom_scm_desc desc = {};
1594 struct qcom_scm_qseecom_resp res = {};
1598 desc.owner = QSEECOM_TZ_OWNER_SIP;
1599 desc.svc = QSEECOM_TZ_SVC_INFO;
1600 desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1601 desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1602 desc.args[0] = feature;
1604 ret = qcom_scm_qseecom_call(&desc, &res);
1608 *version = res.result;
1613 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1614 * @app_name: The name of the app.
1615 * @app_id: The returned app ID.
1617 * Query and return the application ID of the QSEE app identified by the given
1618 * name. This returned ID is the unique identifier of the app required for
1619 * subsequent communication.
1621 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1622 * loaded or could not be found.
1624 int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1626 unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1627 unsigned long app_name_len = strlen(app_name);
1628 struct qcom_scm_desc desc = {};
1629 struct qcom_scm_qseecom_resp res = {};
1632 if (app_name_len >= name_buf_size)
1635 char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1641 memcpy(name_buf, app_name, app_name_len);
1643 desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1644 desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1645 desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1646 desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1647 desc.args[0] = qcom_tzmem_to_phys(name_buf);
1648 desc.args[1] = app_name_len;
1650 status = qcom_scm_qseecom_call(&desc, &res);
1655 if (res.result == QSEECOM_RESULT_FAILURE)
1658 if (res.result != QSEECOM_RESULT_SUCCESS)
1661 if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1667 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
1670 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1671 * @app_id: The ID of the target app.
1672 * @req: Request buffer sent to the app (must be TZ memory)
1673 * @req_size: Size of the request buffer.
1674 * @rsp: Response buffer, written to by the app (must be TZ memory)
1675 * @rsp_size: Size of the response buffer.
1677 * Sends a request to the QSEE app associated with the given ID and reads back
1678 * its response. The caller must provide two DMA memory regions, one for the
1679 * request and one for the response, and fill out the @req region with the
1680 * respective (app-specific) request data. The QSEE app reads this and returns
1681 * its response in the @rsp region.
1683 * Return: Zero on success, nonzero on failure.
1685 int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
1686 void *rsp, size_t rsp_size)
1688 struct qcom_scm_qseecom_resp res = {};
1689 struct qcom_scm_desc desc = {};
1690 phys_addr_t req_phys;
1691 phys_addr_t rsp_phys;
1694 req_phys = qcom_tzmem_to_phys(req);
1695 rsp_phys = qcom_tzmem_to_phys(rsp);
1697 desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1698 desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1699 desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1700 desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1701 QCOM_SCM_RW, QCOM_SCM_VAL,
1702 QCOM_SCM_RW, QCOM_SCM_VAL);
1703 desc.args[0] = app_id;
1704 desc.args[1] = req_phys;
1705 desc.args[2] = req_size;
1706 desc.args[3] = rsp_phys;
1707 desc.args[4] = rsp_size;
1709 status = qcom_scm_qseecom_call(&desc, &res);
1714 if (res.result != QSEECOM_RESULT_SUCCESS)
1719 EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
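/*
 * Illustrative sketch (not part of this driver): a QSEECOM client looks up an
 * app once, then exchanges request/response buffers allocated from a TZ
 * memory pool. "my_ta_name", "tzmem_pool", "req_size" and "rsp_size" are
 * placeholders owned by the caller.
 *
 *	u32 app_id;
 *	int ret;
 *
 *	ret = qcom_scm_qseecom_app_get_id("my_ta_name", &app_id);
 *	if (ret)
 *		return ret;
 *
 *	void *req __free(qcom_tzmem) = qcom_tzmem_alloc(tzmem_pool, req_size, GFP_KERNEL);
 *	void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(tzmem_pool, rsp_size, GFP_KERNEL);
 *	if (!req || !rsp)
 *		return -ENOMEM;
 *
 *	... fill the request buffer ...
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 */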
1722 * We do not yet support re-entrant calls via the qseecom interface. To prevent
1723 * any potential issues with this, only allow validated machines for now.
1725 static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
1726 { .compatible = "lenovo,flex-5g" },
1727 { .compatible = "lenovo,thinkpad-x13s", },
1728 { .compatible = "qcom,sc8180x-primus" },
1729 { .compatible = "qcom,x1e80100-crd" },
1730 { .compatible = "qcom,x1e80100-qcp" },
1734 static bool qcom_scm_qseecom_machine_is_allowed(void)
1736 struct device_node *np;
1739 np = of_find_node_by_path("/");
1743 match = of_match_node(qcom_scm_qseecom_allowlist, np);
1749 static void qcom_scm_qseecom_free(void *data)
1751 struct platform_device *qseecom_dev = data;
1753 platform_device_del(qseecom_dev);
1754 platform_device_put(qseecom_dev);
1757 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1759 struct platform_device *qseecom_dev;
1764 * Note: We do two steps of validation here: First, we try to query the
1765 * QSEECOM version as a check to see if the interface exists on this
1766 * device. Second, we check against known good devices due to current
1767 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
1769 * Note that we deliberately do the machine check after the version
1770 * check so that we can log potentially supported devices. This should
1771 * be safe as downstream sources indicate that the version query is
1772 * neither blocking nor reentrant.
1774 ret = qcom_scm_qseecom_get_version(&version);
1778 dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
1780 if (!qcom_scm_qseecom_machine_is_allowed()) {
1781 dev_info(scm->dev, "qseecom: untested machine, skipping\n");
1786 * Set up QSEECOM interface device. All application clients will be
1787 * set up and managed by the corresponding driver for it.
1789 qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
1793 qseecom_dev->dev.parent = scm->dev;
1795 ret = platform_device_add(qseecom_dev);
1797 platform_device_put(qseecom_dev);
1801 return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
1804 #else /* CONFIG_QCOM_QSEECOM */
1806 static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1811 #endif /* CONFIG_QCOM_QSEECOM */
1814 * qcom_scm_is_available() - Checks if SCM is available
1816 bool qcom_scm_is_available(void)
1818 return !!READ_ONCE(__scm);
1820 EXPORT_SYMBOL_GPL(qcom_scm_is_available);
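/*
 * Illustrative sketch (not part of this driver): consumers that may probe
 * before the SCM device typically defer rather than fail hard:
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */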
1822 static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1824 /* FW currently only supports a single wq_ctx (zero).
1825 * TODO: Update this logic to include dynamic allocation and lookup of
1826 * completion structs when FW supports more wq_ctx values.
1829 dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1836 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
1840 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1844 wait_for_completion(&__scm->waitq_comp);
1849 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
1853 ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1857 complete(&__scm->waitq_comp);
1862 static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
1865 struct qcom_scm *scm = data;
1866 u32 wq_ctx, flags, more_pending = 0;
1869 ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
1871 dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
1875 if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
1876 dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
1880 ret = qcom_scm_waitq_wakeup(wq_ctx);
1883 } while (more_pending);
1889 static int qcom_scm_probe(struct platform_device *pdev)
1891 struct qcom_tzmem_pool_config pool_config;
1892 struct qcom_scm *scm;
1895 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1899 scm->dev = &pdev->dev;
1900 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1904 init_completion(&scm->waitq_comp);
1905 mutex_init(&scm->scm_bw_lock);
1907 scm->path = devm_of_icc_get(&pdev->dev, NULL);
1908 if (IS_ERR(scm->path))
1909 return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
1910 "failed to acquire interconnect path\n");
1912 scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
1913 if (IS_ERR(scm->core_clk))
1914 return PTR_ERR(scm->core_clk);
1916 scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
1917 if (IS_ERR(scm->iface_clk))
1918 return PTR_ERR(scm->iface_clk);
1920 scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
1921 if (IS_ERR(scm->bus_clk))
1922 return PTR_ERR(scm->bus_clk);
1924 scm->reset.ops = &qcom_scm_pas_reset_ops;
1925 scm->reset.nr_resets = 1;
1926 scm->reset.of_node = pdev->dev.of_node;
1927 ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1931 /* vote for max clk rate for highest performance */
1932 ret = clk_set_rate(scm->core_clk, INT_MAX);
1936 /* Let all above stores be available after this */
1937 smp_store_release(&__scm, scm);
1939 irq = platform_get_irq_optional(pdev, 0);
1944 ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
1945 IRQF_ONESHOT, "qcom-scm", __scm);
1947 return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
1953 * If requested, enable "download mode": from this point on, a warm boot
1954 * will cause the boot stages to enter download mode, unless it is
1955 * disabled below by a clean shutdown/reboot.
1958 qcom_scm_set_download_mode(true);
1962 * Disable SDI if indicated by DT that it is enabled by default.
1964 if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
1965 qcom_scm_disable_sdi();
1967 ret = of_reserved_mem_device_init(__scm->dev);
1968 if (ret && ret != -ENODEV)
1969 return dev_err_probe(__scm->dev, ret,
1970 "Failed to setup the reserved memory region for TZ mem\n");
1972 ret = qcom_tzmem_enable(__scm->dev);
1974 return dev_err_probe(__scm->dev, ret,
1975 "Failed to enable the TrustZone memory allocator\n");
1977 memset(&pool_config, 0, sizeof(pool_config));
1978 pool_config.initial_size = 0;
1979 pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
1980 pool_config.max_size = SZ_256K;
1982 __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
1983 if (IS_ERR(__scm->mempool))
1984 return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
1985 "Failed to create the SCM memory pool\n");
1988 * Initialize the QSEECOM interface.
1990 * Note: QSEECOM is fairly self-contained and this only adds the
1991 * interface device (the driver of which does most of the heavy
1992 * lifting). So any errors returned here should be either -ENOMEM or
1993 * -EINVAL (with the latter only in case there's a bug in our code).
1994 * This means that there is no need to bring down the whole SCM driver.
1995 * Just log the error instead and let SCM live.
1997 ret = qcom_scm_qseecom_init(scm);
1998 WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
2003 static void qcom_scm_shutdown(struct platform_device *pdev)
2005 /* Clean shutdown, disable download mode to allow normal restart */
2006 qcom_scm_set_download_mode(false);
2009 static const struct of_device_id qcom_scm_dt_match[] = {
2010 { .compatible = "qcom,scm" },
2012 /* Legacy entries kept for backwards compatibility */
2013 { .compatible = "qcom,scm-apq8064" },
2014 { .compatible = "qcom,scm-apq8084" },
2015 { .compatible = "qcom,scm-ipq4019" },
2016 { .compatible = "qcom,scm-msm8953" },
2017 { .compatible = "qcom,scm-msm8974" },
2018 { .compatible = "qcom,scm-msm8996" },
2021 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
2023 static struct platform_driver qcom_scm_driver = {
2026 .of_match_table = qcom_scm_dt_match,
2027 .suppress_bind_attrs = true,
2029 .probe = qcom_scm_probe,
2030 .shutdown = qcom_scm_shutdown,
2033 static int __init qcom_scm_init(void)
2035 return platform_driver_register(&qcom_scm_driver);
2037 subsys_initcall(qcom_scm_init);
2039 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
2040 MODULE_LICENSE("GPL v2");