// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"
int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_bank, u32 *active_bank,
		      u32 *bank_num)
{
	struct tpml_pcr_selection pcrs;

	ret = tpm2_get_pcr_info(dev, &pcrs);
	if (ret)
		return ret;

	for (i = 0; i < pcrs.count; i++) {
		struct tpms_pcr_selection *sel = &pcrs.selection[i];
		u32 hash_mask = tcg2_algorithm_to_mask(sel->hash);

		if (tpm2_algorithm_supported(sel->hash))
			*supported_bank |= hash_mask;
		else
			log_warning("%s: unknown algorithm %x\n", __func__,
				    sel->hash);

		if (tpm2_is_active_bank(sel))
			*active_bank |= hash_mask;
	}

	*bank_num = pcrs.count;
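
/*
 * tcg2_get_active_pcr_banks() - convenience wrapper around tcg2_get_pcr_info()
 * that only reports the mask of currently active PCR banks.
 */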
int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
	rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
	if (rc)
		return rc;

	*active_pcr_banks = active;

	return 0;
}
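
/*
 * tcg2_event_get_size() - compute the size of a TCG_PCR_EVENT2 entry carrying
 * the digests in @digest_list (fixed header plus one TPMT_HA per digest), not
 * including the event data itself.
 */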
u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
	len = offsetof(struct tcg_pcr_event2, digests);
	len += offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

		len += l + offsetof(struct tpmt_ha, digest);
	}

	return len;
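
/*
 * tcg2_create_digest() - hash the input buffer with every algorithm that is
 * active in the TPM and compiled into U-Boot, filling @digest_list with one
 * entry per bank.
 */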
int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
		       struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	u8 final[sizeof(union tpmu_ha)];
#if IS_ENABLED(CONFIG_SHA256)
	sha256_context ctx_256;
#endif
#if IS_ENABLED(CONFIG_SHA512)
	sha512_context ctx_512;
#endif
#if IS_ENABLED(CONFIG_SHA1)
	sha1_context ctx;
#endif
	u32 len;
	size_t i;

	digest_list->count = 0;
	for (i = 0; i < priv->active_bank_count; i++) {
		switch (priv->active_banks[i]) {
#if IS_ENABLED(CONFIG_SHA1)
		case TPM2_ALG_SHA1:
			sha1_starts(&ctx);
			sha1_update(&ctx, input, length);
			sha1_finish(&ctx, final);
			len = TPM2_SHA1_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA256)
		case TPM2_ALG_SHA256:
			sha256_starts(&ctx_256);
			sha256_update(&ctx_256, input, length);
			sha256_finish(&ctx_256, final);
			len = TPM2_SHA256_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA384)
		case TPM2_ALG_SHA384:
			sha384_starts(&ctx_512);
			sha384_update(&ctx_512, input, length);
			sha384_finish(&ctx_512, final);
			len = TPM2_SHA384_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA512)
		case TPM2_ALG_SHA512:
			sha512_starts(&ctx_512);
			sha512_update(&ctx_512, input, length);
			sha512_finish(&ctx_512, final);
			len = TPM2_SHA512_DIGEST_SIZE;
			break;
#endif
		default:
			printf("%s: unsupported algorithm %x\n", __func__,
			       priv->active_banks[i]);
			continue;
		}

		digest_list->digests[digest_list->count].hash_alg =
			priv->active_banks[i];
		memcpy(&digest_list->digests[digest_list->count].digest, final,
		       len);
		digest_list->count++;
	}

	return 0;
}
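
/*
 * tcg2_log_append() - serialize one TCG_PCR_EVENT2 entry (PCR index, event
 * type, digest list and event data) into @log using little-endian, unaligned
 * accessors.
 */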
void tcg2_log_append(u32 pcr_index, u32 event_type,
		     struct tpml_digest_values *digest_list, u32 size,
		     const u8 *event, u8 *log)
{
	pos = offsetof(struct tcg_pcr_event2, pcr_index);
	put_unaligned_le32(pcr_index, log + pos);
	pos = offsetof(struct tcg_pcr_event2, event_type);
	put_unaligned_le32(event_type, log + pos);
	pos = offsetof(struct tcg_pcr_event2, digests) +
	      offsetof(struct tpml_digest_values, count);
	put_unaligned_le32(digest_list->count, log + pos);

	pos = offsetof(struct tcg_pcr_event2, digests) +
	      offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 hash_alg = digest_list->digests[i].hash_alg;

		len = tpm2_algorithm_to_len(hash_alg);

		pos += offsetof(struct tpmt_ha, hash_alg);
		put_unaligned_le16(hash_alg, log + pos);
		pos += offsetof(struct tpmt_ha, digest);
		memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
		pos += len;
	}

	put_unaligned_le32(size, log + pos);
	pos += sizeof(u32);
	memcpy(log + pos, event, size);
}
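
/*
 * tcg2_log_append_check() - append an event to the log buffer, first checking
 * that the remaining space can hold it.
 */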
static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
				 u32 event_type,
				 struct tpml_digest_values *digest_list,
				 u32 size, const u8 *event)
{
	event_size = size + tcg2_event_get_size(digest_list);
	if (elog->log_position + event_size > elog->log_size) {
		printf("%s: log too large: %u + %u > %u\n", __func__,
		       elog->log_position, event_size, elog->log_size);
		return -ENOBUFS;
	}

	log = elog->log + elog->log_position;
	elog->log_position += event_size;

	tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

	return 0;
}
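
/*
 * tcg2_log_init() - write the initial Spec ID (EV_NO_ACTION) event, declaring
 * the log format and the digest size of every active PCR bank.
 */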
static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	struct tcg_efi_spec_id_event *ev;
	struct tcg_pcr_event *log;

	count = priv->active_bank_count;
	event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
	event_size += 1 +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
	log_size = offsetof(struct tcg_pcr_event, event) + event_size;

	if (log_size > elog->log_size) {
		printf("%s: log too large: %u > %u\n", __func__, log_size,
		       elog->log_size);
		return -ENOBUFS;
	}

	log = (struct tcg_pcr_event *)elog->log;
	put_unaligned_le32(0, &log->pcr_index);
	put_unaligned_le32(EV_NO_ACTION, &log->event_type);
	memset(&log->digest, 0, sizeof(log->digest));
	put_unaligned_le32(event_size, &log->event_size);

	ev = (struct tcg_efi_spec_id_event *)log->event;
	strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		sizeof(ev->signature));
	put_unaligned_le32(0, &ev->platform_class);
	ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
	ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
	ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
	ev->uintn_size = sizeof(size_t) / sizeof(u32);
	put_unaligned_le32(count, &ev->number_of_algorithms);

	for (i = 0; i < count; ++i) {
		len = tpm2_algorithm_to_len(priv->active_banks[i]);
		put_unaligned_le16(priv->active_banks[i],
				   &ev->digest_sizes[i].algorithm_id);
		put_unaligned_le16(len, &ev->digest_sizes[i].digest_size);
	}

	*((u8 *)ev + (event_size - 1)) = 0;
	elog->log_position = log_size;

	return 0;
}
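
/*
 * tcg2_replay_eventlog() - walk a previously discovered event log, validating
 * each TCG_PCR_EVENT2 entry, and extend the PCRs with the recorded digests
 * when @digest_list indicates the PCRs have not been extended yet.
 */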
static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
				struct udevice *dev,
				struct tpml_digest_values *digest_list,
				u32 log_position)
{
	const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);

	while (log_position + offset < elog->log_size) {
		log = elog->log + log_position;

		pos = offsetof(struct tcg_pcr_event2, pcr_index);
		pcr = get_unaligned_le32(log + pos);
		pos = offsetof(struct tcg_pcr_event2, event_type);
		if (!get_unaligned_le32(log + pos))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, count);
		count = get_unaligned_le32(log + pos);
		if (count > ARRAY_SIZE(hash_algo_list) ||
		    (digest_list->count && digest_list->count != count))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, digests);
		for (i = 0; i < count; ++i) {
			pos += offsetof(struct tpmt_ha, hash_alg);
			if (log_position + pos + sizeof(u16) >= elog->log_size)
				return 0;

			algo = get_unaligned_le16(log + pos);
			pos += offsetof(struct tpmt_ha, digest);
			switch (algo) {
			case TPM2_ALG_SHA1:
			case TPM2_ALG_SHA256:
			case TPM2_ALG_SHA384:
			case TPM2_ALG_SHA512:
				len = tpm2_algorithm_to_len(algo);
				break;
			default:
				return 0;
			}

			if (digest_list->count) {
				if (algo != digest_list->digests[i].hash_alg ||
				    log_position + pos + len >= elog->log_size)
					return 0;

				memcpy(digest_list->digests[i].digest.sha512,
				       log + pos, len);
			}

			pos += len;
		}

		if (log_position + pos + sizeof(u32) >= elog->log_size)
			return 0;

		event_size = get_unaligned_le32(log + pos);
		pos += event_size + sizeof(u32);
		if (log_position + pos > elog->log_size)
			return 0;

		if (digest_list->count) {
			rc = tcg2_pcr_extend(dev, pcr, digest_list);
			if (rc)
				return rc;
		}

		log_position += pos;
	}

	elog->log_position = log_position;
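
/*
 * tcg2_log_parse() - validate an event log handed over by earlier firmware:
 * check the Spec ID header, make sure the recorded algorithms match the
 * active PCR banks, then replay the log if PCR0 is still all-zero.
 */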
static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpml_digest_values digest_list;
	struct tcg_efi_spec_id_event *event;
	struct tcg_pcr_event *log;

	if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
		return 0;

	log = (struct tcg_pcr_event *)elog->log;
	if (get_unaligned_le32(&log->pcr_index) != 0 ||
	    get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
		return 0;

	for (i = 0; i < sizeof(log->digest); i++) {
		if (log->digest[i])
			return 0;
	}

	evsz = get_unaligned_le32(&log->event_size);
	if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
	    evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
		return 0;

	event = (struct tcg_efi_spec_id_event *)log->event;
	if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
		return 0;

	if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
	    event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
		return 0;

	count = get_unaligned_le32(&event->number_of_algorithms);
	if (count > ARRAY_SIZE(hash_algo_list))
		return 0;

	calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
		1;
	if (evsz != calc_size)
		return 0;

	/*
	 * Go through the algorithms the EventLog contains. If the EventLog
	 * algorithms don't match the active TPM ones, exit and report the
	 * error.
	 * We've already checked that U-Boot supports all the enabled TPM
	 * algorithms, so just check the EventLog against the TPM active ones.
	 */
	digest_list.count = 0;
	log_active = 0;
	for (i = 0; i < count; ++i) {
		algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
		mask = tcg2_algorithm_to_mask(algo);

		switch (algo) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
			if (tpm2_algorithm_to_len(algo) != len) {
				log_err("EventLog invalid algorithm length\n");
				return -1;
			}
			digest_list.digests[digest_list.count++].hash_alg = algo;
			break;
		default:
			/*
			 * We can ignore this if the TPM PCRs are not extended
			 * by the previous bootloader. But for now just exit
			 */
			log_err("EventLog has unsupported algorithm 0x%x\n",
				algo);
			return -1;
		}

		log_active |= mask;
	}

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;
	/* If the EventLog and active algorithms don't match, exit */
	if (log_active != active) {
		log_err("EventLog doesn't contain all active PCR banks\n");
		return -1;
	}

	/* Read PCR0 to check if previous firmware extended the PCRs or not. */
	rc = tcg2_pcr_read(dev, 0, &digest_list);
	if (rc)
		return rc;

	for (i = 0; i < digest_list.count; ++i) {
		u8 hash_buf[TPM2_SHA512_DIGEST_SIZE] = { 0 };
		u16 hash_alg = digest_list.digests[i].hash_alg;

		if (memcmp((u8 *)&digest_list.digests[i].digest, hash_buf,
			   tpm2_algorithm_to_len(hash_alg)))
			digest_list.count = 0;
	}

	return tcg2_replay_eventlog(elog, dev, &digest_list,
				    offsetof(struct tcg_pcr_event, event) +
				    evsz);
}
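
/*
 * tcg2_pcr_extend() - extend @pcr_index with every digest in @digest_list.
 */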
int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
		    struct tpml_digest_values *digest_list)
{
	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;

		rc = tpm2_pcr_extend(dev, pcr_index, alg,
				     (u8 *)&digest_list->digests[i].digest,
				     tpm2_algorithm_to_len(alg));
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}
int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
		  struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv;

	priv = dev_get_uclass_priv(dev);

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;
		u8 *digest = (u8 *)&digest_list->digests[i].digest;

		rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
				   digest, tpm2_algorithm_to_len(alg), NULL);
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}
int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
		      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
		      u32 event_size, const u8 *event)
{
	struct tpml_digest_values digest_list;

	if (data)
		rc = tcg2_create_digest(dev, data, size, &digest_list);
	else
		rc = tcg2_create_digest(dev, event, event_size, &digest_list);
	if (rc)
		return rc;

	rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
	if (rc)
		return rc;

	return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
				     event_size, event);
}
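
/*
 * tcg2_log_prepare_buffer() - locate the platform event log buffer, optionally
 * parse a log left behind by earlier firmware, and initialize a fresh log when
 * none was found.
 */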
int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
			    bool ignore_existing_log)
{
	struct tcg2_event_log log;

	elog->log_position = 0;
	elog->found = false;

	/*
	 * Make sure U-Boot is compiled with support for all the active PCR
	 * banks, since we are about to create an EventLog and we won't be
	 * able to measure anything if the PCR banks don't match.
	 */
	if (!tpm2_check_active_banks(dev)) {
		log_err("Cannot create EventLog\n");
		log_err("Mismatch between U-Boot and TPM hash algos\n");
		log_info("TPM:\n");
		tpm2_print_active_banks(dev);
		log_info("U-Boot:\n");
		for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
			const struct digest_info *algo = &hash_algo_list[i];
			const char *str;

			if (!algo->supported)
				continue;

			str = tpm2_algorithm_name(algo->hash_alg);
			if (str)
				log_info("%s\n", str);
		}
		return -EINVAL;
	}

	rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
	if (!rc) {
		log.log_position = 0;
		log.found = false;

		if (!ignore_existing_log) {
			rc = tcg2_log_parse(dev, &log);
			if (rc)
				return rc;
		}

		if (elog->log_size) {
			if (log.found) {
				if (elog->log_size < log.log_position)
					return -ENOBUFS;

				/*
				 * Copy the discovered log into the user buffer
				 * if there's enough space.
				 */
				memcpy(elog->log, log.log, log.log_position);
			}

			unmap_physmem(log.log, MAP_NOCACHE);
		} else {
			elog->log = log.log;
			elog->log_size = log.log_size;
		}

		elog->log_position = log.log_position;
		elog->found = log.found;
	}

	/*
	 * Initialize the log buffer if no log was discovered and the buffer is
	 * valid. Users can pass in their own buffer as a fallback if no
	 * memory region is found.
	 */
	if (!elog->found && elog->log_size)
		rc = tcg2_log_init(dev, elog);

	return rc;
}
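
/*
 * tcg2_measurement_init() - find the TPMv2 device, start it, prepare the
 * event log and measure the U-Boot version string into PCR 0 as
 * EV_S_CRTM_VERSION.
 */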
int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
			  bool ignore_existing_log)
{
	int rc;

	rc = tcg2_platform_get_tpm2(dev);
	if (rc)
		return rc;

	rc = tpm_auto_start(*dev);
	if (rc)
		return rc;

	rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
				strlen(version_string) + 1,
				(u8 *)version_string);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	return rc;
}
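
/*
 * tcg2_measurement_term() - cap PCRs 0-7 with an EV_SEPARATOR event (0x1 on
 * error, 0xffffffff otherwise) and release the log buffer mapping.
 */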
void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
			   bool error)
{
	u32 event = error ? 0x1 : 0xffffffff;
	int i;

	for (i = 0; i < 8; ++i)
		tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
				   (u8 *)&event);

	if (elog->log)
		unmap_physmem(elog->log, MAP_NOCACHE);
}
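
/*
 * tcg2_platform_get_log() - default (weak) lookup of the event log buffer:
 * try the tpm_event_log_addr/size or linux,sml-base/size properties first,
 * then fall back to a "memory-region" phandle.
 */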
__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
	const __be32 *addr_prop;
	const __be32 *size_prop;

	addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
	if (!addr_prop)
		addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);

	size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
	if (!size_prop)
		size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);

	if (addr_prop && size_prop) {
		u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
		u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	} else {
		struct ofnode_phandle_args args;

		if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0,
					       0, &args))
			return -ENODEV;

		a = ofnode_get_addr_size(args.node, "reg", &s);
		if (a == FDT_ADDR_T_NONE)
			return -ENOMEM;

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	}

	return 0;
}
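
/*
 * tcg2_platform_get_tpm2() - default (weak) helper returning the first TPMv2
 * device found.
 */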
__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
	for_each_tpm_device(*dev) {
		if (tpm_get_version(*dev) == TPM_V2)
			return 0;
	}

	return -ENODEV;
}
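
/*
 * tcg2_algorithm_to_mask() - map a TPM2 algorithm id to the corresponding
 * hash-mask bit from hash_algo_list, or 0 when the algorithm is unknown.
 */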
u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
		if (hash_algo_list[i].hash_alg == algo)
			return hash_algo_list[i].hash_mask;
	}

	return 0;
}
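
/*
 * tcg2_platform_startup_error() - weak hook for boards that want to react to
 * a TPM startup failure; the default does nothing.
 */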
__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}