// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"

int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_bank, u32 *active_bank,
		      u32 *bank_num)
{
	struct tpml_pcr_selection pcrs;
	size_t i;
	u32 ret;

	*supported_bank = 0;
	*active_bank = 0;
	*bank_num = 0;

	ret = tpm2_get_pcr_info(dev, &pcrs);
	if (ret)
		return ret;

	for (i = 0; i < pcrs.count; i++) {
		struct tpms_pcr_selection *sel = &pcrs.selection[i];
		u32 hash_mask = tcg2_algorithm_to_mask(sel->hash);

		if (tpm2_algorithm_supported(sel->hash))
			*supported_bank |= hash_mask;
		else
			log_warning("%s: unknown algorithm %x\n", __func__,
				    sel->hash);

		if (tpm2_is_active_bank(sel))
			*active_bank |= hash_mask;
	}

	*bank_num = pcrs.count;

	return 0;
}

int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
	u32 supported = 0;
	u32 pcr_banks = 0;
	u32 active = 0;
	int rc;

	rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
	if (rc)
		return rc;

	*active_pcr_banks = active;

	return 0;
}

u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
	u32 len;
	size_t i;

	len = offsetof(struct tcg_pcr_event2, digests);
	len += offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

		if (!l)
			continue;

		len += l + offsetof(struct tpmt_ha, digest);
	}
	len += sizeof(u32);

	return len;
}

int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
		       struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	u8 final[sizeof(union tpmu_ha)];
#if IS_ENABLED(CONFIG_SHA256)
	sha256_context ctx_256;
#endif
#if IS_ENABLED(CONFIG_SHA512)
	sha512_context ctx_512;
#endif
#if IS_ENABLED(CONFIG_SHA1)
	sha1_context ctx;
#endif
	size_t i;
	u32 len;

	digest_list->count = 0;
	for (i = 0; i < priv->active_bank_count; i++) {
		switch (priv->active_banks[i]) {
#if IS_ENABLED(CONFIG_SHA1)
		case TPM2_ALG_SHA1:
			sha1_starts(&ctx);
			sha1_update(&ctx, input, length);
			sha1_finish(&ctx, final);
			len = TPM2_SHA1_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA256)
		case TPM2_ALG_SHA256:
			sha256_starts(&ctx_256);
			sha256_update(&ctx_256, input, length);
			sha256_finish(&ctx_256, final);
			len = TPM2_SHA256_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA384)
		case TPM2_ALG_SHA384:
			sha384_starts(&ctx_512);
			sha384_update(&ctx_512, input, length);
			sha384_finish(&ctx_512, final);
			len = TPM2_SHA384_DIGEST_SIZE;
			break;
#endif
#if IS_ENABLED(CONFIG_SHA512)
		case TPM2_ALG_SHA512:
			sha512_starts(&ctx_512);
			sha512_update(&ctx_512, input, length);
			sha512_finish(&ctx_512, final);
			len = TPM2_SHA512_DIGEST_SIZE;
			break;
#endif
		default:
			printf("%s: unsupported algorithm %x\n", __func__,
			       priv->active_banks[i]);
			continue;
		}

		digest_list->digests[digest_list->count].hash_alg =
			priv->active_banks[i];
		memcpy(&digest_list->digests[digest_list->count].digest, final,
		       len);
		digest_list->count++;
	}

	return 0;
}

void tcg2_log_append(u32 pcr_index, u32 event_type,
		     struct tpml_digest_values *digest_list, u32 size,
		     const u8 *event, u8 *log)
{
	size_t len;
	size_t pos;
	u32 i;

	pos = offsetof(struct tcg_pcr_event2, pcr_index);
	put_unaligned_le32(pcr_index, log);
	pos = offsetof(struct tcg_pcr_event2, event_type);
	put_unaligned_le32(event_type, log + pos);
	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, count);
	put_unaligned_le32(digest_list->count, log + pos);

	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 hash_alg = digest_list->digests[i].hash_alg;

		len = tpm2_algorithm_to_len(hash_alg);
		if (!len)
			continue;

		pos += offsetof(struct tpmt_ha, hash_alg);
		put_unaligned_le16(hash_alg, log + pos);
		pos += offsetof(struct tpmt_ha, digest);
		memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
		pos += len;
	}

	put_unaligned_le32(size, log + pos);
	pos += sizeof(u32);
	memcpy(log + pos, event, size);
}

static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
				 u32 event_type,
				 struct tpml_digest_values *digest_list,
				 u32 size, const u8 *event)
{
	u32 event_size;
	u8 *log;

	event_size = size + tcg2_event_get_size(digest_list);
	if (elog->log_position + event_size > elog->log_size) {
		printf("%s: log too large: %u + %u > %u\n", __func__,
		       elog->log_position, event_size, elog->log_size);
		return -ENOBUFS;
	}

	log = elog->log + elog->log_position;
	elog->log_position += event_size;

	tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

	return 0;
}

static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpm_chip_priv *priv = dev_get_uclass_priv(dev);
	struct tcg_efi_spec_id_event *ev;
	struct tcg_pcr_event *log;
	u32 event_size;
	u32 count = 0;
	u32 log_size;
	size_t i;
	u16 len;

	count = priv->active_bank_count;
	event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
	event_size += 1 +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
	log_size = offsetof(struct tcg_pcr_event, event) + event_size;

	if (log_size > elog->log_size) {
		printf("%s: log too large: %u > %u\n", __func__, log_size,
		       elog->log_size);
		return -ENOBUFS;
	}

	log = (struct tcg_pcr_event *)elog->log;
	put_unaligned_le32(0, &log->pcr_index);
	put_unaligned_le32(EV_NO_ACTION, &log->event_type);
	memset(&log->digest, 0, sizeof(log->digest));
	put_unaligned_le32(event_size, &log->event_size);

	ev = (struct tcg_efi_spec_id_event *)log->event;
	strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		sizeof(ev->signature));
	put_unaligned_le32(0, &ev->platform_class);
	ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
	ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
	ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
	ev->uintn_size = sizeof(size_t) / sizeof(u32);
	put_unaligned_le32(count, &ev->number_of_algorithms);

	for (i = 0; i < count; ++i) {
		len = tpm2_algorithm_to_len(priv->active_banks[i]);
		put_unaligned_le16(priv->active_banks[i],
				   &ev->digest_sizes[i].algorithm_id);
		put_unaligned_le16(len, &ev->digest_sizes[i].digest_size);
	}

	*((u8 *)ev + (event_size - 1)) = 0;
	elog->log_position = log_size;

	return 0;
}

static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
				struct udevice *dev,
				struct tpml_digest_values *digest_list,
				u32 log_position)
{
	const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	u32 event_size;
	u32 count;
	u16 algo;
	u32 pcr;
	u32 pos;
	u16 len;
	u8 *log;
	int rc;
	u32 i;

	while (log_position + offset < elog->log_size) {
		log = elog->log + log_position;

		pos = offsetof(struct tcg_pcr_event2, pcr_index);
		pcr = get_unaligned_le32(log + pos);
		pos = offsetof(struct tcg_pcr_event2, event_type);
		if (!get_unaligned_le32(log + pos))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, count);
		count = get_unaligned_le32(log + pos);
		if (count > ARRAY_SIZE(hash_algo_list) ||
		    (digest_list->count && digest_list->count != count))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, digests);
		for (i = 0; i < count; ++i) {
			pos += offsetof(struct tpmt_ha, hash_alg);
			if (log_position + pos + sizeof(u16) >= elog->log_size)
				return 0;

			algo = get_unaligned_le16(log + pos);
			pos += offsetof(struct tpmt_ha, digest);
			switch (algo) {
			case TPM2_ALG_SHA1:
			case TPM2_ALG_SHA256:
			case TPM2_ALG_SHA384:
			case TPM2_ALG_SHA512:
				len = tpm2_algorithm_to_len(algo);
				break;
			default:
				return 0;
			}

			if (digest_list->count) {
				if (algo != digest_list->digests[i].hash_alg ||
				    log_position + pos + len >= elog->log_size)
					return 0;

				memcpy(digest_list->digests[i].digest.sha512,
				       log + pos, len);
			}

			pos += len;
		}

		if (log_position + pos + sizeof(u32) >= elog->log_size)
			return 0;

		event_size = get_unaligned_le32(log + pos);
		pos += event_size + sizeof(u32);
		if (log_position + pos > elog->log_size)
			return 0;

		if (digest_list->count) {
			rc = tcg2_pcr_extend(dev, pcr, digest_list);
			if (rc)
				return rc;
		}

		log_position += pos;
	}

	elog->log_position = log_position;
	elog->found = true;
	return 0;
}

static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpml_digest_values digest_list;
	struct tcg_efi_spec_id_event *event;
	struct tcg_pcr_event *log;
	u32 log_active;
	u32 calc_size;
	u32 active;
	u32 count;
	u32 evsz;
	u32 mask;
	u16 algo;
	u16 len;
	int rc;
	u32 i;

	if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
		return 0;

	log = (struct tcg_pcr_event *)elog->log;
	if (get_unaligned_le32(&log->pcr_index) != 0 ||
	    get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
		return 0;

	for (i = 0; i < sizeof(log->digest); i++) {
		if (log->digest[i])
			return 0;
	}

	evsz = get_unaligned_le32(&log->event_size);
	if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
	    evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
		return 0;

	event = (struct tcg_efi_spec_id_event *)log->event;
	if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
		return 0;

	if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
	    event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
		return 0;

	count = get_unaligned_le32(&event->number_of_algorithms);
	if (count > ARRAY_SIZE(hash_algo_list))
		return 0;

	calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
		1;
	if (evsz != calc_size)
		return 0;

	/*
	 * Go through the algorithms listed in the EventLog. If they don't
	 * match the active TPM banks, exit and report the erroneous banks.
	 * We've already checked that U-Boot supports all the enabled TPM
	 * algorithms, so only the EventLog needs to be checked against the
	 * active TPM ones.
	 */
	digest_list.count = 0;
	log_active = 0;
	for (i = 0; i < count; ++i) {
		algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
		mask = tcg2_algorithm_to_mask(algo);

		switch (algo) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
			if (tpm2_algorithm_to_len(algo) != len) {
				log_err("EventLog invalid algorithm length\n");
				return -1;
			}
			digest_list.digests[digest_list.count++].hash_alg = algo;
			break;
		default:
			/*
			 * This could be ignored if the previous bootloader
			 * did not extend the TPM PCRs, but for now just exit.
			 */
			log_err("EventLog has unsupported algorithm 0x%x\n",
				algo);
			return -1;
		}
		log_active |= mask;
	}

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;
	/* If the EventLog and the active algorithms don't match, exit */
	if (log_active != active) {
		log_err("EventLog doesn't contain all active PCR banks\n");
		return -1;
	}

	/* Read PCR0 to check if previous firmware extended the PCRs or not. */
	rc = tcg2_pcr_read(dev, 0, &digest_list);
	if (rc)
		return rc;

	for (i = 0; i < digest_list.count; ++i) {
		u8 hash_buf[TPM2_SHA512_DIGEST_SIZE] = { 0 };
		u16 hash_alg = digest_list.digests[i].hash_alg;

		if (memcmp((u8 *)&digest_list.digests[i].digest, hash_buf,
			   tpm2_algorithm_to_len(hash_alg)))
			digest_list.count = 0;
	}

	return tcg2_replay_eventlog(elog, dev, &digest_list,
				    offsetof(struct tcg_pcr_event, event) +
				    evsz);
}

int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
		    struct tpml_digest_values *digest_list)
{
	u32 rc;
	u32 i;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;

		rc = tpm2_pcr_extend(dev, pcr_index, alg,
				     (u8 *)&digest_list->digests[i].digest,
				     tpm2_algorithm_to_len(alg));
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
		  struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv;
	u32 rc;
	u32 i;

	priv = dev_get_uclass_priv(dev);
	if (!priv)
		return -ENODEV;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;
		u8 *digest = (u8 *)&digest_list->digests[i].digest;

		rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
				   digest, tpm2_algorithm_to_len(alg), NULL);
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
		      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
		      u32 event_size, const u8 *event)
{
	struct tpml_digest_values digest_list;
	int rc;

	if (data)
		rc = tcg2_create_digest(dev, data, size, &digest_list);
	else
		rc = tcg2_create_digest(dev, event, event_size, &digest_list);
	if (rc)
		return rc;

	rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
	if (rc)
		return rc;

	return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
				     event_size, event);
}

int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
			    bool ignore_existing_log)
{
	struct tcg2_event_log log;
	int rc, i;

	elog->log_position = 0;
	elog->found = false;

	/*
	 * Make sure U-Boot is compiled with support for all the active PCR
	 * banks, since we are about to create an EventLog and nothing will
	 * be measured if the PCR banks don't match.
	 */
	if (!tpm2_check_active_banks(dev)) {
		log_err("Cannot create EventLog\n");
		log_err("Mismatch between U-Boot and TPM hash algos\n");
		log_info("TPM:\n");
		tpm2_print_active_banks(dev);
		log_info("U-Boot:\n");
		for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
			const struct digest_info *algo = &hash_algo_list[i];
			const char *str;

			if (!algo->supported)
				continue;

			str = tpm2_algorithm_name(algo->hash_alg);
			if (str)
				log_info("%s\n", str);
		}
		return -EINVAL;
	}

	rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
	if (!rc) {
		log.log_position = 0;
		log.found = false;

		if (!ignore_existing_log) {
			rc = tcg2_log_parse(dev, &log);
			if (rc)
				return rc;
		}

		if (elog->log_size) {
			if (log.found) {
				if (elog->log_size < log.log_position)
					return -ENOBUFS;

				/*
				 * Copy the discovered log into the user buffer
				 * if there's enough space.
				 */
				memcpy(elog->log, log.log, log.log_position);
			}

			unmap_physmem(log.log, MAP_NOCACHE);
		} else {
			elog->log = log.log;
			elog->log_size = log.log_size;
		}

		elog->log_position = log.log_position;
		elog->found = log.found;
	}

	/*
	 * Initialize the log buffer if no log was discovered and the buffer
	 * is valid. Users can pass in their own buffer as a fallback if no
	 * memory region is found.
	 */
	if (!elog->found && elog->log_size)
		rc = tcg2_log_init(dev, elog);

	return rc;
}

int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
			  bool ignore_existing_log)
{
	int rc;

	rc = tcg2_platform_get_tpm2(dev);
	if (rc)
		return rc;

	rc = tpm_auto_start(*dev);
	if (rc)
		return rc;

	rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
				strlen(version_string) + 1,
				(u8 *)version_string);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	return 0;
}

void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
			   bool error)
{
	u32 event = error ? 0x1 : 0xffffffff;
	int i;

	for (i = 0; i < 8; ++i)
		tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
				   (const u8 *)&event);

	if (elog->log)
		unmap_physmem(elog->log, MAP_NOCACHE);
}

__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
	const __be32 *addr_prop;
	const __be32 *size_prop;
	int asize;
	int ssize;

	*addr = NULL;
	*size = 0;

	addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
	if (!addr_prop)
		addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);

	size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
	if (!size_prop)
		size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);

	if (addr_prop && size_prop) {
		u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
		u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	} else {
		struct ofnode_phandle_args args;
		phys_addr_t a;
		fdt_size_t s;

		if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0,
					       0, &args))
			return -ENODEV;

		a = ofnode_get_addr_size(args.node, "reg", &s);
		if (a == FDT_ADDR_T_NONE)
			return -ENOMEM;

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	}

	return 0;
}

__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
	for_each_tpm_device(*dev) {
		if (tpm_get_version(*dev) == TPM_V2)
			return 0;
	}

	return -ENODEV;
}

u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
		if (hash_algo_list[i].hash_alg == algo)
			return hash_algo_list[i].hash_mask;
	}

	return 0;
}

__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}