// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Infineon Technologies AG
 * Copyright (C) 2016 STMicroelectronics SAS
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.3, revision 27, via _raw/native
 * SPI access_.
 *
 * It is based on the original tpm_tis device driver from Leendert van
 * Doorn, Kylene Hall and Jarkko Sakkinen.
 */

#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/tpm.h>

#include "tpm.h"
#include "tpm_tis_core.h"
#include "tpm_tis_spi.h"

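/*
 * The TIS-over-SPI hardware protocol moves at most 64 bytes of payload per
 * transaction, so larger reads and writes below are split into
 * MAX_SPI_FRAMESIZE chunks.
 */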
#define MAX_SPI_FRAMESIZE 64

/*
 * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
 * keep trying to read from the device until MISO goes high indicating the
 * wait state has ended.
 *
 * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
 */
static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
				    struct spi_transfer *spi_xfer)
{
	struct spi_message m;
	int ret, i;

	if ((phy->iobuf[3] & 0x01) == 0) {
		// handle SPI wait states
		for (i = 0; i < TPM_RETRY; i++) {
			spi_xfer->len = 1;
			spi_message_init(&m);
			spi_message_add_tail(spi_xfer, &m);
			ret = spi_sync_locked(phy->spi_device, &m);
			if (ret < 0)
				return ret;
			if (phy->iobuf[0] & 0x01)
				break;
		}

		if (i == TPM_RETRY)
			return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Half duplex controllers with support for TPM wait state detection, like
 * Tegra QSPI, need CMD, ADDR and DATA sent in a single message to manage HW
 * flow control. Each phase is sent in a separate transfer so the controller
 * can identify each phase.
 */
static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_transfer spi_xfer[3];
	struct spi_message m;
	u8 transfer_len;
	int ret = 0;

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		spi_message_init(&m);
		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));

		/* CMD phase: direction bit and transfer size */
		spi_xfer[0].tx_buf = phy->iobuf;
		spi_xfer[0].len = 1;
		spi_message_add_tail(&spi_xfer[0], &m);

		/* ADDR phase: 24-bit TIS register address */
		spi_xfer[1].tx_buf = phy->iobuf + 1;
		spi_xfer[1].len = 3;
		spi_message_add_tail(&spi_xfer[1], &m);

		/* DATA phase */
		if (out) {
			spi_xfer[2].tx_buf = &phy->iobuf[4];
			spi_xfer[2].rx_buf = NULL;
			memcpy(&phy->iobuf[4], out, transfer_len);
			out += transfer_len;
		}

		if (in) {
			spi_xfer[2].tx_buf = NULL;
			spi_xfer[2].rx_buf = &phy->iobuf[4];
		}

		spi_xfer[2].len = transfer_len;
		spi_message_add_tail(&spi_xfer[2], &m);

		reinit_completion(&phy->ready);

		ret = spi_sync(phy->spi_device, &m);
		if (ret < 0)
			return ret;

		if (in) {
			memcpy(in, &phy->iobuf[4], transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

	return ret;
}

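/*
 * Full duplex transfer: send the 4-byte header (command/size byte plus the
 * 24-bit register address), poll the TPM's wait state via flow control, then
 * clock the data phase, all under a single chip select assertion.
 */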
static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	int ret = 0;
	struct spi_message m;
	struct spi_transfer spi_xfer;
	u8 transfer_len;

	spi_bus_lock(phy->spi_device->master);

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_xfer.tx_buf = phy->iobuf;
		spi_xfer.rx_buf = phy->iobuf;
		spi_xfer.len = 4;
		spi_xfer.cs_change = 1;

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		/* Flow control transfers are receive only */
		spi_xfer.tx_buf = NULL;
		ret = phy->flow_control(phy, &spi_xfer);
		if (ret < 0)
			goto exit;

		spi_xfer.cs_change = 0;
		spi_xfer.len = transfer_len;
		spi_xfer.delay.value = 5;
		spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;

		if (out) {
			spi_xfer.tx_buf = phy->iobuf;
			spi_xfer.rx_buf = NULL;
			memcpy(phy->iobuf, out, transfer_len);
			out += transfer_len;
		}

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		reinit_completion(&phy->ready);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		if (in) {
			memcpy(in, phy->iobuf, transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

exit:
	if (ret < 0) {
		/* Deactivate chip select */
		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		spi_sync_locked(phy->spi_device, &m);
	}

	spi_bus_unlock(phy->spi_device->master);

	return ret;
}

int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
			 u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_controller *ctlr = phy->spi_device->controller;

	/*
	 * TPM flow control over SPI requires full duplex support.
	 * Send the entire message to a half duplex controller so that it can
	 * handle the wait state polling in hardware; the TPM HW flow control
	 * flag (SPI_TPM_HW_FLOW) is set for such controllers at probe time.
	 */
	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
		return tpm_tis_spi_transfer_half(data, addr, len, in, out);
	else
		return tpm_tis_spi_transfer_full(data, addr, len, in, out);
}

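/* tpm_tis_core phy_ops hooks: thin wrappers around tpm_tis_spi_transfer() */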
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}

static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
				   u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}

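/*
 * Shared init path, also used by the cr50 variant: allocate the bounce
 * buffer used for all SPI transfers and hand the phy off to tpm_tis_core.
 */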
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
		     int irq, const struct tpm_tis_phy_ops *phy_ops)
{
	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
	if (!phy->iobuf)
		return -ENOMEM;

	phy->spi_device = spi;

	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
}

static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
	.read_bytes = tpm_tis_spi_read_bytes,
	.write_bytes = tpm_tis_spi_write_bytes,
};

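/*
 * Default probe for plain TIS-over-SPI parts: allocate the phy, use the
 * generic software flow control, and flag SPI_TPM_HW_FLOW on half duplex
 * controllers so the controller performs the wait state handling itself.
 */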
static int tpm_tis_spi_probe(struct spi_device *dev)
{
	struct tpm_tis_spi_phy *phy;
	int irq;

	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
			   GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->flow_control = tpm_tis_spi_flow_control;

	if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
		dev->mode |= SPI_TPM_HW_FLOW;

	/* If the SPI device has an IRQ then use that */
	if (dev->irq > 0)
		irq = dev->irq;
	else
		irq = -1;

	init_completion(&phy->ready);
	return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
}

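/*
 * Probe dispatch: the OF or SPI ID match data selects the per-chip probe
 * (tpm_tis_spi_probe for generic parts, cr50_spi_probe for Google cr50),
 * falling back to the generic probe when neither provides one.
 */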
typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);

static int tpm_tis_spi_driver_probe(struct spi_device *spi)
{
	const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
	tpm_tis_spi_probe_func probe_func;

	probe_func = of_device_get_match_data(&spi->dev);
	if (!probe_func) {
		if (spi_dev_id) {
			probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
			if (!probe_func)
				return -ENODEV;
		} else
			probe_func = tpm_tis_spi_probe;
	}

	return probe_func(spi);
}

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);

static void tpm_tis_spi_remove(struct spi_device *dev)
{
	struct tpm_chip *chip = spi_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}

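/*
 * Legacy SPI IDs and OF compatibles below map each supported part to its
 * probe function via driver_data / .data.
 */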
static const struct spi_device_id tpm_tis_spi_id[] = {
	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
	{ "cr50", (unsigned long)cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);

static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "google,cr50", .data = cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);

static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
	{ "SMO0768", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);

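/*
 * of_match_ptr()/ACPI_PTR() compile away when CONFIG_OF/CONFIG_ACPI are
 * disabled, which is why the match tables above are marked __maybe_unused.
 */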
static struct spi_driver tpm_tis_spi_driver = {
	.driver = {
		.name = "tpm_tis_spi",
		.pm = &tpm_tis_pm,
		.of_match_table = of_match_ptr(of_tis_spi_match),
		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tpm_tis_spi_driver_probe,
	.remove = tpm_tis_spi_remove,
	.id_table = tpm_tis_spi_id,
};
module_spi_driver(tpm_tis_spi_driver);

MODULE_DESCRIPTION("TPM Driver for native SPI access");
MODULE_LICENSE("GPL");