// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
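
/*
 * Wait for the TMCReady bit of the status register to be set, i.e. for the
 * formatter pipeline and the internal FIFO to drain.
 */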
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;

	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
	}
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&csdev->dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}
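
/* Trace capture is started and stopped via the TraceCaptEn bit of TMC_CTL. */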
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}
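
/*
 * Prepare the captured trace for reading through the character device,
 * dispatching on the IP configuration: ETB/ETF read back their internal
 * SRAM, ETR reads back the buffer it placed in system memory.
 */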
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_prepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_prepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

	return ret;
}

static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_unprepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_unprepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

	return ret;
}
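
/*
 * File operations backing the /dev/tmc_* misc device registered in
 * tmc_probe(): open() prepares the captured trace for reading, read()
 * copies it to user space and release() undoes the preparation.
 */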
static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
					  loff_t pos, size_t len, char **bufpp)
{
	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
	case TMC_CONFIG_TYPE_ETR:
		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
	}

	return -EINVAL;
}

static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};
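
/* Decode DEVID.MEMWIDTH into the driver's memory interface width enum. */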
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	enum tmc_mem_intf_width memwidth;

	/*
	 * Excerpt from the TRM:
	 *
	 * DEVID::MEMWIDTH[10:8]
	 * 0x2 Memory interface databus is 32 bits wide.
	 * 0x3 Memory interface databus is 64 bits wide.
	 * 0x4 Memory interface databus is 128 bits wide.
	 * 0x5 Memory interface databus is 256 bits wide.
	 */
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
		break;
	case 0x3:
		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
		break;
	case 0x4:
		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
		break;
	case 0x5:
		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
		break;
	default:
		memwidth = 0;
	}

	return memwidth;
}
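
/*
 * Generate read-only sysfs attributes that dump the raw management
 * registers; they are collected in coresight_tmc_mgmt_attrs[] below.
 */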
#define coresight_tmc_reg(name, offset)			\
	coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)	\
	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);

static struct attribute *coresight_tmc_mgmt_attrs[] = {
	&dev_attr_rsz.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	&dev_attr_mode.attr,
	&dev_attr_pscr.attr,
	&dev_attr_devid.attr,
	&dev_attr_dba.attr,
	&dev_attr_axictl.attr,
	&dev_attr_authstatus.attr,
	NULL,
};
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);
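
/*
 * buffer_size: size of the system memory trace buffer, only configurable
 * on a TMC-ETR and required to be a multiple of PAGE_SIZE.
 */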
static ssize_t buffer_size_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* Only permitted for TMC-ETRs */
	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
		return -EPERM;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;
	/* The buffer size should be page aligned */
	if (val & (PAGE_SIZE - 1))
		return -EINVAL;

	drvdata->size = val;
	return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

static inline bool tmc_etr_can_use_sg(struct device *dev)
{
	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}
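
/*
 * The ETR is only usable when the authentication interface reports
 * non-secure invasive debug as enabled (NSID == 0b11).
 */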
static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
	int rc;
	u32 dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
				TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
		break;
	default:
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}
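
/*
 * Default to 1MiB when the firmware node does not specify an
 * "arm,buffer-size" property.
 */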
static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
	u32 size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
		size = SZ_1M;
	return size;
}

static u32 tmc_etr_get_max_burst_size(struct device *dev)
{
	u32 burst_size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
				     &burst_size))
		return TMC_AXICTL_WR_BURST_16;

	/* Only permissible values are 0 to 15 */
	if (burst_size > 0xF)
		burst_size = TMC_AXICTL_WR_BURST_16;

	return burst_size;
}
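
/*
 * Probe an AMBA TMC: map the programming registers, identify the
 * configuration (ETB, ETF or ETR) from DEVID, register the device as a
 * CoreSight sink (and link, for ETF) and expose the /dev/tmc_* interface.
 */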
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct coresight_dev_list *dev_list = NULL;

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	desc.dev = dev;
	desc.groups = coresight_tmc_groups;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = &etb_devs;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid,
					 coresight_get_uci_data(id));
		if (ret)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = &etf_devs;
		break;
	default:
		pr_err("%s: Unsupported TMC config\n", desc.name);
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	adev->dev.platform_data = pdata;
	desc.pdata = pdata;

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
	else
		pm_runtime_put(&adev->dev);
out:
	return ret;
}
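
/*
 * On shutdown, make sure an ETR that is still capturing stops writing
 * trace to system memory before the system goes down.
 */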
static void tmc_shutdown(struct amba_device *adev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->mode == CS_MODE_DISABLED)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		tmc_etr_disable_hw(drvdata);

	/*
	 * We do not care about coresight unregister here unlike remove
	 * callback which is required for making coresight modular since
	 * the system is going down after this.
	 */
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static void tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * etb fops in this case, device is there until last file
	 * handler to this device is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, tmc_ids);

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.shutdown	= tmc_shutdown,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};

module_amba_driver(tmc_driver);

MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
MODULE_LICENSE("GPL v2");