// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 */

#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
#include "coresight-priv.h"

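/*
 * sysfs attributes for the ETM3.x tracer. Most attributes read or update a
 * field of the per-device etm_config under drvdata->spinlock; a few mirror
 * drvdata state or the hardware registers directly.
 */
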
static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		memset(config, 0, sizeof(struct etm_config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
		}

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

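/*
 * mode_store() translates the user-visible ETM_MODE_* flags into the
 * corresponding ETMCR and ETMTECR1 control bits, rejecting features the
 * hardware does not advertise in ETMCCR/ETMCCER.
 */
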
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

	if (config->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else
		config->ctrl &= ~ETMCR_STALL_MODE;

	if (config->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	if (config->mode & ETM_MODE_BBROAD)
		config->ctrl |= ETMCR_BRANCH_BROADCAST;
	else
		config->ctrl &= ~ETMCR_BRANCH_BROADCAST;

	if (config->mode & ETM_MODE_RET_STACK)
		config->ctrl |= ETMCR_RETURN_STACK;
	else
		config->ctrl &= ~ETMCR_RETURN_STACK;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);

static ssize_t trigger_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->trigger_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->enable_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->fifofull_level = val;
	return size;
}
static DEVICE_ATTR_RW(fifofull_level);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);

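/*
 * The addr_single/addr_range/addr_start/addr_stop/addr_acctype attributes
 * below all operate on the address comparator selected by addr_idx.
 */
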
static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = config->addr_val[idx];
	val2 = config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= (1 << (idx/2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->startstop_ctrl |= (1 << idx);
	config->enable_ctrl1 |= BIT(25);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->startstop_ctrl |= (1 << (idx + 16));
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->addr_acctype[config->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->addr_acctype[config->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

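/*
 * The cntr_rld_val/cntr_event/cntr_rld_event/cntr_val attributes below
 * operate on the counter selected by cntr_idx (cntr_val_show dumps all
 * counters).
 */
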
static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf, "counter %d: %x\n",
				       i, config->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	config->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

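/*
 * The ctxid_pid attribute below operates on the context ID comparator
 * selected by ctxid_idx.
 */
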
static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_pid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	ret = kstrtoul(buf, 16, &pid);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	val = config->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = etm_get_trace_id(drvdata);
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);

static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	NULL,
};

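/*
 * Raw management registers, mirrored read-only under the "mgmt" sysfs group
 * so the programmed hardware state can be inspected for debugging.
 */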
#define coresight_etm3x_reg(name, offset)			\
	coresight_simple_reg32(struct etm_drvdata, name, offset)

coresight_etm3x_reg(etmccr, ETMCCR);
coresight_etm3x_reg(etmccer, ETMCCER);
coresight_etm3x_reg(etmscr, ETMSCR);
coresight_etm3x_reg(etmidr, ETMIDR);
coresight_etm3x_reg(etmcr, ETMCR);
coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
coresight_etm3x_reg(etmteevr, ETMTEEVR);
coresight_etm3x_reg(etmtssvr, ETMTSSCR);
coresight_etm3x_reg(etmtecr1, ETMTECR1);
coresight_etm3x_reg(etmtecr2, ETMTECR2);

static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};