# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
from functools import lru_cache
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
# Global command line arguments.
# List of regular event tables.
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
# Events to write out when the table is closed
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
# Order in which specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc',
    # Seems useful, put it early.
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold', 'desc',
    'long_desc', 'unit', 'compat', 'aggr_mode', 'event_grouping'
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string
  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  return s[0:-len(suffix)] if s.endswith(suffix) else s
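# Illustrative usage (comments only, not part of the original script):
#   removesuffix('pipeline.json', '.json') -> 'pipeline'
#   removesuffix('pipeline', '.json')      -> 'pipeline' (unchanged when the suffix is absent)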
def file_name_to_table_name(prefix: str, parents: Sequence[str],
  """Generate a C table name from directory names."""
  tblname += '_' + dirname
  return tblname.replace('-', '_')
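# Illustrative example (comments only): for a hypothetical model directory named
# 'cortex-a34' reached with no intermediate parents,
#   file_name_to_table_name('pmu_events_', [], 'cortex-a34') -> 'pmu_events__cortex_a34'
# The trailing '_' of the prefix plus the joining '_' produce the double underscore,
# and '-' is rewritten to '_' so the result is a valid C identifier.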
def c_len(s: str) -> int:
  """Return the length of s as a C string
  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (i.e. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  utf = s.encode(encoding='utf-8', errors='strict')
    print(f'broken string {s}')
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
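# Worked example (comments only), assuming the '\000'-terminated encoding produced by
# build_c_string below: for the Python string 'foo\\000' the UTF-8 length is 7, the
# lone backslash subtracts one and the '\\000' sequence subtracts two more, giving 4,
# i.e. "foo" plus its terminating NUL byte in the emitted C string.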
98 """A class to hold many strings concatenated together.
100 Generating a large number of stand-alone C strings creates a large
101 number of relocations in position independent code. The BigCString
102 is a helper for this case. It builds a single string which within it
103 are all the other C strings (to avoid memory issues the string
104 itself is held as a list of strings). The offsets within the big
105 string are recorded and when stored to disk these don't need
106 relocation. To reduce the size of the string further, identical
107 strings are merged. If a longer string ends-with the same value as a
108 shorter string, these entries are also merged.
  big_string: Sequence[str]
  offsets: Dict[str, int]
  def add(self, s: str) -> None:
    """Called to add to the big string."""
  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""
    # Determine if two strings can be folded, i.e. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
        fold_into_strings[val].add(key)
    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    big_string_offset = 0
    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
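    # Worked example of the folding above (comments only): if the added strings were
    # {'xyz', 'yz'}, then 'yz' folds into 'xyz' and only 'xyz' is emitted; the offset
    # recorded for 'yz' is offsets['xyz'] + c_len('xyz') - c_len('yz'), i.e. it points
    # one byte into the stored copy of 'xyz'.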
182 """Representation of an event loaded from a json file dictionary."""
184 def __init__(self, jd: dict):
185 """Constructor passed the dictionary of parsed json values."""
187 def llx(x: int) -> str:
188 """Convert an int to a string similar to a printf modifier of %#llx."""
189 return '0' if x == 0 else hex(x)
    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')
    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      aggr_mode_to_enum = {
      return aggr_mode_to_enum[aggr_mode]
    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      return metric_constraint_to_enum[metric_constraint]
    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list, to the appropriate event field."""
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
      return msrmap[int(num.split(',', 1)[0], 0)]
    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well-known event names to an event string, otherwise use the event argument."""
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      if name.lower() in fixed:
        return fixed[name.lower()]
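      # Illustrative example (comments only): real_event('inst_retired.any', 'event=0xc0')
      # returns the canonical 'event=0xc0,period=2000003' from the table above; names not
      # in the table fall back to the event string that was passed in.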
    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'PAI-EXT': 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      return table[unit] if unit in table else f'uncore_{unit.lower()}'
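    # Illustrative mappings (comments only): unit_to_pmu('CBO') -> 'uncore_cbox', while an
    # unlisted unit such as 'iMC' falls through to the default and becomes 'uncore_imc'.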
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
      extra_desc += ' Supports address when precise'
      extra_desc += ' Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')
    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('SampleAfterValue', 'period='),
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
      event += f',{filter}'
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
      if self.desc and not self.desc.endswith('. '):
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)
    self.event = real_event(self.name, event)
  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    for attr, value in self.__dict__.items():
        s += f'\t{attr} = {value},\n'
  def build_c_string(self, metric: bool) -> str:
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += f'{x}\\000' if x else '\\000'
  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""
    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
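# Sketch of the emitted table entry (comments only, offsets are illustrative): each event
# collapses to a single offset into big_c_string, so a generated line looks roughly like
#   { 1234 }, /* some_event\000cpu\000topic\000...\000 */
# where 1234 is whatever offset BigCString assigned to this event's packed string.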
@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
  metrics: list[Tuple[str, metric.Expression]] = []
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
      _pending_events.append(e)
      _pending_metrics.append(e)
def print_pending_events() -> None:
  """Optionally close events table."""
  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    def fix_none(s: Optional[str]) -> str:
    return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
            fix_none(j.metric_name))
  global _pending_events
  if not _pending_events:
  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
    _event_tables.append(_pending_events_tblname)
  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')
  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string(metric=False))
  _args.output_file.write('};\n\n')
def print_pending_metrics() -> None:
  """Optionally close metrics table."""
  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
  global _pending_metrics
  if not _pending_metrics:
  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
    _metric_tables.append(_pending_metrics_tblname)
  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')
  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []
  _args.output_file.write('};\n\n')
def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
  return removesuffix(topic, '.json').replace('-', ' ')
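# Illustrative examples (comments only): get_topic('floating-point.json') -> 'floating point'
# and get_topic('cache.json') -> 'cache'; '-' separators in the file name become spaces in
# the topic string.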
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  # base dir or too deep
  if level == 0 or level > 4:
  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
      _bcs.add(event.build_c_string(metric=False))
    if event.metric_name:
      _bcs.add(event.build_c_string(metric=True))
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):
  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    print_pending_events()
    print_pending_metrics()
    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
  # base dir or too deep
  if level == 0 or level > 4:
  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
  add_events_table_entries(item, get_topic(item.name))
def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct compact_pmu_event *entries;
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 * The cpuid can contain any character other than the comma.
struct pmu_events_map {
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
const struct pmu_events_map pmu_events_map[] = {
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t\t.entries = pmu_events__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t\t.entries = pmu_metrics__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
              event_tblname = 'NULL'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
              metric_tblname = 'NULL'
            if event_size == '0' and metric_size == '0':
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.cpuid = "{cpuid}",
\t\t.entries = {event_tblname},
\t\t.length = {event_size}
\t\t.entries = {metric_tblname},
\t\t.length = {metric_size}
  _args.output_file.write("""{
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
static const struct pmu_sys_events pmu_sys_event_tables[] = {
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.entries = {metric_tblname},
\t\t\t.length = ARRAY_SIZE({metric_tblname})
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t.name = \"{tblname}\",
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
static void decompress_event(int offset, struct pmu_event *pe)
\tconst char *p = &big_c_string[offset];
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
      _args.output_file.write('\twhile (*p++);')
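  # Sketch of the C emitted by the loop above (illustrative): string attributes decode as
  #   pe->name = (*p == '\0' ? NULL : p);
  #   while (*p++);
  # while enum/bool attributes decode as
  #   pe->deprecated = *p - '0';
  #   p++;
  # with no pointer advance emitted after the final attribute.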
  _args.output_file.write("""}
static void decompress_metric(int offset, struct pmu_metric *pm)
\tconst char *p = &big_c_string[offset];
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}
int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
        for (size_t i = 0; i < table->length; i++) {
                decompress_event(table->entries[i].offset, &pe);
                ret = fn(&pe, table, data);
int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
                                      pmu_metric_iter_fn fn,
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_metric pm;
                decompress_metric(table->entries[i].offset, &pm);
                ret = fn(&pm, table, data);
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        /* on some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        /* on some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
const struct pmu_events_table *find_sys_events_table(const char *name)
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in sorted(os.scandir(path), key=lambda e: e.name):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
      action(parents, item)
        ftw(item.path, parents + [item.name], action)
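  # Illustrative behaviour (comments only), assuming _args.model == 'arm/cortex-a34':
  # directories are only filtered at the depth whose joined path has as many '/'
  # separators as the model, so 'arm/cortex-a57' would be skipped while
  # 'arm/cortex-a34' and any path containing 'test' is still walked into.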
  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size. Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
      help='Root of tree containing architecture directories containing json files'
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()
  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
struct compact_pmu_event {
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()
  print_mapping_table(archs)
  print_system_mapping_table()
if __name__ == '__main__':