# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
-# For help see docs/tracing.txt
+# For help see docs/devel/tracing.txt
+from __future__ import print_function
import struct
import re
import inspect
rec = (name, rechdr[1], rechdr[3])
try:
event = edict[name]
- except KeyError, e:
+ except KeyError as e:
import sys
sys.stderr.write('%s event is logged but is not declared ' \
'in the trace events file, try using ' \
def get_mapping(fobj):
(event_id, ) = struct.unpack('=Q', fobj.read(8))
(len, ) = struct.unpack('=L', fobj.read(4))
- name = fobj.read(len)
+ name = fobj.read(len).decode()
return (event_id, name)
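The mapping record parsed by get_mapping() is simply a native-endian 8-byte event ID ('=Q'), a 4-byte name length ('=L') and the raw name bytes. A minimal writer sketch that mirrors this reader (the event ID and name are placeholders; this is not how QEMU itself emits mapping records):

    def write_mapping_record(fobj, event_id, name):
        # Mirror of get_mapping(): '=Q' event ID, '=L' name length, then the name bytes.
        name_bytes = name.encode()
        fobj.write(struct.pack('=Q', event_id))
        fobj.write(struct.pack('=L', len(name_bytes)))
        fobj.write(name_bytes)

    # e.g. write_mapping_record(out, 42, 'my_event'), with made-up values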
raise ValueError('Log format %d not supported with this QEMU release!'
% log_version)
-def read_trace_records(edict, fobj):
- """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6)."""
- idtoname = {
- dropped_event_id: "dropped"
- }
+def read_trace_records(edict, idtoname, fobj):
+ """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).
+
+ Note that `idtoname` is modified if the file contains mapping records.
+
+ Args:
+ edict (str -> Event): events dict, indexed by name
+ idtoname (int -> str): event names dict, indexed by event ID
+ fobj (file): input file
+
+ """
while True:
t = fobj.read(8)
if len(t) == 0:
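A rough usage sketch for the new read_trace_records() signature: the caller seeds idtoname the same way process() does further down, and the function extends the dict whenever the log contains mapping records. dump_records is a made-up helper and assumes the log header has already been consumed:

    def dump_records(edict, fobj):
        idtoname = {dropped_event_id: "dropped"}
        for rec in read_trace_records(edict, idtoname, fobj):
            event = edict[rec[0]]            # same lookup process() performs below
            print('%s timestamp=%d pid=%d' % (event.name, rec[1], rec[2]))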
def process(events, log, analyzer, read_header=True):
"""Invoke an analyzer on each event in a log."""
if isinstance(events, str):
- events = read_events(open(events, 'r'))
+ events = read_events(open(events, 'r'), events)
if isinstance(log, str):
log = open(log, 'rb')
dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
edict = {"dropped": dropped_event}
+ idtoname = {dropped_event_id: "dropped"}
for event in events:
edict[event.name] = event
+ # If there is no header assume event ID mapping matches events list
+ if not read_header:
+ for event_id, event in enumerate(events):
+ idtoname[event_id] = event.name
+
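The header-less mapping is purely positional. For example, if the trace-events file declares exactly two events, foo and then bar (made-up names), the loop above leaves

    idtoname == {dropped_event_id: "dropped", 0: "foo", 1: "bar"}

so event IDs in the log can still be resolved to declared events without any mapping records.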
def build_fn(analyzer, event):
if isinstance(event, str):
return analyzer.catchall
fn_argcount = len(inspect.getargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
- return lambda _, rec: fn(*((rec[1:2],) + rec[3:3 + event_argcount]))
+ return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
elif fn_argcount == event_argcount + 2:
# Include timestamp and pid
return lambda _, rec: fn(*rec[1:3 + event_argcount])
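The two arity checks above correspond to the optional leading parameters an analyzer method may declare. A sketch with made-up trace events foo(uint64_t x) and bar(uint64_t x), subclassing simpletrace's Analyzer base class:

    class MyAnalyzer(Analyzer):
        def foo(self, timestamp, x):
            # one extra parameter: build_fn prepends the record timestamp
            pass

        def bar(self, timestamp, pid, x):
            # two extra parameters: timestamp and pid are both prepended
            pass

Per-event methods are optional; events without one fall back to analyzer.catchall, as the first branch of build_fn shows.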
analyzer.begin()
fn_cache = {}
- for rec in read_trace_records(edict, log):
+ for rec in read_trace_records(edict, idtoname, log):
event_num = rec[0]
event = edict[event_num]
if event_num not in fn_cache:
'<trace-file>\n' % sys.argv[0])
sys.exit(1)
- events = read_events(open(sys.argv[1], 'r'))
+ events = read_events(open(sys.argv[1], 'r'), sys.argv[1])
process(events, sys.argv[2], analyzer, read_header=read_header)
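process() can also be driven directly rather than through run(); both file names below are placeholders and MyAnalyzer refers to the sketch above:

    events = read_events(open('trace-events-all', 'r'), 'trace-events-all')
    process(events, 'trace-12345', MyAnalyzer())
    # or, for a log without a header:
    # process(events, 'trace-12345', MyAnalyzer(), read_header=False)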
if __name__ == '__main__':
else:
fields.append('%s=0x%x' % (name, rec[i]))
i += 1
- print ' '.join(fields)
+ print(' '.join(fields))
run(Formatter())
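Invoked from the command line, the default Formatter analyzer is used, for example (both file names are placeholders):

    ./simpletrace.py trace-events-all trace-12345

argv[1] is the trace-events definition file passed to read_events() and argv[2] the binary trace handed to process(); each record is printed with its arguments appended as name=value fields (the '%s=0x%x' case is visible above).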