apptrace: Adds support for converting SystemView traces to JSON format

parent 9b06b5cdf9
commit 884717fb2f
@@ -35,6 +35,7 @@ inline static uint32_t get_ccount(void)
        } \
        callers[N] = __builtin_return_address(N+STACK_OFFSET); \
        if (!esp_ptr_executable(callers[N])) { \
            callers[N] = 0; \
            return; \
        } \
    } while(0)
@@ -284,7 +284,7 @@ Using this file GDB will connect to the target, reset it, and start tracing when

5. Quit GDB when program stops at :cpp:func:`heap_trace_stop`. Trace data are saved in ``/tmp/heap.svdat``

6. Run processing script ``$IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py /tmp/heap_log.svdat </path/to/program/elf>``
6. Run processing script ``$IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -p -b </path/to/program/elf> /tmp/heap_log.svdat``

The output from the heap trace will look something like this::
@@ -61,6 +61,7 @@ tools/elf_to_ld.sh
tools/esp_app_trace/logtrace_proc.py
tools/esp_app_trace/sysviewtrace_proc.py
tools/esp_app_trace/test/logtrace/test.sh
tools/esp_app_trace/test/sysview/compare_json.py
tools/esp_app_trace/test/sysview/test.sh
tools/find_apps.py
tools/format.sh
@@ -1,3 +1,4 @@
import sys
import os
try:
    from urlparse import urlparse
@@ -16,6 +17,13 @@ import elftools.elf.elffile as elffile
import elftools.elf.constants as elfconst


def clock():
    if sys.version_info >= (3, 3):
        return time.process_time()
    else:
        return time.clock()


def addr2line(toolchain, elf_path, addr):
    """
    Creates trace reader.
@@ -170,12 +178,12 @@ class FileReader(Reader):
        see Reader.read()
        """
        data = b''
        start_tm = time.clock()
        start_tm = clock()
        while not self.need_stop:
            data += self.trace_file.read(sz - len(data))
            if len(data) == sz:
                break
            if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
            if self.timeout != -1 and clock() >= start_tm + self.timeout:
                raise ReaderTimeoutError(self.timeout, sz)
        if self.need_stop:
            raise ReaderShutdownRequest()
@@ -197,12 +205,12 @@ class FileReader(Reader):
        see Reader.read()
        """
        line = ''
        start_tm = time.clock()
        start_tm = clock()
        while not self.need_stop:
            line += self.trace_file.readline().decode("utf-8")
            if line.endswith(linesep):
                break
            if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
            if self.timeout != -1 and clock() >= start_tm + self.timeout:
                raise ReaderTimeoutError(self.timeout, 1)
        if self.need_stop:
            raise ReaderShutdownRequest()
@@ -213,12 +221,12 @@ class FileReader(Reader):
        see Reader.read()
        """
        cur_pos = self.trace_file.tell()
        start_tm = time.clock()
        start_tm = clock()
        while not self.need_stop:
            file_sz = os.path.getsize(self.trace_file_path)
            if file_sz - cur_pos >= sz:
                break
            if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
            if self.timeout != -1 and clock() >= start_tm + self.timeout:
                raise ReaderTimeoutError(self.timeout, sz)
        if self.need_stop:
            raise ReaderShutdownRequest()
@@ -351,6 +359,34 @@ def reader_create(trc_src, tmo):
    return None


class TraceEvent:
    """
    Base class for all trace events.
    """
    def __init__(self, name, core_id, evt_id):
        self.name = name
        self.ctx_name = 'None'
        self.in_irq = False
        self.core_id = core_id
        self.id = evt_id
        self.ts = 0
        self.params = {}

    @property
    def ctx_desc(self):
        if self.in_irq:
            return 'IRQ "%s"' % self.ctx_name
        return 'task "%s"' % self.ctx_name

    def to_jsonable(self):
        res = self.__dict__
        params = {}
        for p in self.params:
            params.update(self.params[p].to_jsonable())
        res['params'] = params
        return res


class TraceDataProcessor:
    """
    Base abstract class for all trace data processors.
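A minimal sketch of how the new TraceEvent.to_jsonable() flattens an event for the JSON encoder. The parameter class below is a stand-in invented for illustration (real events store SysViewEventParam objects with the same to_jsonable() contract), and the event name/id are arbitrary:

    class FakeParam:
        def __init__(self, name, value):
            self.name = name
            self.value = value

        def to_jsonable(self):
            # same contract as SysViewEventParam.to_jsonable()
            return {self.name: self.value}


    evt = TraceEvent('svPrint', core_id=0, evt_id=26)
    evt.params = {'msg': FakeParam('msg', 'hello'), 'lvl': FakeParam('lvl', 0)}
    print(evt.to_jsonable())
    # {'name': 'svPrint', 'ctx_name': 'None', 'in_irq': False, 'core_id': 0,
    #  'id': 26, 'ts': 0, 'params': {'msg': 'hello', 'lvl': 0}}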
@@ -593,66 +629,58 @@ class HeapTraceEvent:
    """
    Heap trace event.
    """
    def __init__(self, ctx_name, in_irq, core_id, ts, alloc, size, addr, callers, toolchain='', elf_path=''):
    def __init__(self, trace_event, alloc, toolchain='', elf_path=''):
        """
        Constructor.

        Parameters
        ----------
        ctx_name : string
            name of event context (task or IRQ name)
        in_irq : bool
            True if event has been generated in IRQ context, otherwise False
        core_id : int
            core which generated the event
        ts : float
            event timestamp
        sys_view_event : TraceEvent
            trace event object related to this heap event
        alloc : bool
            True for allocation event, otherwise False
        size : int
            size of allocation; has no meaning for de-allocation event
        addr : int
            address of allocation/de-allocation
        callers : list
            list of callers (callstack) for event
        toolchain_pref : string
            toolchain prefix to retrieve source line locations using addresses
        elf_path : string
            path to ELF file to retrieve format strings for log messages
        """
        self.ctx_name = ctx_name
        self.in_irq = in_irq
        self.core_id = core_id
        self.ts = ts
        self.trace_event = trace_event
        self.alloc = alloc
        self.size = size
        self.addr = addr
        self.callers = callers
        self.toolchain = toolchain
        self.elf_path = elf_path
        if self.alloc:
            self.size = self.trace_event.params['size'].value
        else:
            self.size = None

    @property
    def addr(self):
        return self.trace_event.params['addr'].value

    def __repr__(self):
        if len(self.toolchain) and len(self.elf_path):
            callers = os.linesep
            for addr in self.callers:
            for addr in self.trace_event.params['callers'].value:
                if addr == 0:
                    break
                callers += '{}'.format(addr2line(self.toolchain, self.elf_path, addr))
        else:
            callers = ''
            for addr in self.callers:
            for addr in self.trace_event.params['callers'].value:
                if addr == 0:
                    break
                if len(callers):
                    callers += ':'
                callers += '0x{:x}'.format(addr)
        if self.in_irq:
            ctx_desc = 'IRQ "%s"' % self.ctx_name
        else:
            ctx_desc = 'task "%s"' % self.ctx_name
        if self.alloc:
            return "[{:.9f}] HEAP: Allocated {:d} bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.ts, self.size,
                                                                                                     self.addr, ctx_desc,
                                                                                                     self.core_id, callers)
            return "[{:.9f}] HEAP: Allocated {:d} bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.trace_event.ts,
                                                                                                     self.size, self.addr,
                                                                                                     self.trace_event.ctx_desc,
                                                                                                     self.trace_event.core_id, callers)
        else:
            return "[{:.9f}] HEAP: Freed bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.ts, self.addr, ctx_desc,
                                                                                            self.core_id, callers)
            return "[{:.9f}] HEAP: Freed bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.trace_event.ts,
                                                                                            self.addr, self.trace_event.ctx_desc,
                                                                                            self.trace_event.core_id, callers)


class BaseHeapTraceDataProcessorImpl:
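With the new signature a HeapTraceEvent wraps the decoded SystemView event instead of copying its fields. A minimal sketch of constructing one, reusing the FakeParam stand-in from the sketch above (the event name, id and addresses are illustrative, not from the diff):

    fake = TraceEvent('svHeapAlloc', core_id=0, evt_id=512)
    fake.params = {'addr': FakeParam('addr', 0x3ffb1234),
                   'size': FakeParam('size', 32),
                   'callers': FakeParam('callers', [0x400d1234, 0])}
    # toolchain/elf_path are only needed if callers should be resolved to source lines
    alloc_event = HeapTraceEvent(fake, True)
    print(alloc_event)
    # [0.000000000] HEAP: Allocated 32 bytes @ 0x3ffb1234 from task "None" on core 0 by: 0x400d1234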
@@ -1,6 +1,7 @@
import re
import struct
import copy
import json
import espytrace.apptrace as apptrace

@@ -40,6 +41,38 @@ SYSVIEW_MODULE_EVENT_OFFSET = 512

SYSVIEW_SYNC_LEN = 10

_sysview_events_map = {
    "SYS_NOP": SYSVIEW_EVTID_NOP,
    "SYS_OVERFLOW": SYSVIEW_EVTID_OVERFLOW,
    "SYS_ISR_ENTER": SYSVIEW_EVTID_ISR_ENTER,
    "SYS_ISR_EXIT": SYSVIEW_EVTID_ISR_EXIT,
    "SYS_TASK_START_EXEC": SYSVIEW_EVTID_TASK_START_EXEC,
    "SYS_TASK_STOP_EXEC": SYSVIEW_EVTID_TASK_STOP_EXEC,
    "SYS_TASK_START_READY": SYSVIEW_EVTID_TASK_START_READY,
    "SYS_TASK_STOP_READY": SYSVIEW_EVTID_TASK_STOP_READY,
    "SYS_TASK_CREATE": SYSVIEW_EVTID_TASK_CREATE,
    "SYS_TASK_INFO": SYSVIEW_EVTID_TASK_INFO,
    "SYS_TRACE_START": SYSVIEW_EVTID_TRACE_START,
    "SYS_TRACE_STOP": SYSVIEW_EVTID_TRACE_STOP,
    "SYS_SYSTIME_CYCLES": SYSVIEW_EVTID_SYSTIME_CYCLES,
    "SYS_SYSTIME_US": SYSVIEW_EVTID_SYSTIME_US,
    "SYS_SYSDESC": SYSVIEW_EVTID_SYSDESC,
    "SYS_USER_START": SYSVIEW_EVTID_USER_START,
    "SYS_USER_STOP": SYSVIEW_EVTID_USER_STOP,
    "SYS_IDLE": SYSVIEW_EVTID_IDLE,
    "SYS_ISR_TO_SCHEDULER": SYSVIEW_EVTID_ISR_TO_SCHEDULER,
    "SYS_TIMER_ENTER": SYSVIEW_EVTID_TIMER_ENTER,
    "SYS_TIMER_EXIT": SYSVIEW_EVTID_TIMER_EXIT,
    "SYS_STACK_INFO": SYSVIEW_EVTID_STACK_INFO,
    "SYS_MODULEDESC": SYSVIEW_EVTID_INIT,
    "SYS_INIT": SYSVIEW_EVTID_INIT,
    "SYS_NAME_RESOURCE": SYSVIEW_EVTID_NAME_RESOURCE,
    "SYS_PRINT_FORMATTED": SYSVIEW_EVTID_PRINT_FORMATTED,
    "SYS_NUMMODULES": SYSVIEW_EVTID_NUMMODULES
}

_os_events_map = {}


def parse_trace(reader, parser, os_evt_map_file=''):
    """
@@ -54,12 +87,13 @@ def parse_trace(reader, parser, os_evt_map_file=''):
    os_evt_map_file : string
        Path to file containing events format description.
    """
    global _os_events_map
    # parse OS events formats file
    os_evt_map = _read_events_map(os_evt_map_file)
    _os_events_map = _read_events_map(os_evt_map_file)
    _read_file_header(reader)
    _read_init_seq(reader)
    while True:
        event = parser.read_event(reader, os_evt_map)
        event = parser.read_event(reader, _os_events_map)
        parser.on_new_event(event)
@@ -267,7 +301,7 @@ class SysViewTraceParseError(apptrace.ParseError):
    pass


class SysViewEvent:
class SysViewEvent(apptrace.TraceEvent):
    """
    Generic SystemView event class. This is a base class for all events.
    """
@@ -286,11 +320,8 @@ class SysViewEvent:
        events_fmt_map : dict
            see return value of _read_events_map()
        """
        self.name = 'SysViewEvent'
        self.core_id = core_id
        self.id = evt_id
        apptrace.TraceEvent.__init__(self, 'SysViewEvent', core_id, evt_id)
        self.plen = 0
        self.params = {}
        if self.id >= SYSVIEW_EVENT_ID_PREDEF_LEN_MAX:
            self.plen = _decode_plen(reader)
        if events_fmt_map:
@@ -385,6 +416,9 @@ class SysViewEventParam:
    def __str__(self):
        return '{}: {}'.format(self.name, self.value)

    def to_jsonable(self):
        return {self.name: self.value}


class SysViewEventParamSimple(SysViewEventParam):
    """
@@ -477,7 +511,7 @@ class SysViewPredefinedEvent(SysViewEvent):
        SYSVIEW_EVTID_NAME_RESOURCE: ('svNameResource', [SysViewEventParamSimple('res_id', _decode_u32),
                                                         SysViewEventParamSimple('name', _decode_str)]),
        SYSVIEW_EVTID_PRINT_FORMATTED: ('svPrint', [SysViewEventParamSimple('msg', _decode_str),
                                                    SysViewEventParamSimple('id', _decode_u32),
                                                    SysViewEventParamSimple('lvl', _decode_u32),
                                                    SysViewEventParamSimple('unused', _decode_u32)]),
        SYSVIEW_EVTID_NUMMODULES: ('svNumModules', [SysViewEventParamSimple('mod_cnt', _decode_u32)]),
    }
@@ -486,8 +520,8 @@ class SysViewPredefinedEvent(SysViewEvent):
        """
        see SysViewEvent.__init__()
        """
        self.name = 'SysViewPredefinedEvent'
        SysViewEvent.__init__(self, evt_id, reader, core_id, self._predef_events_fmt)
        # self.name = 'SysViewPredefinedEvent'


class SysViewOSEvent(SysViewEvent):
@@ -498,8 +532,8 @@ class SysViewOSEvent(SysViewEvent):
        """
        see SysViewEvent.__init__()
        """
        self.name = 'SysViewOSEvent'
        SysViewEvent.__init__(self, evt_id, reader, core_id, events_fmt_map)
        # self.name = 'SysViewOSEvent'


class SysViewHeapEvent(SysViewEvent):
@@ -534,11 +568,11 @@ class SysViewHeapEvent(SysViewEvent):
        core_id : int
            see SysViewEvent.__init__()
        """
        self.name = 'SysViewHeapEvent'
        cur_events_map = {}
        for id in self.events_fmt:
            cur_events_map[events_off + id] = self.events_fmt[id]
        SysViewEvent.__init__(self, evt_id, reader, core_id, cur_events_map)
        # self.name = 'SysViewHeapEvent'


class SysViewTraceDataParser(apptrace.TraceDataProcessor):
@@ -569,7 +603,7 @@ class SysViewTraceDataParser(apptrace.TraceDataProcessor):
        core_id : int
            id of the core this parser object relates to.
        """
        apptrace.TraceDataProcessor.__init__(self, print_events, keep_all_events=True)
        apptrace.TraceDataProcessor.__init__(self, print_events=print_events, keep_all_events=True)
        self.sys_info = None
        self._last_ts = 0
        self.irqs_info = {}
@@ -670,6 +704,9 @@ class SysViewTraceDataParser(apptrace.TraceDataProcessor):
        else:
            return self.read_extension_event(evt_id, reader)

    def event_supported(self, event):
        return False

    def on_new_event(self, event):
        """
        Does essential processing of event. Must be called for every read event.
@@ -701,6 +738,27 @@ class SysViewTraceDataParser(apptrace.TraceDataProcessor):
        apptrace.TraceDataProcessor.on_new_event(self, event)


class SysViewTraceDataExtEventParser(SysViewTraceDataParser):
    def __init__(self, events_num, print_events=False, core_id=0):
        """
        Constructor.

        Parameters
        ----------
        print_events : bool
            see apptrace.TraceDataProcessor.__init__()
        core_id : int
            id of the core this parser object relates to.
        """
        SysViewTraceDataParser.__init__(self, core_id=core_id, print_events=print_events)
        self.events_off = 0
        self.events_num = events_num

    def event_supported(self, event):
        return False if (self.events_off < SYSVIEW_MODULE_EVENT_OFFSET or event.id < self.events_off or
                         event.id >= (self.events_off + self.events_num)) else True


class SysViewMultiTraceDataParser(SysViewTraceDataParser):
    """
    SystemView trace data parser supporting multiple event streams.
@@ -794,7 +852,7 @@ class SysViewTraceDataProcessor(apptrace.TraceDataProcessor):
    """
    Base SystemView trace data processor class.
    """
    def __init__(self, traces, print_events=False, keep_all_events=False):
    def __init__(self, traces, root_proc=None, print_events=False, keep_all_events=False):
        """
        Constructor.

@@ -808,16 +866,20 @@ class SysViewTraceDataProcessor(apptrace.TraceDataProcessor):
            see apptrace.TraceDataProcessor.__init__()
        """
        apptrace.TraceDataProcessor.__init__(self, print_events, keep_all_events)
        self.event_ids = {}
        self.name = ""
        self.root_proc = root_proc if root_proc else self
        self.traces = {}
        self.ctx_stack = {}
        self.prev_ctx = {}
        self.no_ctx_events = []
        for t in traces:
            self.traces[t.core_id] = t
            # current context item is a tuple of task ID or IRQ num and 'in_irq' flag
            # empty list means IDLE context or self.start_ctx
            self.ctx_stack[t.core_id] = []
            # context is undefined, we do not know whether tracing started in task/IDLE or IRQ context
            # there are three scenarios when we can start tracing: when core is in task, IDLE task or IRQ context
            # in general there are three scenarios when we can start tracing: when core is in task, IDLE task or IRQ context
            self.prev_ctx[t.core_id] = None

    def _get_curr_context(self, core_id):
@@ -878,13 +940,25 @@ class SysViewTraceDataProcessor(apptrace.TraceDataProcessor):
            return self.traces[core_id]
        return self.root_proc.get_trace_stream(core_id, stream_id)

    def event_supported(self, e):
        """
        Should be overridden in child class.
        """
        return False

    def handle_event(self, e):
        """
        Should be overridden in child class.
        """
        pass

    def print_report(self):
        """
        see apptrace.TraceDataProcessor.print_report()
        """
        apptrace.TraceDataProcessor.print_report(self)

    def on_new_event(self, event):
    def _process_event(self, event):
        """
        Processes event.
        Keeps track of execution context on every core.
@@ -952,8 +1026,35 @@ class SysViewTraceDataProcessor(apptrace.TraceDataProcessor):
            else:
                # the 1st context switching event after trace start is SYSVIEW_EVTID_TASK_STOP_READY, so we have been in task context
                self.prev_ctx[event.core_id] = SysViewEventContext(event.params['tid'].value, False, trace.tasks_info[event.params['tid'].value])
        # count events

    def on_new_event(self, event):
        """
        Processes heap events.
        """
        if self.root_proc == self:
            SysViewTraceDataProcessor._process_event(self, event)
        curr_ctx = self._get_curr_context(event.core_id)
        if not curr_ctx:
            # postpone events handling till their context is known
            self.no_ctx_events.append(event)
            return
        event.in_irq = curr_ctx.irq
        event.ctx_name = curr_ctx.name
        # here we know the previous context: we switched from it or implied upon the 1st context switching event
        prev_ctx = self._get_prev_context(event.core_id)
        if len(self.no_ctx_events):
            for cached_evt in self.no_ctx_events:
                cached_evt.ctx_name = prev_ctx.name
                cached_evt.in_irq = prev_ctx.irq
                # count and save the event
                apptrace.TraceDataProcessor.on_new_event(self, cached_evt)
                if self.event_supported(event):
                    self.handle_event(event)
            del self.no_ctx_events[:]
        # count and save the event
        apptrace.TraceDataProcessor.on_new_event(self, event)
        if self.event_supported(event):
            self.handle_event(event)

    def merge_and_process(self):
        """
@@ -967,7 +1068,7 @@ class SysViewTraceDataProcessor(apptrace.TraceDataProcessor):
            self.on_new_event(event)


class SysViewMultiTraceDataProcessor(SysViewTraceDataProcessor):
class SysViewMultiStreamTraceDataProcessor(SysViewTraceDataProcessor):
    """
    SystemView trace data processor supporting multiple event streams.
    """
@@ -975,7 +1076,7 @@ class SysViewMultiTraceDataProcessor(SysViewTraceDataProcessor):
        """
        see SysViewTraceDataProcessor.__init__()
        """
        SysViewTraceDataProcessor.__init__(self, traces, print_events, keep_all_events)
        SysViewTraceDataProcessor.__init__(self, traces, print_events=print_events, keep_all_events=keep_all_events)
        self.stream_procs = {}

    def add_stream_processor(self, stream_id, proc):
@@ -1024,7 +1125,10 @@ class SysViewMultiTraceDataProcessor(SysViewTraceDataProcessor):
        Iterates over registered stream processors and prints their reports.
        """
        SysViewTraceDataProcessor.print_report(self)
        for stream_id in self.stream_procs:
        # need to sort stream procs by keys to print reports in the same order regardless of Python version
        stream_ids = list(self.stream_procs.keys())
        stream_ids.sort()
        for stream_id in stream_ids:
            self.stream_procs[stream_id].print_report()

    def cleanup(self):
@@ -1050,18 +1154,64 @@ class SysViewMultiTraceDataProcessor(SysViewTraceDataProcessor):
            self.stream_procs[stream_id].on_new_event(event)


class SysViewHeapTraceDataParser(SysViewTraceDataParser):
class SysViewTraceDataJsonEncoder(json.JSONEncoder):
    JSON_TRACE_VER = "1.0"

    def default(self, obj):
        global _sysview_events_map
        global _os_events_map
        if isinstance(obj, SysViewMultiStreamTraceDataProcessor):
            json_event_ids = {"system": _sysview_events_map, "os": {}}
            for eid in _os_events_map:
                ename = _os_events_map[eid][0]
                json_event_ids['os'][ename] = eid
            for stream in obj.stream_procs.values():
                json_event_ids[stream.name] = stream.event_ids
            json_events = []
            for e in obj.events:
                for stream in obj.stream_procs.values():
                    if stream.event_supported(e):
                        json_events.append(e)
                        break
                # include also OS and pre-defined events
                if isinstance(e, SysViewPredefinedEvent) or isinstance(e, SysViewOSEvent):
                    json_events.append(e)
            return {"version": self.JSON_TRACE_VER, "streams": json_event_ids, "events": json_events}
        if isinstance(obj, SysViewHeapEvent):
            blk_size = 0
            if "size" in obj.params:
                blk_size = obj.params["size"].value
            blk_addr = "0x{:x}".format(obj.params["addr"].value)
            callers = []
            for addr in obj.params['callers'].value:
                callers.append('0x{:x}'.format(addr))
            return {"ctx_name": obj.ctx_name, "in_irq": obj.in_irq, "id": obj.id, "core_id": obj.core_id,
                    "ts": obj.ts, "addr": blk_addr, "size": blk_size, "callers": callers}
        if isinstance(obj, SysViewPredefinedEvent) and obj.id == SYSVIEW_EVTID_PRINT_FORMATTED:
            return {"ctx_name": obj.ctx_name, "in_irq": obj.in_irq, "id": obj.id, "core_id": obj.core_id,
                    "ts": obj.ts, "msg": obj.params["msg"].value, "lvl": obj.params["lvl"].value}
        if isinstance(obj, SysViewEvent):
            jobj = obj.to_jsonable()
            # remove unused fields
            if 'name' in jobj:
                del jobj['name']
            if 'plen' in jobj:
                del jobj['plen']
            return jobj
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)


class SysViewHeapTraceDataParser(SysViewTraceDataExtEventParser):
    """
    SystemView trace data parser supporting heap events.
    """
    def __init__(self, print_events=False, core_id=0):
        """
        SystemView trace data parser supporting multiple event streams.
        see SysViewTraceDataParser.__init__()
        see SysViewTraceDataExtEventParser.__init__()
        """
        SysViewTraceDataParser.__init__(self, print_events, core_id)
        self.events_off = 0
        self.events_num = len(SysViewHeapEvent.events_fmt.keys())
        SysViewTraceDataExtEventParser.__init__(self, events_num=len(SysViewHeapEvent.events_fmt.keys()), core_id=core_id, print_events=print_events)

    def read_extension_event(self, evt_id, reader):
        """
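Rough shape of the JSON document the new encoder emits when --to-json is used. The stream names come from the processors registered further below ("heap", "log"); the concrete id numbers here are illustrative only, since heap event ids depend on the module offset reported in the trace and the OS map comes from the events map file:

    example_trace_json = {
        "version": "1.0",                        # SysViewTraceDataJsonEncoder.JSON_TRACE_VER
        "streams": {
            "system": {"SYS_NOP": 0},            # subset of _sysview_events_map
            "os": {"vTaskDelay": 34},            # built from the events map file (-e)
            "heap": {"alloc": 512, "free": 513}, # offsets are run-specific
            "log": {"print": 26},                # SYSVIEW_EVTID_PRINT_FORMATTED
        },
        "events": [],                            # one dict per event, see default() above
    }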
@@ -1087,57 +1237,34 @@ class SysViewHeapTraceDataProcessor(SysViewTraceDataProcessor, apptrace.BaseHeap
    """
    SystemView trace data processor supporting heap events.
    """
    def __init__(self, toolchain_pref, elf_path, traces=[], print_events=False, print_heap_events=False):
    def __init__(self, toolchain_pref, elf_path, root_proc=None, traces=[], print_events=False, print_heap_events=False):
        """
        Constructor.
        see SysViewTraceDataProcessor.__init__()
        see apptrace.BaseHeapTraceDataProcessorImpl.__init__()
        """
        SysViewTraceDataProcessor.__init__(self, traces, print_events)
        SysViewTraceDataProcessor.__init__(self, traces, root_proc=root_proc, print_events=print_events)
        apptrace.BaseHeapTraceDataProcessorImpl.__init__(self, print_heap_events)
        self.toolchain = toolchain_pref
        self.elf_path = elf_path
        self.no_ctx_events = []
        # self.no_ctx_events = []
        self.name = "heap"
        stream = self.root_proc.get_trace_stream(0, SysViewTraceDataParser.STREAMID_HEAP)
        self.event_ids = {"alloc": stream.events_off, "free": stream.events_off + 1}

    def on_new_event(self, event):
        """
        Processes heap events.
        """
        if self.root_proc == self:
            SysViewTraceDataProcessor.on_new_event(self, event)
    def event_supported(self, event):
        heap_stream = self.root_proc.get_trace_stream(event.core_id, SysViewTraceDataParser.STREAMID_HEAP)
        return heap_stream.event_supported(event)

    def handle_event(self, event):
        heap_stream = self.root_proc.get_trace_stream(event.core_id, SysViewTraceDataParser.STREAMID_HEAP)
        if (heap_stream.events_off < SYSVIEW_MODULE_EVENT_OFFSET or event.id < heap_stream.events_off or
                event.id >= (heap_stream.events_off + heap_stream.events_num)):
            return
        curr_ctx = self._get_curr_context(event.core_id)
        if curr_ctx:
            in_irq = curr_ctx.irq
            ctx_name = curr_ctx.name
        else:
            in_irq = False
            ctx_name = 'None'
        if (event.id - heap_stream.events_off) == 0:
            heap_event = apptrace.HeapTraceEvent(ctx_name, in_irq, event.core_id, event.ts,
                                                 True, event.params['size'].value, event.params['addr'].value,
                                                 event.params['callers'].value, toolchain=self.toolchain,
            heap_event = apptrace.HeapTraceEvent(event, True, toolchain=self.toolchain,
                                                 elf_path=self.elf_path)
        else:
            heap_event = apptrace.HeapTraceEvent(ctx_name, in_irq, event.core_id, event.ts,
                                                 False, 0, event.params['addr'].value,
                                                 event.params['callers'].value, toolchain=self.toolchain,
            heap_event = apptrace.HeapTraceEvent(event, False, toolchain=self.toolchain,
                                                 elf_path=self.elf_path)
        if not curr_ctx:
            # postpone events handling till their context is known
            self.no_ctx_events.append(heap_event)
        else:
            # here we know the previous context: we switched from it or implied upon the 1st context switching event
            prev_ctx = self._get_prev_context(event.core_id)
            for cached_evt in self.no_ctx_events:
                cached_evt.ctx_name = prev_ctx.name
                cached_evt.in_irq = prev_ctx.irq
                apptrace.BaseHeapTraceDataProcessorImpl.on_new_event(self, cached_evt)
            del self.no_ctx_events[:]
            apptrace.BaseHeapTraceDataProcessorImpl.on_new_event(self, heap_event)
        apptrace.BaseHeapTraceDataProcessorImpl.on_new_event(self, heap_event)

    def print_report(self):
        """
@@ -1180,6 +1307,9 @@ class SysViewLogTraceDataParser(SysViewTraceDataParser):
    """
    SystemView trace data parser supporting log events.
    """
    def event_supported(self, event):
        return event.id == SYSVIEW_EVTID_PRINT_FORMATTED

    def on_new_event(self, event):
        """
        see SysViewTraceDataParser.on_new_event()
@@ -1192,14 +1322,19 @@ class SysViewLogTraceDataProcessor(SysViewTraceDataProcessor, apptrace.BaseLogTr
    """
    SystemView trace data processor supporting heap events.
    """
    def __init__(self, traces=[], print_events=False, print_log_events=False):
    def __init__(self, traces=[], root_proc=None, print_events=False, print_log_events=False):
        """
        Constructor.
        see SysViewTraceDataProcessor.__init__()
        see apptrace.BaseLogTraceDataProcessorImpl.__init__()
        """
        SysViewTraceDataProcessor.__init__(self, traces, print_events)
        SysViewTraceDataProcessor.__init__(self, traces, root_proc=root_proc, print_events=print_events)
        apptrace.BaseLogTraceDataProcessorImpl.__init__(self, print_log_events)
        self.name = "log"
        self.event_ids = {"print": SYSVIEW_EVTID_PRINT_FORMATTED}

    def event_supported(self, event):
        return event.id == SYSVIEW_EVTID_PRINT_FORMATTED

    def on_new_event(self, event):
        """
@@ -24,22 +24,34 @@ import sys
import os.path
import signal
import traceback
import logging
import json
import espytrace.apptrace as apptrace
import espytrace.sysview as sysview


def main():

    verbosity_levels = [
        logging.CRITICAL,
        logging.ERROR,
        logging.WARNING,
        logging.INFO,
        logging.DEBUG
    ]

    parser = argparse.ArgumentParser(description='ESP32 SEGGER SystemView Trace Parsing Tool')

    parser.add_argument('trace_sources', help='Trace data sources. Format: [file://]/path/to/file.', nargs='+', type=str)
    parser.add_argument('elf_file', help='Path to program ELF file.', type=str)
    parser.add_argument('--elf-file', '-b', help='Path to program ELF file.', type=str, default='')
    parser.add_argument('--tmo', '-w', help='Data wait timeout in sec. -1: infinite, 0: no wait', type=int, default=0)
    parser.add_argument('--dump-events', '-d', help='Dump all events.', action='store_true')
    parser.add_argument('--print-events', '-p', help='Print events of selected types. By default only reports are printed', action='store_true')
    parser.add_argument('--include-events', '-i', help='Events types to be included into report.', type=str, choices=['heap', 'log', 'all'], default='all')
    parser.add_argument('--toolchain', '-t', help='Toolchain prefix.', type=str, default='xtensa-esp32-elf-')
    parser.add_argument('--events-map', '-e', help='Events map file.', type=str, default=os.path.join(os.path.dirname(__file__), 'SYSVIEW_FreeRTOS.txt'))
    parser.add_argument('--to-json', '-j', help='Print JSON.', action='store_true', default=False)
    parser.add_argument('--verbose', '-v', help='Verbosity level. Default 1', choices=range(0, len(verbosity_levels)), type=int, default=1)
    args = parser.parse_args()

    def sig_int_handler(signum, frame):
@@ -56,6 +68,8 @@ def main():
    elif args.include_events == 'log':
        include_events['log'] = True

    logging.basicConfig(level=verbosity_levels[args.verbose], format='[%(levelname)s] %(message)s')

    # parse trace files
    parsers = []
    for i, trace_source in enumerate(args.trace_sources):
@@ -69,21 +83,22 @@ def main():
                                       sysview.SysViewLogTraceDataParser(print_events=False, core_id=i))
            parsers.append(parser)
        except Exception as e:
            print("Failed to create data parser ({})!".format(e))
            logging.error("Failed to create data parser (%s)!", e)
            traceback.print_exc()
            sys.exit(2)
        reader = apptrace.reader_create(trace_source, args.tmo)
        if not reader:
            print("Failed to create trace reader!")
            logging.error("Failed to create trace reader!")
            sys.exit(2)
        try:
            print("Parse trace from '{}'...".format(trace_source))
            # logging.info("Parse trace from '{}'...".format(trace_source))
            logging.info("Parse trace from '%s'...", trace_source)
            sysview.parse_trace(reader, parser, args.events_map)
            print("Parsing completed.")
            logging.info("Parsing completed.")
        except (apptrace.ReaderTimeoutError, apptrace.ReaderShutdownRequest) as e:
            print("Stop parsing trace. ({})".format(e))
            logging.info("Stop parsing trace. (%s)", e)
        except Exception as e:
            print("Failed to parse trace ({})!".format(e))
            logging.error("Failed to parse trace (%s)!", e)
            parser.cleanup()
            traceback.print_exc()
            sys.exit(2)
@@ -92,27 +107,32 @@ def main():

    # merge and process traces
    try:
        proc = sysview.SysViewMultiTraceDataProcessor(traces=parsers, print_events=args.dump_events)
        proc = sysview.SysViewMultiStreamTraceDataProcessor(traces=parsers, print_events=args.dump_events, keep_all_events=True if args.to_json else False)
        if include_events['heap']:
            proc.add_stream_processor(sysview.SysViewTraceDataParser.STREAMID_HEAP,
                                      sysview.SysViewHeapTraceDataProcessor(args.toolchain, args.elf_file, print_heap_events=args.print_events))
                                      sysview.SysViewHeapTraceDataProcessor(args.toolchain, args.elf_file, root_proc=proc, print_heap_events=args.print_events))
        if include_events['log']:
            proc.add_stream_processor(sysview.SysViewTraceDataParser.STREAMID_LOG,
                                      sysview.SysViewLogTraceDataProcessor(print_log_events=args.print_events))
                                      sysview.SysViewLogTraceDataProcessor(root_proc=proc, print_log_events=args.print_events))
    except Exception as e:
        print("Failed to create data processor ({})!".format(e))
        logging.error("Failed to create data processor (%s)!", e)
        traceback.print_exc()
        sys.exit(2)

    try:
        print("Process events from '{}'...".format(args.trace_sources))
        logging.info("Process events from '%s'...", args.trace_sources)
        proc.merge_and_process()
        print("Processing completed.")
        logging.info("Processing completed.")
    except Exception as e:
        print("Failed to process trace ({})!".format(e))
        logging.error("Failed to process trace (%s)!", e)
        traceback.print_exc()
        sys.exit(2)
    finally:
        proc.print_report()
        if args.to_json:
            print(json.dumps(proc, cls=sysview.SysViewTraceDataJsonEncoder, indent=4, separators=(',', ': '), sort_keys=True))
        else:
            proc.print_report()
        proc.cleanup()


if __name__ == '__main__':
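The core of the new JSON path is the json.dumps() call in the finally block above. A minimal sketch of reusing it outside of main(), assuming a SysViewMultiStreamTraceDataProcessor built and processed the same way as proc above:

    import json

    import espytrace.sysview as sysview


    def dump_trace_json(proc):
        # proc: a fully processed SysViewMultiStreamTraceDataProcessor
        return json.dumps(proc, cls=sysview.SysViewTraceDataJsonEncoder,
                          indent=4, separators=(',', ': '), sort_keys=True)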
tools/esp_app_trace/test/sysview/compare_json.py (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env python

import json
import sys

if len(sys.argv) < 3:
    sys.exit(-1)

objs = []
for i in range(1, len(sys.argv)):
    f = open(sys.argv[i])
    objs.append(json.load(f))

for i in range(1, len(objs)):
    if objs[i - 1] != objs[i]:
        sys.exit(-1)
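compare_json.py exits with a non-zero status unless every JSON file named on its command line parses to an equal object. A hypothetical invocation (the file names are illustrative):

    import subprocess

    rc = subprocess.call(['python', 'compare_json.py', 'output.json', 'expected_output.json'])
    print('traces match' if rc == 0 else 'traces differ')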
@@ -1,8 +1,3 @@
Parse trace from 'cpu0.svdat'...
Stop parsing trace. (Timeout 0.000000 sec while reading 1 bytes!)
Parse trace from 'cpu1.svdat'...
Stop parsing trace. (Timeout 0.000000 sec while reading 1 bytes!)
Process events from '['cpu0.svdat', 'cpu1.svdat']'...
EVENT[0]: 0.000000000 - core[0].svTraceStart(10), plen 0: []
EVENT[1]: 0.000000000 - core[1].svTraceStart(10), plen 0: []
EVENT[2]: 0.000010950 - core[0].svInit(24), plen 14: [sys_freq: 40000000, cpu_freq: 160000000, ram_base: 1061158912, id_shift: 0]
@@ -417,7 +412,6 @@ EVENT[354]: 0.148825750 - core[0].vTaskDelay(34), plen 1: [xTicksToDelay: 1]
EVENT[355]: 0.148833200 - core[0].svTaskStopReady(7), plen 0: [tid: 12291908, cause: 4]
EVENT[356]: 0.148839250 - core[0].svTraceStop(11), plen 0: []
EVENT[357]: 0.148839250 - core[1].svTraceStop(11), plen 0: []
Processing completed.
Processed 358 events
=============== LOG TRACE REPORT ===============
Processed 0 log messages.
tools/esp_app_trace/test/sysview/expected_output.json (new file, 3960 lines; diff suppressed because it is too large)
@@ -2,7 +2,15 @@

{ coverage debug sys \
    && coverage erase &> output \
    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p cpu0.svdat cpu1.svdat test.elf &>> output \
    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -d -p -b test.elf cpu0.svdat cpu1.svdat &>> output \
    && diff output expected_output \
    && coverage report \
; } || { echo 'The test for sysviewtrace_proc has failed. Please examine the artifacts.' ; exit 1; }

{ coverage debug sys \
    && coverage erase &> output.json \
    && coverage run -a $IDF_PATH/tools/esp_app_trace/sysviewtrace_proc.py -j -b test.elf cpu0.svdat cpu1.svdat &>> output.json \
    && diff output.json expected_output.json \
    && coverage report \
; } || { echo 'The test for sysviewtrace_proc JSON functionality has failed. Please examine the artifacts.' ; exit 1; }