Mirror of https://github.com/espressif/esp-idf.git
idf_size.py: can alternatively write data to file
* Also changed JSON separators - no trailing spaces at end of line
commit 53aadafac8
parent 2cb7534bc5
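At its core, the change turns the print_* helpers in idf_size.py into get_* functions that return strings; main() concatenates those strings and writes the result to a -o/--output-file argument of type argparse.FileType('w'), which defaults to sys.stdout. A minimal sketch of that pattern, standard library only (get_summary here is a stub standing in for the real report builder, not the tool's actual implementation):

    import argparse
    import sys


    def get_summary():
        # Stand-in for idf_size.py's get_summary(): build the report as a string
        # instead of printing it line by line.
        return "Total sizes:\n"


    def main():
        parser = argparse.ArgumentParser()
        # argparse.FileType('w') opens the given path for writing; because the
        # default is sys.stdout, omitting -o keeps the old print-to-console behaviour.
        parser.add_argument('-o', '--output-file',
                            type=argparse.FileType('w'),
                            default=sys.stdout,
                            help="Print output to the specified file instead of stdout")
        args = parser.parse_args()

        output = ""
        output += get_summary()         # accumulate all report parts in one string
        args.output_file.write(output)  # single write target: console or file


    if __name__ == "__main__":
        main()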
tools/idf_size.py

@@ -31,21 +31,8 @@ import re
 import sys
 
 DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
-
-CHIP_SIZES = {
-    "esp32": {
-        "total_iram": 0x20000,
-        "total_irom": 0x330000,
-        "total_drom": 0x800000,
-        # total dram is determined from objdump output
-    }
-}
-
-
-def _json_dump(obj):
-    """ Pretty-print JSON object to stdout """
-    json.dump(obj, sys.stdout, indent=4)
-    print('\n')
+GLOBAL_JSON_INDENT = 4
+GLOBAL_JSON_SEPARATORS = (',', ': ')
 
 
 def scan_to_header(f, header_line):
@@ -56,6 +43,10 @@ def scan_to_header(f, header_line):
     raise RuntimeError("Didn't find line '%s' in file" % header_line)
 
 
+def format_json(json_object):
+    return json.dumps(json_object, indent=GLOBAL_JSON_INDENT, separators=GLOBAL_JSON_SEPARATORS) + "\n"
+
+
 def load_map_data(map_file):
     memory_config = load_memory_config(map_file)
     sections = load_sections(map_file)
@@ -205,21 +196,33 @@ def main():
     parser.add_argument(
         '--files', help='Print per-file sizes', action='store_true')
 
+    parser.add_argument(
+        '-o',
+        '--output-file',
+        type=argparse.FileType('w'),
+        default=sys.stdout,
+        help="Print output to the specified file instead of stdout")
+
     args = parser.parse_args()
 
+    output = ""
+
     memory_config, sections = load_map_data(args.map_file)
     if not args.json or not (args.archives or args.files or args.archive_details):
-        print_summary(memory_config, sections, args.json)
+        output += get_summary(memory_config, sections, args.json)
 
     if args.archives:
-        print_detailed_sizes(sections, "archive", "Archive File", args.json)
+        output += get_detailed_sizes(sections, "archive", "Archive File", args.json)
     if args.files:
-        print_detailed_sizes(sections, "file", "Object File", args.json)
+        output += get_detailed_sizes(sections, "file", "Object File", args.json)
 
     if args.archive_details:
-        print_archive_symbols(sections, args.archive_details, args.json)
+        output += get_archive_symbols(sections, args.archive_details, args.json)
+
+    args.output_file.write(output)
 
 
-def print_summary(memory_config, sections, as_json=False):
+def get_summary(memory_config, sections, as_json=False):
     def get_size(section):
         try:
             return sections[section]["size"]
@@ -245,8 +248,9 @@ def print_summary(memory_config, sections, as_json=False):
     flash_rodata = get_size(".flash.rodata")
     total_size = used_data + used_iram + flash_code + flash_rodata
 
+    output = ""
     if as_json:
-        _json_dump(collections.OrderedDict([
+        output = format_json(collections.OrderedDict([
             ("dram_data", used_data),
             ("dram_bss", used_bss),
             ("used_dram", used_dram),
@@ -260,19 +264,21 @@ def print_summary(memory_config, sections, as_json=False):
             ("total_size", total_size)
         ]))
     else:
-        print("Total sizes:")
-        print(" DRAM .data size: %7d bytes" % used_data)
-        print(" DRAM .bss size: %7d bytes" % used_bss)
-        print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
-              (used_dram, total_dram - used_dram, 100.0 * used_dram_ratio))
-        print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
-              (used_iram, total_iram - used_iram, 100.0 * used_iram_ratio))
-        print(" Flash code: %7d bytes" % flash_code)
-        print(" Flash rodata: %7d bytes" % flash_rodata)
-        print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
+        output += "Total sizes:\n"
+        output += " DRAM .data size: {:>7} bytes\n".format(used_data)
+        output += " DRAM .bss size: {:>7} bytes\n".format(used_bss)
+        output += "Used static DRAM: {:>7} bytes ({:>7} available, {:.1%} used)\n".format(
+            used_dram, total_dram - used_dram, used_dram_ratio)
+        output += "Used static IRAM: {:>7} bytes ({:>7} available, {:.1%} used)\n".format(
+            used_iram, total_iram - used_iram, used_iram_ratio)
+        output += " Flash code: {:>7} bytes\n".format(flash_code)
+        output += " Flash rodata: {:>7} bytes\n".format(flash_rodata)
+        output += "Total image size:~{:>7} bytes (.bin may be padded larger)\n".format(total_size)
+
+    return output
 
 
-def print_detailed_sizes(sections, key, header, as_json=False):
+def get_detailed_sizes(sections, key, header, as_json=False):
     sizes = sizes_by_key(sections, key)
 
     result = {}
@@ -297,33 +303,37 @@ def print_detailed_sizes(sections, key, header, as_json=False):
     # do a secondary sort in order to have consistent order (for diff-ing the output)
     s = sorted(s, key=return_total_size, reverse=True)
 
+    output = ""
+
     if as_json:
-        _json_dump(collections.OrderedDict(s))
+        output = format_json(collections.OrderedDict(s))
     else:
-        print("Per-%s contributions to ELF file:" % key)
-        headings = (header,
-                    "DRAM .data",
-                    "& .bss",
-                    "IRAM",
-                    "Flash code",
-                    "& rodata",
-                    "Total")
-        header_format = "%24s %10d %6d %6d %10d %8d %7d"
-        print(header_format.replace("d", "s") % headings)
+        header_format = "{:>24} {:>10} {:>6} {:>6} {:>10} {:>8} {:>7}\n"
+
+        output += "Per-{} contributions to ELF file:\n".format(key)
+        output += header_format.format(header,
+                                       "DRAM .data",
+                                       "& .bss",
+                                       "IRAM",
+                                       "Flash code",
+                                       "& rodata",
+                                       "Total")
 
         for k,v in s:
             if ":" in k: # print subheadings for key of format archive:file
                 sh,k = k.split(":")
-            print(header_format % (k[:24],
-                                   v["data"],
-                                   v["bss"],
-                                   v["iram"],
-                                   v["flash_text"],
-                                   v["flash_rodata"],
-                                   v["total"]))
+            output += header_format.format(k[:24],
+                                           v["data"],
+                                           v["bss"],
+                                           v["iram"],
+                                           v["flash_text"],
+                                           v["flash_rodata"],
+                                           v["total"])
+
+    return output
 
 
-def print_archive_symbols(sections, archive, as_json=False):
+def get_archive_symbols(sections, archive, as_json=False):
     interested_sections = [".dram0.data", ".dram0.bss", ".iram0.text", ".iram0.vectors", ".flash.text", ".flash.rodata"]
     result = {}
     for t in interested_sections:
@@ -346,17 +356,20 @@ def print_archive_symbols(sections, archive, as_json=False):
         s = sorted(s, key=lambda k_v: k_v[1], reverse=True)
         section_symbols[t] = collections.OrderedDict(s)
 
+    output = ""
    if as_json:
-        _json_dump(section_symbols)
+        output = format_json(section_symbols)
    else:
-        print("Symbols within the archive: %s (Not all symbols may be reported)" % (archive))
+        output += "Symbols within the archive: {} (Not all symbols may be reported)\n".format(archive)
        for t,s in section_symbols.items():
            section_total = 0
-            print("\nSymbols from section:", t)
+            output += "\nSymbols from section: {}\n".format(t)
            for key, val in s.items():
-                print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
+                output += "{}({}) ".format(key.replace(t + ".", ""), val)
                section_total += val
-            print("\nSection total:",section_total)
+            output += "\nSection total: {}\n".format(section_total)
+
+    return output
 
 
 if __name__ == "__main__":
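The "no spaces at end of line" bullet refers to json.dumps behaviour: on Python 2.7 (which idf_size.py still supported at the time), indent=4 combined with the default item separator ', ' leaves a trailing space at the end of every line of the pretty-printed JSON; Python 3.4+ already switches to (',', ': ') whenever indent is given. Passing GLOBAL_JSON_SEPARATORS explicitly makes both interpreters produce the same clean output. A small illustration (the trailing spaces only show up on the older interpreters):

    import json

    data = {"dram_data": 9324, "dram_bss": 8296}

    # Default separators: on Python 2.7 each indented line ends in ", " -> trailing whitespace.
    legacy = json.dumps(data, indent=4)

    # Explicit separators, as in format_json(): "," at end of line, ": " between key and value.
    clean = json.dumps(data, indent=4, separators=(',', ': '))

    print(any(line.endswith(' ') for line in legacy.splitlines()))  # True on Python 2.7
    print(any(line.endswith(' ') for line in clean.splitlines()))   # False everywhere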
tools/test_idf_size/expected_output

@@ -391,7 +391,8 @@ Symbols from section: .flash.rodata
 str1.4(249) get_clk_en_mask(128) get_rst_en_mask(128) __FUNCTION__$5441(24) TG(8)
 Section total: 537
 
-***]nProducing JSON output...
+***
+Producing JSON output...
 {
     "dram_data": 9324,
     "dram_bss": 8296,
@@ -405,7 +406,6 @@ Section total: 537
     "flash_rodata": 39580,
     "total_size": 234780
 }
-
 {
     "liblwip.a": {
         "data": 14,
@@ -712,7 +712,6 @@ Section total: 537
         "total": 0
     }
 }
-
 {
     "libc.a:lib_a-vfprintf.o": {
         "data": 0,
@@ -2963,7 +2962,6 @@ Section total: 537
         "total": 0
     }
 }
-
 {
     ".dram0.data": {
         "timer_spinlock": 16,
@@ -3002,6 +3000,11 @@ Section total: 537
     }
 }
 
-
+***
+Producing JSON file output...
+
+***
+Producing text file output...
+
 ***
 Running idf_size_tests.py...
tools/test_idf_size/expected_output.json (new file, 13 lines)

@@ -0,0 +1,13 @@
+{
+    "dram_data": 9324,
+    "dram_bss": 8296,
+    "used_dram": 17620,
+    "available_dram": 163116,
+    "used_dram_ratio": 0.09749026203966006,
+    "used_iram": 38932,
+    "available_iram": 92140,
+    "used_iram_ratio": 0.297027587890625,
+    "flash_code": 146944,
+    "flash_rodata": 39580,
+    "total_size": 234780
+}
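As a sanity check on the numbers in expected_output.json: each ratio is used bytes divided by the total region size, and used + available adds up to that total (the IRAM region is 0x20000 = 131072 bytes). Plain arithmetic on the values above, not part of the test itself:

    # Values copied from expected_output.json
    used_iram, available_iram = 38932, 92140
    used_dram, available_dram = 17620, 163116

    assert used_iram + available_iram == 0x20000                               # 131072 bytes of IRAM
    assert used_iram / float(used_iram + available_iram) == 0.297027587890625  # used_iram_ratio
    assert abs(used_dram / float(used_dram + available_dram) - 0.09749026203966006) < 1e-12  # used_dram_ratio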
tools/test_idf_size/expected_output.txt (new file, 8 lines)

@@ -0,0 +1,8 @@
+Total sizes:
+ DRAM .data size: 9324 bytes
+ DRAM .bss size: 8296 bytes
+Used static DRAM: 17620 bytes ( 163116 available, 9.7% used)
+Used static IRAM: 38932 bytes ( 92140 available, 29.7% used)
+ Flash code: 146944 bytes
+ Flash rodata: 39580 bytes
+Total image size:~ 234780 bytes (.bin may be padded larger)
Test script:

@@ -10,14 +10,20 @@
 && coverage run -a $IDF_PATH/tools/idf_size.py --files app.map &>> output \
 && echo -e "\n***\nRunning idf_size.py --archive_details..." >> output \
 && coverage run -a $IDF_PATH/tools/idf_size.py --archive_details libdriver.a app.map &>> output \
-&& echo -e "\n***]nProducing JSON output..." >> output \
+&& echo -e "\n***\nProducing JSON output..." >> output \
 && coverage run -a $IDF_PATH/tools/idf_size.py --json app.map &>> output \
 && coverage run -a $IDF_PATH/tools/idf_size.py --json --archives app.map &>> output \
 && coverage run -a $IDF_PATH/tools/idf_size.py --json --files app.map &>> output \
 && coverage run -a $IDF_PATH/tools/idf_size.py --json --archive_details libdriver.a app.map &>> output \
+&& echo -e "\n***\nProducing JSON file output..." >> output \
+&& coverage run -a $IDF_PATH/tools/idf_size.py --json --output-file output.json app.map &>> output \
+&& echo -e "\n***\nProducing text file output..." >> output \
+&& coverage run -a $IDF_PATH/tools/idf_size.py -o output.txt app.map &>> output \
 && echo -e "\n***\nRunning idf_size_tests.py..." >> output \
 && coverage run -a $IDF_PATH/tools/test_idf_size/test_idf_size.py &>> output \
 && diff -Z output expected_output \
+&& diff -Z output.json expected_output.json \
+&& diff -Z output.txt expected_output.txt \
 && coverage report \
 ; } || { echo 'The test for idf_size has failed. Please examine the artifacts.' ; exit 1; }
tools/test_idf_size/test_idf_size.py

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 import sys
 
 try:
@@ -39,4 +40,4 @@ if __name__ == "__main__":
 
     # This used to crash with a division by zero error but now it just prints nan% due to
     # zero lengths
-    idf_size.print_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {})
+    print(idf_size.get_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {}), end="")
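The test follows the same refactor: get_summary() now returns a string that already ends in a newline, so the test prints it with end="" to avoid adding a second one, and from __future__ import print_function is what makes that keyword argument usable when the script runs under Python 2. A trimmed sketch of the idea (a stub stands in for the real idf_size module):

    from __future__ import print_function  # lets Python 2 accept print(..., end="")


    def get_summary():
        # Stand-in for idf_size.get_summary(); the real function formats map-file data.
        return "Total sizes:\n"


    # The returned report already ends with "\n", so suppress print's own newline.
    print(get_summary(), end="")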