#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import print_function, unicode_literals

import sys

from typing import Any, List, Optional, TextIO
# Pull in the python2/3 compatibility shims from the "future" package; abort
# early with a helpful message when the package is missing.
try:
    from builtins import object, range, str
except ImportError:
    # This should not happen because the Python packages are checked before invoking this script. However, here is
    # some output which should help if we missed something.
    print('Import has failed probably because of the missing "future" package. Please install all the packages for '
          'interpreter {} from the requirements.txt file.'.format(sys.executable))
    # The path to requirements.txt is not provided because this script could be invoked from an IDF project (then the
    # requirements.txt from the IDF_PATH should be used) or from the documentation project (then the requirements.txt
    # for the documentation directory should be used).
    sys.exit(1)
import argparse
import collections
import fnmatch
import functools
import os
import re
import textwrap
from io import open
# list files here which should not be parsed
ignore_files: list = list()

# add directories here which should not be parsed, this is a tuple since it will be used with *.startswith()
ignore_dirs = (os.path.join('examples'),
               os.path.join('components', 'cmock', 'CMock', 'test'),
               os.path.join('components', 'spi_flash', 'sim'))

# macros from here have higher priorities in case of collisions
priority_headers = [os.path.join('components', 'esp_common', 'include', 'esp_err.h')]

# The following headers won't be included. This is useful if they are permanently included from esp_err_to_name.c.in.
dont_include = [os.path.join('soc', 'soc.h'),
                os.path.join('esp_err.h')]

# Global parsing state shared by process() and process_remaining_errors().
err_dict = collections.defaultdict(list)  # identified errors are stored here; mapped by the error code
rev_err_dict = dict()  # map of error string to error code
unproc_list = list()  # errors with unknown codes which depend on other errors
class ErrItem(object):
    """
    One parsed error definition.

    - name - error string
    - file - relative path inside the IDF project to the file which defines this error
    - include_as - (optional) overwrites the include determined from file
    - comment - (optional) comment for the error
    - rel_str - (optional) error string which is a base for the error
    - rel_off - (optional) offset in relation to the base error
    """
    def __init__(self, name: str, file: str, include_as: Optional[Any]=None, comment: str='', rel_str: str='', rel_off: int=0) -> None:
        self.name = name
        self.file = file
        self.include_as = include_as
        self.comment = comment
        self.rel_str = rel_str
        self.rel_off = rel_off

    def __str__(self) -> str:
        # Human-readable form used in the duplicate-code warnings.
        pieces = [self.name, ' from ', self.file]
        if self.rel_str != '':
            pieces.append(' is (' + self.rel_str + ' + ' + str(self.rel_off) + ')')
        if self.comment != '':
            pieces.append(' // ' + self.comment)
        return ''.join(pieces)

    def __cmp__(self, other) -> int:
        """Ordering for colliding codes: priority headers first, then *_BASE last within a file, then lexicographic."""
        mine_prioritized = self.file in priority_headers
        theirs_prioritized = other.file in priority_headers
        if mine_prioritized and not theirs_prioritized:
            return -1
        if theirs_prioritized and not mine_prioritized:
            return 1

        suffix = '_BASE'
        if self.file == other.file:
            mine_is_base = self.name.endswith(suffix)
            theirs_is_base = other.name.endswith(suffix)
            if mine_is_base != theirs_is_base:
                # A *_BASE macro sorts after its siblings from the same file.
                return 1 if mine_is_base else -1

        left = self.file + self.name
        right = other.file + other.name
        # Three-way compare without if/elif chain: True/False arithmetic.
        return (left > right) - (left < right)
class InputError(RuntimeError):
    """
    Raised for a malformed error definition found in the input files.
    The message is "<path>: <details>".
    """
    def __init__(self, p: str, e: str) -> None:
        message = p + ': ' + e
        super(InputError, self).__init__(message)
def process(line: str, idf_path: str, include_as: Any) -> None:
    """
    Process one "#define" line of text from file idf_path (relative to IDF project).

    Errors whose numeric value is already known are stored into the global
    err_dict and rev_err_dict; errors depending on a BASE error which is not
    known yet are queued in the global unproc_list for
    process_remaining_errors().

    Raises InputError when the line comes from a C file or cannot be parsed.
    """
    if idf_path.endswith('.c'):
        # We would not try to include a C file
        raise InputError(idf_path, 'This line should be in a header file: %s' % line)

    # words[1] is the error name
    # words[2] is the rest of the line (value, base + value, comment)
    words = re.split(r' +', line, maxsplit=2)
    if len(words) < 3:
        raise InputError(idf_path, 'Error at line %s' % line)

    todo_str = words[2]

    # Name of the BASE error this one is relative to; None means the value is
    # absolute. (Replaces the original's fragile try/except NameError check.)
    related = None

    comment = ''
    # identify possible comment
    m = re.search(r'/\*!<(.+?(?=\*/))', todo_str)
    if m:
        comment = m.group(1).strip()
        todo_str = todo_str[:m.start()].strip()  # keep just the part before the comment

    # identify possible parentheses ()
    m = re.search(r'\((.+)\)', todo_str)
    if m:
        todo_str = m.group(1)  # keep what is inside the parentheses

    # identify BASE error code, e.g. from the form BASE + 0x01
    m = re.search(r'\s*(\w+)\s*\+(.+)', todo_str)
    if m:
        related = m.group(1)  # BASE
        todo_str = m.group(2)  # keep and process only what is after "BASE +"

    # try to match a hexadecimal number
    m = re.search(r'0x([0-9A-Fa-f]+)', todo_str)
    if m:
        num = int(m.group(1), 16)
    else:
        # Try to match a decimal number. Negative value is possible for some numbers, e.g. ESP_FAIL
        m = re.search(r'(-?[0-9]+)', todo_str)
        if m:
            num = int(m.group(1), 10)
        elif re.match(r'\w+', todo_str):
            # It is possible that there is no number, e.g. #define ERROR BASE
            related = todo_str  # BASE error
            num = 0  # (BASE + 0)
        else:
            # BUGFIX: the original assigned line = '' above, so this message
            # always printed an empty line; keep "line" intact instead.
            raise InputError(idf_path, 'Cannot parse line %s' % line)

    if related is None:
        # The value of the error is known at this moment because it does not depend on some other BASE error code
        err_dict[num].append(ErrItem(words[1], idf_path, include_as, comment))
        rev_err_dict[words[1]] = num
    else:
        # Store the information available now and compute the error code later
        unproc_list.append(ErrItem(words[1], idf_path, include_as, comment, related, num))
def process_remaining_errors() -> None:
    """
    Resolve queued errors whose BASE error code was unknown at parse time.

    This works for sure only if there is no multiple-time dependency, e.g.:
        #define BASE1 0
        #define BASE2 (BASE1 + 10)
        #define ERROR (BASE2 + 10) - ERROR will be processed successfully only if it processed later than BASE2
    """
    for pending in unproc_list:
        if pending.rel_str not in rev_err_dict:
            print(pending.rel_str + ' referenced by ' + pending.name + ' in ' + pending.file + ' is unknown')
            continue
        # Base is known -> the final code is base + offset; record both maps.
        code = rev_err_dict[pending.rel_str] + pending.rel_off
        err_dict[code].append(ErrItem(pending.name, pending.file, pending.include_as, pending.comment))
        rev_err_dict[pending.name] = code

    # Everything was either resolved or reported; clear the queue in place.
    del unproc_list[:]
def path_to_include(path: str) -> str:
    """
    Convert a project-relative header path into the form used in a C
    #include directive.

    Using just the filename does not always work because some headers live
    deeper in the tree, so everything after the first 'include' directory
    component is kept, e.g. "components/XY/include/esp32/file.h" becomes
    "esp32/file.h". When no 'include' component exists, fall back to the
    bare filename. Other special cases need to be handled here when the
    compiler reports an unknown header file.
    """
    parts = path.split(os.sep)
    if 'include' in parts:
        # keep subdirectories and filename that follow the "include" dir
        cut = parts.index('include') + 1
        return os.sep.join(parts[cut:])
    # no 'include' in the path -> use just the filename
    return os.path.basename(path)
def print_warning(error_list: List, error_code: int) -> None:
    """Report on stdout all errors that share the same numeric code."""
    header = '[WARNING] The following errors have the same code (%d):' % error_code
    print(header)
    for item in error_list:
        print(' ' + str(item))
def max_string_width(errors: Optional[dict] = None) -> int:
    """
    Return the length of the longest error name.

    errors - mapping of error code -> list of items with a .name attribute;
             defaults to the module-level err_dict when not given (keeps the
             original zero-argument call sites working).

    Returns 0 for an empty mapping.
    """
    items = err_dict if errors is None else errors
    # The original stored the running maximum in a local named "max", which
    # shadowed the builtin; use the builtin with a generator instead.
    return max((len(e.name) for entries in items.values() for e in entries), default=0)
def generate_c_output(fin: TextIO, fout: TextIO) -> None:
    """
    Writes the output to fout based on the error dictionary err_dict and
    template file fin.

    The template is copied line by line; lines matching @COMMENT@, @HEADERS@
    and @ERROR_ITEMS@ are replaced with generated content, everything else is
    passed through unchanged.
    """
    # make includes unique by using a set
    includes = set()
    for k in err_dict:
        for e in err_dict[k]:
            if e.include_as:
                includes.add(e.include_as)
            else:
                includes.add(path_to_include(e.file))

    # The order in a set in non-deterministic therefore it could happen that the
    # include order will be different in other machines and false difference
    # in the output file could be reported. In order to avoid this, the items
    # are sorted in a list.
    include_list = list(includes)
    include_list.sort()

    max_width = max_string_width() + 17 + 1  # length of " ERR_TBL_IT()," with spaces is 17
    # widest decimal representation of any code; used to right-align the values
    max_decdig = max(len(str(k)) for k in err_dict)

    for line in fin:
        if re.match(r'@COMMENT@', line):
            fout.write('//Do not edit this file because it is autogenerated by ' + os.path.basename(__file__) + '\n')

        elif re.match(r'@HEADERS@', line):
            for i in include_list:
                if i not in dont_include:
                    # guard each include so a missing optional component does not break the build
                    fout.write("#if __has_include(\"" + i + "\")\n#include \"" + i + "\"\n#endif\n")
        elif re.match(r'@ERROR_ITEMS@', line):
            last_file = ''
            for k in sorted(err_dict.keys()):
                if len(err_dict[k]) > 1:
                    # duplicate codes: order by priority (see ErrItem.__cmp__) and warn
                    err_dict[k].sort(key=functools.cmp_to_key(ErrItem.__cmp__))
                    print_warning(err_dict[k], k)
                for e in err_dict[k]:
                    if e.file != last_file:
                        # emit a comment header whenever the source file changes
                        last_file = e.file
                        fout.write(' // %s\n' % last_file)
                    table_line = (' ERR_TBL_IT(' + e.name + '), ').ljust(max_width) + '/* ' + str(k).rjust(max_decdig)
                    # each table entry is compiled only when the macro is defined
                    fout.write('# ifdef %s\n' % e.name)
                    fout.write(table_line)
                    hexnum_length = 0
                    if k > 0:  # negative number and zero should be only ESP_FAIL and ESP_OK
                        hexnum = ' 0x%x' % k
                        hexnum_length = len(hexnum)
                        fout.write(hexnum)
                    if e.comment != '':
                        if len(e.comment) < 50:
                            fout.write(' %s' % e.comment)
                        else:
                            # long comments are wrapped and aligned under the table entry
                            indent = ' ' * (len(table_line) + hexnum_length + 1)
                            w = textwrap.wrap(e.comment, width=120, initial_indent=indent, subsequent_indent=indent)
                            # this couldn't be done with initial_indent because there is no initial_width option
                            fout.write(' %s' % w[0].strip())
                            for i in range(1, len(w)):
                                fout.write('\n%s' % w[i])
                    fout.write(' */\n# endif\n')
        else:
            fout.write(line)
def generate_rst_output(fout: TextIO) -> None:
    """
    Write one reST line per error code into fout, ordered by code.
    Positive codes are shown in hex, the rest (ESP_OK/ESP_FAIL) in decimal;
    only the first entry of each code is documented.
    """
    for code in sorted(err_dict.keys()):
        entry = err_dict[code][0]
        chunks = [':c:macro:`{}` '.format(entry.name)]
        if code > 0:
            chunks.append('**(0x{:x})**'.format(code))
        else:
            chunks.append('({:d})'.format(code))
        if len(entry.comment) > 0:
            chunks.append(': {}'.format(entry.comment))
        chunks.append('\n\n')
        fout.write(''.join(chunks))
def main() -> None:
    """
    Walk all *.c/*.h files of the IDF tree, collect ESP_ERR_*/ESP_OK/ESP_FAIL
    definitions and write either the C lookup table (default) or, with
    --rst_output, the reST documentation.
    """
    # IDF_PATH environment variable wins; otherwise assume this script lives
    # one directory below the IDF project root.
    if 'IDF_PATH' in os.environ:
        idf_path = os.environ['IDF_PATH']
    else:
        idf_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))

    parser = argparse.ArgumentParser(description='ESP32 esp_err_to_name lookup generator for esp_err_t')
    parser.add_argument('--c_input', help='Path to the esp_err_to_name.c.in template input.',
                        default=idf_path + '/components/esp_common/src/esp_err_to_name.c.in')
    parser.add_argument('--c_output', help='Path to the esp_err_to_name.c output.', default=idf_path + '/components/esp_common/src/esp_err_to_name.c')
    parser.add_argument('--rst_output', help='Generate .rst output and save it into this file')
    args = parser.parse_args()

    # "// <this script name>: ... "<path>"" comments override the derived include path
    include_as_pattern = re.compile(r'\s*//\s*{}: [^"]* "([^"]+)"'.format(os.path.basename(__file__)))
    define_pattern = re.compile(r'\s*#define\s+(ESP_ERR_|ESP_OK|ESP_FAIL)')

    for root, dirnames, filenames in os.walk(idf_path):
        for filename in fnmatch.filter(filenames, '*.[ch]'):
            full_path = os.path.join(root, filename)
            path_in_idf = os.path.relpath(full_path, idf_path)
            # skip explicitly ignored files and everything under ignored directories
            if path_in_idf in ignore_files or path_in_idf.startswith(ignore_dirs):
                continue
            with open(full_path, encoding='utf-8') as f:
                try:
                    include_as = None
                    for line in f:
                        line = line.strip()
                        m = include_as_pattern.search(line)
                        if m:
                            include_as = m.group(1)
                        # match also ESP_OK and ESP_FAIL because some of ESP_ERRs are referencing them
                        elif define_pattern.match(line):
                            try:
                                process(line, path_in_idf, include_as)
                            except InputError as e:
                                # a malformed define is reported but does not stop the scan
                                print(e)
                except UnicodeDecodeError:
                    raise ValueError('The encoding of {} is not Unicode.'.format(path_in_idf))

    # resolve errors that referenced a BASE code seen later in the walk
    process_remaining_errors()

    if args.rst_output is not None:
        with open(args.rst_output, 'w', encoding='utf-8') as fout:
            generate_rst_output(fout)
    else:
        with open(args.c_input, 'r', encoding='utf-8') as fin, open(args.c_output, 'w', encoding='utf-8') as fout:
            generate_c_output(fin, fout)
# Run the generator only when executed as a script, so the module can be
# imported without side effects.
if __name__ == '__main__':
    main()