refactor: rewrite ldgen fragment file parser
closes https://github.com/espressif/esp-idf/issues/7940
This commit is contained in:
parent a44953ecd4
commit 172854a850
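The rewrite targets the fragment-file grammar that the docstrings below describe. A minimal fragment file of the kind the new parser is meant to accept (the names, archive, and Kconfig condition are invented for illustration):

[sections:my_text]
entries:
    .text+
    .literal+

[scheme:my_scheme]
entries:
    my_text -> flash_text

[mapping:my_lib]
archive: libmy_lib.a
entries:
    if MY_FEATURE = y:
        my_obj (noflash)
    else:
        * (default)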
@@ -57,10 +57,7 @@ test_ldgen_on_host:
   extends: .host_test_template
   script:
     - cd tools/ldgen/test
-    - ./test_fragments.py
-    - ./test_generation.py
-    - ./test_entity.py
-    - ./test_output_commands.py
+    - python -m unittest
   variables:
     LC_ALL: C.UTF-8
 
@@ -151,6 +151,7 @@ disable=print-statement,
         too-many-branches,
         too-many-statements,
         ungrouped-imports,          # since we have isort in pre-commit
+        no-name-in-module,          # since we have flake8 to check this
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
@@ -15,7 +15,7 @@ cryptography>=2.1.4
 # We do have cryptography binary on https://dl.espressif.com/pypi for ARM
 # On https://pypi.org/ are no ARM binaries as standard now
 
-pyparsing>=2.0.3,<2.4.0
+pyparsing>=3.0.3  # https://github.com/pyparsing/pyparsing/issues/319 is fixed in 3.0.3
 pyelftools>=0.22
 idf-component-manager>=0.2.99-beta
 
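The pyparsing major-version bump matters beyond the linked issue: pyparsing 3 renamed its camelCase API to snake_case, which is why the hunks below move from restOfLine/setResultsName/setParseAction to rest_of_line/set_results_name/set_parse_action, and from the indentedBlock helper to the IndentedBlock class. A minimal sketch of the rename (illustrative, not part of the commit):

from pyparsing import Word, alphas

token = Word(alphas).set_results_name('name')          # pyparsing 2: .setResultsName('name')
token.set_parse_action(lambda toks: toks[0].upper())   # pyparsing 2: .setParseAction(...)
print(token.parse_string('hello'))                     # pyparsing 2: .parseString(...) -> ['HELLO']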
@@ -237,7 +237,6 @@ tools/ldgen/ldgen.py
 tools/ldgen/ldgen/entity.py
 tools/ldgen/ldgen/fragments.py
 tools/ldgen/ldgen/generation.py
-tools/ldgen/ldgen/ldgen_common.py
 tools/ldgen/ldgen/linker_script.py
 tools/ldgen/ldgen/output_commands.py
 tools/ldgen/ldgen/sdkconfig.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -14,7 +14,7 @@ import tempfile
 from io import StringIO
 
 from ldgen.entity import EntityDB
-from ldgen.fragments import FragmentFile
+from ldgen.fragments import parse_fragment_file
 from ldgen.generation import Generation
 from ldgen.ldgen_common import LdGenFailure
 from ldgen.linker_script import LinkerScript
@@ -148,12 +148,12 @@ def main():
 
     for fragment_file in fragment_files:
         try:
-            fragment_file = FragmentFile(fragment_file, sdkconfig)
+            fragment_file = parse_fragment_file(fragment_file, sdkconfig)
         except (ParseException, ParseFatalException) as e:
             # ParseException is raised on incorrect grammar
             # ParseFatalException is raised on correct grammar, but inconsistent contents (ex. duplicate
             # keys, key unsupported by fragment, unexpected number of values, etc.)
-            raise LdGenFailure('failed to parse %s\n%s' % (fragment_file.name, str(e)))
+            raise LdGenFailure('failed to parse %s\n%s' % (fragment_file, str(e)))
         generation_model.add_fragments_from_file(fragment_file)
 
     mapping_rules = generation_model.generate(sections_infos)
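The entry point thus changes from constructing FragmentFile(path, sdkconfig), which opened and parsed the file inside __init__, to the module-level parse_fragment_file; since the function receives the path directly, the error message no longer needs a file object's .name. A hedged usage sketch (file names and the SDKConfig arguments are illustrative):

from ldgen.fragments import parse_fragment_file
from ldgen.sdkconfig import SDKConfig

sdkconfig = SDKConfig('Kconfig', 'sdkconfig')            # hypothetical inputs
fragment_file = parse_fragment_file('app.lf', sdkconfig)
for fragment in fragment_file.fragments:                 # Sections / Scheme / Mapping instances
    print(fragment.name, type(fragment).__name__)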
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -10,7 +10,7 @@ from enum import Enum
 from functools import total_ordering
 
 from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
-                       nums, restOfLine)
+                       nums, rest_of_line)
 
 
 @total_ordering
@@ -82,7 +82,7 @@ class Entity:
         return '%s:%s %s' % self.__repr__()
 
     def __repr__(self):
-        return (self.archive, self.obj, self.symbol)
+        return self.archive, self.obj, self.symbol
 
     def __getitem__(self, spec):
         res = None
@@ -97,7 +97,7 @@ class Entity:
         return res
 
 
-class EntityDB():
+class EntityDB:
     """
     Collection of entities extracted from libraries known in the build.
     Allows retrieving a list of archives, a list of object files in an archive
@@ -116,12 +116,10 @@ class EntityDB():
         archive_path = (Literal('In archive').suppress() +
                         White().suppress() +
                         # trim the colon and line ending characters from archive_path
-                        restOfLine.setResultsName('archive_path').setParseAction(
+                        rest_of_line.set_results_name('archive_path').set_parse_action(
                             lambda s, loc, toks: s.rstrip(':\n\r ')))
         parser = archive_path
 
-        results = None
-
         try:
             results = parser.parseString(first_line, parseAll=True)
         except ParseException as p:
@@ -132,7 +130,7 @@ class EntityDB():
 
     def _get_infos_from_file(self, info):
         # {object}: file format elf32-xtensa-le
-        object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)
+        object_line = SkipTo(':').set_results_name('object') + Suppress(rest_of_line)
 
         # Sections:
         # Idx Name ...
@@ -141,12 +139,14 @@ class EntityDB():
 
         # 00 {section} 0000000 ...
         #    CONTENTS, ALLOC, ....
-        section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
-            Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))
+        section_entry = (Suppress(Word(nums)) + SkipTo(' ') + Suppress(rest_of_line)
+                         + Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas)))
 
-        content = Group(
-            object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
-        parser = Group(ZeroOrMore(content)).setResultsName('contents')
+        content = Group(object_line
+                        + section_start
+                        + section_header
+                        + Group(OneOrMore(section_entry)).set_results_name('sections'))
+        parser = Group(ZeroOrMore(content)).set_results_name('contents')
 
         try:
             results = parser.parseString(info.content, parseAll=True)
@@ -181,8 +181,9 @@ class EntityDB():
 
     def _match_obj(self, archive, obj):
         objs = self.get_objects(archive)
-        match_objs = fnmatch.filter(objs, obj + '.o') + fnmatch.filter(objs, obj + '.*.obj') + fnmatch.filter(objs,
-                                                                                                              obj + '.obj')
+        match_objs = (fnmatch.filter(objs, obj + '.o')
+                      + fnmatch.filter(objs, obj + '.*.obj')
+                      + fnmatch.filter(objs, obj + '.obj'))
 
         if len(match_objs) > 1:
             raise ValueError("Multiple matches for object: '%s: %s': %s" % (archive, obj, str(match_objs)))
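_match_obj accepts an object name with or without an extension, so the reformatted filter chain matches foo.o, foo.c.obj, and foo.obj alike. A quick standalone check (archive contents invented):

import fnmatch

objs = ['foo.o', 'foo.c.obj', 'bar.obj']
obj = 'foo'
match_objs = (fnmatch.filter(objs, obj + '.o')
              + fnmatch.filter(objs, obj + '.*.obj')
              + fnmatch.filter(objs, obj + '.obj'))
print(match_objs)  # ['foo.o', 'foo.c.obj'] -- two matches, so ldgen would raise ValueError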
@@ -1,220 +1,40 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
-import abc
-import os
-import re
-from collections import namedtuple
-from enum import Enum
 
-from pyparsing import (Combine, Forward, Group, Keyword, Literal, OneOrMore, Optional, Or, ParseFatalException,
-                       Suppress, Word, ZeroOrMore, alphanums, alphas, delimitedList, indentedBlock, nums,
-                       originalTextFor, restOfLine)
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
 
-from .entity import Entity
-from .sdkconfig import SDKConfig
+from pyparsing import (Combine, Forward, Group, IndentedBlock, Keyword, LineEnd, Literal, OneOrMore, Opt,
+                       ParseFatalException, SkipTo, Suppress, Word, ZeroOrMore, alphanums, alphas, delimited_list,
+                       nums, rest_of_line)
 
 
-class FragmentFile():
+class Empty:
     """
-    Processes a fragment file and stores all parsed fragments. For
-    more information on how this class interacts with classes for the different fragment types,
-    see description of Fragment.
+    Return `Empty()` when the sdkconfig does not meet the conditional statements.
     """
 
-    def __init__(self, fragment_file, sdkconfig):
-        try:
-            fragment_file = open(fragment_file, 'r')
-        except TypeError:
-            pass
+    def __repr__(self):
+        return '<EMPTY>'
 
-        path = os.path.realpath(fragment_file.name)
-
-        indent_stack = [1]
-
-        class parse_ctx:
-            fragment = None  # current fragment
-            key = ''  # current key
-            keys = list()  # list of keys parsed
-            key_grammar = None  # current key grammar
-
-            @staticmethod
-            def reset():
-                parse_ctx.fragment_instance = None
-                parse_ctx.key = ''
-                parse_ctx.keys = list()
-                parse_ctx.key_grammar = None
-
-        def fragment_type_parse_action(toks):
-            parse_ctx.reset()
-            parse_ctx.fragment = FRAGMENT_TYPES[toks[0]]()  # create instance of the fragment
-            return None
-
-        def expand_conditionals(toks, stmts):
-            try:
-                stmt = toks['value']
-                stmts.append(stmt)
-            except KeyError:
-                try:
-                    conditions = toks['conditional']
-                    for condition in conditions:
-                        try:
-                            _toks = condition[1]
-                            _cond = condition[0]
-                            if sdkconfig.evaluate_expression(_cond):
-                                expand_conditionals(_toks, stmts)
-                                break
-                        except IndexError:
-                            expand_conditionals(condition[0], stmts)
-                except KeyError:
-                    for tok in toks:
-                        expand_conditionals(tok, stmts)
-
-        def key_parse_action(pstr, loc, toks):
-            key = toks[0]
-
-            if key in parse_ctx.keys:
-                raise ParseFatalException(pstr, loc, "duplicate key '%s' value definition" % parse_ctx.key)
-
-            parse_ctx.key = key
-            parse_ctx.keys.append(key)
-
-            try:
-                parse_ctx.key_grammar = parse_ctx.fragment.get_key_grammars()[key]
-                key_grammar = parse_ctx.key_grammar.grammar
-            except KeyError:
-                raise ParseFatalException(pstr, loc, "key '%s' is not supported by fragment" % key)
-            except Exception as e:
-                raise ParseFatalException(pstr, loc, "unable to parse key '%s'; %s" % (key, str(e)))
-
-            key_stmt << (conditional | Group(key_grammar).setResultsName('value'))
-
-            return None
-
-        def key_body_parsed(pstr, loc, toks):
-            stmts = list()
-            expand_conditionals(toks, stmts)
-
-            if parse_ctx.key_grammar.min and len(stmts) < parse_ctx.key_grammar.min:
-                raise ParseFatalException(pstr, loc, "fragment requires at least %d values for key '%s'" %
-                                          (parse_ctx.key_grammar.min, parse_ctx.key))
-
-            if parse_ctx.key_grammar.max and len(stmts) > parse_ctx.key_grammar.max:
-                raise ParseFatalException(pstr, loc, "fragment requires at most %d values for key '%s'" %
-                                          (parse_ctx.key_grammar.max, parse_ctx.key))
-
-            try:
-                parse_ctx.fragment.set_key_value(parse_ctx.key, stmts)
-            except Exception as e:
-                raise ParseFatalException(pstr, loc, "unable to add key '%s'; %s" % (parse_ctx.key, str(e)))
-            return None
-
-        key = (Word(alphanums + '_') + Suppress(':')).setParseAction(key_parse_action)
-        key_stmt = Forward()
-
-        condition_block = indentedBlock(key_stmt, indent_stack)
-        key_stmts = OneOrMore(condition_block)
-        key_body = Suppress(key) + key_stmts
-        key_body.setParseAction(key_body_parsed)
-
-        condition = originalTextFor(SDKConfig.get_expression_grammar()).setResultsName('condition')
-        if_condition = Group(Suppress('if') + condition + Suppress(':') + condition_block)
-        elif_condition = Group(Suppress('elif') + condition + Suppress(':') + condition_block)
-        else_condition = Group(Suppress('else') + Suppress(':') + condition_block)
-        conditional = (if_condition + Optional(OneOrMore(elif_condition)) + Optional(else_condition)).setResultsName(
-            'conditional')
-
-        def name_parse_action(pstr, loc, toks):
-            parse_ctx.fragment.name = toks[0]
-
-        ftype = Word(alphas).setParseAction(fragment_type_parse_action)
-        fid = Suppress(':') + Word(alphanums + '_.').setResultsName('name')
-        fid.setParseAction(name_parse_action)
-        header = Suppress('[') + ftype + fid + Suppress(']')
-
-        def fragment_parse_action(pstr, loc, toks):
-            key_grammars = parse_ctx.fragment.get_key_grammars()
-            required_keys = set([k for (k, v) in key_grammars.items() if v.required])
-            present_keys = required_keys.intersection(set(parse_ctx.keys))
-            if present_keys != required_keys:
-                raise ParseFatalException(pstr, loc, 'required keys %s for fragment not found' %
-                                          list(required_keys - present_keys))
-            return parse_ctx.fragment
-
-        fragment_stmt = Forward()
-        fragment_block = indentedBlock(fragment_stmt, indent_stack)
-
-        fragment_if_condition = Group(Suppress('if') + condition + Suppress(':') + fragment_block)
-        fragment_elif_condition = Group(Suppress('elif') + condition + Suppress(':') + fragment_block)
-        fragment_else_condition = Group(Suppress('else') + Suppress(':') + fragment_block)
-        fragment_conditional = (fragment_if_condition + Optional(OneOrMore(fragment_elif_condition)) +
-                                Optional(fragment_else_condition)).setResultsName('conditional')
-
-        fragment = (header + OneOrMore(indentedBlock(key_body, indent_stack, False))).setResultsName('value')
-        fragment.setParseAction(fragment_parse_action)
-        fragment.ignore('#' + restOfLine)
-
-        deprecated_mapping = DeprecatedMapping.get_fragment_grammar(sdkconfig, fragment_file.name).setResultsName(
-            'value')
-
-        fragment_stmt << (Group(deprecated_mapping) | Group(fragment) | Group(fragment_conditional))
-
-        def fragment_stmt_parsed(pstr, loc, toks):
-            stmts = list()
-            expand_conditionals(toks, stmts)
-            return stmts
-
-        parser = ZeroOrMore(fragment_stmt).setParseAction(fragment_stmt_parsed)
-
-        self.fragments = parser.parseFile(fragment_file, parseAll=True)
-
-        for fragment in self.fragments:
-            fragment.path = path
+    def __bool__(self):
+        return False
 
 
 class Fragment:
     """
-    Base class for a fragment that can be parsed from a fragment file. All fragments
-    share the common grammar:
-
-    [type:name]
-    key1:value1
-    key2:value2
-    ...
-
-    Supporting a new fragment type means deriving a concrete class which specifies
-    key-value pairs that the fragment supports and what to do with the parsed key-value pairs.
-
-    The new fragment must also be appended to FRAGMENT_TYPES, specifying the
-    keyword for the type and the derived class.
-
-    The key of the key-value pair is a simple keyword string. Other parameters
-    that describe the key-value pair is specified in Fragment.KeyValue:
-    1. grammar - pyparsing grammar to parse the value of key-value pair
-    2. min - the minimum number of value in the key entry, None means no minimum
-    3. max - the maximum number of value in the key entry, None means no maximum
-    4. required - if the key-value pair is required in the fragment
-
-    Setting min=max=1 means that the key has a single value.
-
-    FragmentFile provides conditional expression evaluation, enforcing
-    the parameters for Fragment.Keyvalue.
+    Base class for a fragment that can be parsed from a fragment file.
     """
-    __metaclass__ = abc.ABCMeta
-
-    KeyValue = namedtuple('KeyValue', 'grammar min max required')
 
     IDENTIFIER = Word(alphas + '_', alphanums + '_')
     ENTITY = Word(alphanums + '.-_$+')
 
-    @abc.abstractmethod
-    def set_key_value(self, key, parse_results):
-        pass
+    def __init__(self, name: str, entries: Set[Union[str, Tuple[str]]]):
+        self.name = name
+        self.entries = entries
 
-    @abc.abstractmethod
-    def get_key_grammars(self):
-        pass
+    def __repr__(self):
+        return str(self.__dict__)
 
 
 class Sections(Fragment):
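Empty is how the new parser represents a statement whose if/elif condition evaluated false and that had no else branch: it is falsy, so later set comprehensions and isinstance checks silently drop it. A minimal standalone sketch of that contract:

class Empty:
    def __repr__(self):
        return '<EMPTY>'

    def __bool__(self):
        return False

parsed = ['.text', Empty(), '.rodata']            # hypothetical parse results
entries = {entry for entry in parsed if entry}    # the falsy Empty() is filtered out
print(entries)                                    # {'.text', '.rodata'}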
@@ -227,24 +47,33 @@ class Sections(Fragment):
     .section2
     ...
     """
 
     # Unless quoted, symbol names start with a letter, underscore, or point
     # and may include any letters, underscores, digits, points, and hyphens.
-    GNU_LD_SYMBOLS = Word(alphas + '_.', alphanums + '._-')
+    ENTRY = Combine(Word(alphas + '_.', alphanums + '._-') + Opt('+')) + LineEnd().suppress()
 
-    entries_grammar = Combine(GNU_LD_SYMBOLS + Optional('+'))
+    @staticmethod
+    def parse_entry(toks):
+        # section
+        return toks[0]
 
-    grammars = {
-        'entries': Fragment.KeyValue(entries_grammar.setResultsName('section'), 1, None, True)
-    }
+    @staticmethod
+    def parse(s, loc, toks):
+        this = toks[0]
 
-    """
-    Utility function that returns a list of sections given a sections fragment entry,
-    with the '+' notation and symbol concatenation handled automatically.
-    """
+        name = this[0]
+        entries = {entry for entry in this[1] if entry}
+
+        if not entries:
+            raise ParseFatalException(s, loc, 'Sections entries shouldn\'t be empty')
+
+        return Sections(name, entries)
 
     @staticmethod
     def get_section_data_from_entry(sections_entry, symbol=None):
+        """
+        Returns a list of sections given a sections fragment entry,
+        with the '+' notation and symbol concatenation handled automatically.
+        """
         if not symbol:
             sections = list()
             sections.append(sections_entry.replace('+', ''))
@@ -254,18 +83,9 @@ class Sections(Fragment):
         if sections_entry.endswith('+'):
             section = sections_entry.replace('+', '.*')
             expansion = section.replace('.*', '.' + symbol)
-            return (section, expansion)
+            return section, expansion
         else:
-            return (sections_entry, None)
-
-    def set_key_value(self, key, parse_results):
-        if key == 'entries':
-            self.entries = set()
-            for result in parse_results:
-                self.entries.add(result['section'])
-
-    def get_key_grammars(self):
-        return self.__class__.grammars
+            return sections_entry, None
 
 
 class Scheme(Fragment):
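The '+' suffix on a sections entry still means "this section plus its symbol-suffixed variants"; only the plumbing around it changed. Illustrative calls against the method kept above (the symbol name is invented):

print(Sections.get_section_data_from_entry('.literal+', symbol='my_func'))
# -> ('.literal.*', '.literal.my_func')
print(Sections.get_section_data_from_entry('.text', symbol='my_func'))
# -> ('.text', None)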
@@ -279,20 +99,184 @@ class Scheme(Fragment):
     sections1 -> target1
     ...
     """
+    ENTRY = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER + LineEnd().suppress()
 
-    grammars = {
-        'entries': Fragment.KeyValue(Fragment.IDENTIFIER.setResultsName('sections') + Suppress('->') +
-                                     Fragment.IDENTIFIER.setResultsName('target'), 1, None, True)
-    }
+    @staticmethod
+    def parse_entry(toks):
+        # section, target
+        return toks[0], toks[1]
 
-    def set_key_value(self, key, parse_results):
-        if key == 'entries':
-            self.entries = set()
-            for result in parse_results:
-                self.entries.add((result['sections'], result['target']))
+    @staticmethod
+    def parse(s, loc, toks):
+        this = toks[0]
 
-    def get_key_grammars(self):
-        return self.__class__.grammars
+        name = this[0]
+        entries = {entry for entry in this[1] if entry}
+
+        if not entries:
+            raise ParseFatalException(s, loc, 'Scheme entries shouldn\'t be empty')
+
+        return Scheme(name, entries)
+
+
+class EntryFlag:
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class Surround(EntryFlag):
+    """
+    SURROUND(symbol)
+
+    '__symbol_start', '__symbol_end' is generated before and after
+    the corresponding input section description, respectively.
+    """
+    SURROUND = (Keyword('SURROUND').suppress()
+                + Suppress('(')
+                + Fragment.IDENTIFIER
+                + Suppress(')'))
+
+    def __init__(self, symbol: str):
+        self.symbol = symbol
+        self.pre = True
+        self.post = True
+
+    def __eq__(self, other):
+        if isinstance(other, Surround):
+            if self.symbol == other.symbol and self.pre == other.pre and self.post == other.post:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Surround(toks[0])
+
+
+class Align(EntryFlag):
+    """
+    ALIGN(alignment, [, pre, post]).
+
+    Generates alignment command before and/or after the corresponding
+    input section description, depending on whether pre, post or
+    both are specified.
+    """
+    PRE = Opt(Suppress(',') + Suppress('pre')).set_results_name('pre')
+    POST = Opt(Suppress(',') + Suppress('post')).set_results_name('post')
+
+    ALIGN = (Keyword('ALIGN').suppress()
+             + Suppress('(')
+             + Word(nums)
+             + PRE
+             + POST
+             + Suppress(')'))
+
+    def __init__(self, alignment, pre=True, post=False):
+        self.alignment = alignment
+        self.pre = pre
+        self.post = post
+
+    def __eq__(self, other):
+        if isinstance(other, Align):
+            if self.alignment == other.alignment and self.pre == other.pre and self.post == other.post:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        alignment = int(toks[0])
+        if toks.post == '':
+            return Align(alignment)
+
+        if toks.pre == '' and toks.post != '':
+            return Align(alignment, False, True)
+
+        return Align(alignment, True, True)
+
+
+class Keep(EntryFlag):
+    """
+    KEEP()
+
+    Surrounds input section description with KEEP command.
+    """
+    KEEP = Keyword('KEEP()')
+
+    def __eq__(self, other):
+        if isinstance(other, Keep):
+            return True
+
+        return False
+
+    @staticmethod
+    def parse():
+        return Keep()
+
+
+class Sort(EntryFlag):
+    """
+    SORT([sort_by_first, sort_by_second])
+
+    where sort_by_first, sort_by_second = {name, alignment, init_priority}
+
+    Emits SORT_BY_NAME, SORT_BY_ALIGNMENT or SORT_BY_INIT_PRIORITY
+    depending on arguments. Nested sort follows linker script rules.
+    """
+    _keywords = Keyword('name') | Keyword('alignment') | Keyword('init_priority')
+    SORT = (Keyword('SORT').suppress()
+            + Suppress('(')
+            + _keywords.set_results_name('first')
+            + Opt(Suppress(',') + _keywords.set_results_name('second'))
+            + Suppress(')'))
+
+    def __init__(self, first: str, second: Optional[str] = None):
+        self.first = first
+        self.second = second
+
+    def __eq__(self, other):
+        if isinstance(other, Sort):
+            if self.first == other.first and self.second == other.second:
+                return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Sort(toks.first, toks.second or None)
+
+
+class Flag:
+    _section_target = Fragment.IDENTIFIER + Suppress('->') + Fragment.IDENTIFIER
+    _flag = (Surround.SURROUND.set_parse_action(Surround.parse)
+             | Align.ALIGN.set_parse_action(Align.parse)
+             | Keep.KEEP.set_parse_action(Keep.parse)
+             | Sort.SORT.set_parse_action(Sort.parse))
+
+    FLAG = _section_target + OneOrMore(_flag)
+
+    def __init__(self, section: str, target: str, flags: List[EntryFlag]):
+        self.section = section
+        self.target = target
+        self.flags = flags
+
+    def __eq__(self, other):
+        if isinstance(other, Flag):
+            if self.section == other.section and self.target == other.target and len(self.flags) == len(other.flags):
+                for i, j in zip(self.flags, other.flags):
+                    if i != j:
+                        break
+                else:
+                    return True
+
+        return False
+
+    @staticmethod
+    def parse(toks):
+        return Flag(toks[0], toks[1], toks[2:])
+
+    def __repr__(self):
+        return str(self.__dict__)
 
 
 class Mapping(Fragment):
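Taken together, these EntryFlag subclasses parse the decorations that may follow a mapping entry. An illustrative mapping fragment exercising that grammar (archive, object, and symbol names are invented; noflash is an existing ESP-IDF scheme):

[mapping:my_lib]
archive: libmy_lib.a
entries:
    my_obj (noflash); text->iram0_text ALIGN(4, pre) SURROUND(my_obj_text), rodata->dram0_data KEEP()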
@@ -316,295 +300,174 @@ class Mapping(Fragment):
     output_commands.py.
     """
 
-    class Flag():
-        PRE_POST = (Optional(Suppress(',') + Suppress('pre').setParseAction(lambda: True).setResultsName('pre')) +
-                    Optional(Suppress(',') + Suppress('post').setParseAction(lambda: True).setResultsName('post')))
-
-    class Surround(Flag):
-        def __init__(self, symbol):
-            self.symbol = symbol
-            self.pre = True
-            self.post = True
-
-        @staticmethod
-        def get_grammar():
-            # SURROUND(symbol)
-            #
-            # '__symbol_start', '__symbol_end' is generated before and after
-            # the corresponding input section description, respectively.
-            grammar = (Keyword('SURROUND').suppress() +
-                       Suppress('(') +
-                       Fragment.IDENTIFIER.setResultsName('symbol') +
-                       Suppress(')'))
-
-            grammar.setParseAction(lambda tok: Mapping.Surround(tok.symbol))
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Surround) and
-                    self.symbol == other.symbol)
-
-    class Align(Flag):
-
-        def __init__(self, alignment, pre=True, post=False):
-            self.alignment = alignment
-            self.pre = pre
-            self.post = post
-
-        @staticmethod
-        def get_grammar():
-            # ALIGN(alignment, [, pre, post]).
-            #
-            # Generates alignment command before and/or after the corresponding
-            # input section description, depending whether pre, post or
-            # both are specified.
-            grammar = (Keyword('ALIGN').suppress() +
-                       Suppress('(') +
-                       Word(nums).setResultsName('alignment') +
-                       Mapping.Flag.PRE_POST +
-                       Suppress(')'))
-
-            def on_parse(tok):
-                alignment = int(tok.alignment)
-                if tok.pre == '' and tok.post == '':
-                    res = Mapping.Align(alignment)
-                elif tok.pre != '' and tok.post == '':
-                    res = Mapping.Align(alignment, tok.pre)
-                elif tok.pre == '' and tok.post != '':
-                    res = Mapping.Align(alignment, False, tok.post)
-                else:
-                    res = Mapping.Align(alignment, tok.pre, tok.post)
-                return res
-
-            grammar.setParseAction(on_parse)
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Align) and
-                    self.alignment == other.alignment and
-                    self.pre == other.pre and
-                    self.post == other.post)
-
-    class Keep(Flag):
-
-        def __init__(self):
-            pass
-
-        @staticmethod
-        def get_grammar():
-            # KEEP()
-            #
-            # Surrounds input section description with KEEP command.
-            grammar = Keyword('KEEP()').setParseAction(Mapping.Keep)
-            return grammar
-
-        def __eq__(self, other):
-            return isinstance(other, Mapping.Keep)
-
-    class Sort(Flag):
-        class Type(Enum):
-            NAME = 0
-            ALIGNMENT = 1
-            INIT_PRIORITY = 2
-
-        def __init__(self, first, second=None):
-            self.first = first
-            self.second = second
-
-        @staticmethod
-        def get_grammar():
-            # SORT([sort_by_first, sort_by_second])
-            #
-            # where sort_by_first, sort_by_second = {name, alignment, init_priority}
-            #
-            # Emits SORT_BY_NAME, SORT_BY_ALIGNMENT or SORT_BY_INIT_PRIORITY
-            # depending on arguments. Nested sort follows linker script rules.
-            keywords = Keyword('name') | Keyword('alignment') | Keyword('init_priority')
-            grammar = (Keyword('SORT').suppress() + Suppress('(') +
-                       keywords.setResultsName('first') +
-                       Optional(Suppress(',') + keywords.setResultsName('second')) + Suppress(')'))
-
-            grammar.setParseAction(lambda tok: Mapping.Sort(tok.first, tok.second if tok.second != '' else None))
-            return grammar
-
-        def __eq__(self, other):
-            return (isinstance(other, Mapping.Sort) and
-                    self.first == other.first and
-                    self.second == other.second)
-
-    def __init__(self):
-        Fragment.__init__(self)
-        self.entries = set()
-        # k = (obj, symbol, scheme)
-        # v = list((section, target), Mapping.Flag))
-        self.flags = dict()
-        self.deprecated = False
-
-    def set_key_value(self, key, parse_results):
-        if key == 'archive':
-            self.archive = parse_results[0]['archive']
-        elif key == 'entries':
-            for result in parse_results:
-                obj = None
-                symbol = None
-                scheme = None
-
-                obj = result['object']
-
-                try:
-                    symbol = result['symbol']
-                except KeyError:
-                    pass
-
-                scheme = result['scheme']
-
-                mapping = (obj, symbol, scheme)
-                self.entries.add(mapping)
-
-                try:
-                    parsed_flags = result['sections_target_flags']
-                except KeyError:
-                    parsed_flags = []
-
-                if parsed_flags:
-                    entry_flags = []
-                    for pf in parsed_flags:
-                        entry_flags.append((pf.sections, pf.target, list(pf.flags)))
-
-                    try:
-                        existing_flags = self.flags[mapping]
-                    except KeyError:
-                        existing_flags = list()
-                        self.flags[mapping] = existing_flags
-
-                    existing_flags.extend(entry_flags)
-
-    def get_key_grammars(self):
-        # There are three possible patterns for mapping entries:
-        #   obj:symbol (scheme)
-        #   obj (scheme)
-        #   * (scheme)
-        # Flags can be specified for section->target in the scheme specified, ex:
-        #   obj (scheme); section->target SURROUND(symbol), section2->target2 ALIGN(4)
-        obj = Fragment.ENTITY.setResultsName('object')
-        symbol = Suppress(':') + Fragment.IDENTIFIER.setResultsName('symbol')
-        scheme = Suppress('(') + Fragment.IDENTIFIER.setResultsName('scheme') + Suppress(')')
-
-        # The flags are specified for section->target in the scheme specified
-        sections_target = Scheme.grammars['entries'].grammar
-
-        flag = Or([f.get_grammar() for f in [Mapping.Keep, Mapping.Align, Mapping.Surround, Mapping.Sort]])
-
-        section_target_flags = Group(sections_target + Group(OneOrMore(flag)).setResultsName('flags'))
-
-        pattern1 = obj + symbol
-        pattern2 = obj
-        pattern3 = Literal(Entity.ALL).setResultsName('object')
-
-        entry = ((pattern1 | pattern2 | pattern3) + scheme +
-                 Optional(Suppress(';') + delimitedList(section_target_flags).setResultsName('sections_target_flags')))
-
-        grammars = {
-            'archive': Fragment.KeyValue(Or([Fragment.ENTITY, Word(Entity.ALL)]).setResultsName('archive'), 1, 1,
-                                         True),
-            'entries': Fragment.KeyValue(entry, 0, None, True)
-        }
-
-        return grammars
-
-
-class DeprecatedMapping():
-    """
-    Mapping fragment with old grammar in versions older than ESP-IDF v4.0. Does not conform to
-    requirements of the Fragment class and thus is limited when it comes to conditional expression
-    evaluation.
-    """
-
-    # Name of the default condition entry
-    DEFAULT_CONDITION = 'default'
-
-    @staticmethod
-    def get_fragment_grammar(sdkconfig, fragment_file):
-
-        # Match header [mapping]
-        header = Suppress('[') + Suppress('mapping') + Suppress(']')
-
-        # There are three possible patterns for mapping entries:
-        #   obj:symbol (scheme)
-        #   obj (scheme)
-        #   * (scheme)
-        obj = Fragment.ENTITY.setResultsName('object')
-        symbol = Suppress(':') + Fragment.IDENTIFIER.setResultsName('symbol')
-        scheme = Suppress('(') + Fragment.IDENTIFIER.setResultsName('scheme') + Suppress(')')
-
-        pattern1 = Group(obj + symbol + scheme)
-        pattern2 = Group(obj + scheme)
-        pattern3 = Group(Literal(Entity.ALL).setResultsName('object') + scheme)
-
-        mapping_entry = pattern1 | pattern2 | pattern3
-
-        # To simplify parsing, classify groups of condition-mapping entry into two types: normal and default
-        # A normal grouping is one with a non-default condition. The default grouping is one which contains the
-        # default condition
-        mapping_entries = Group(ZeroOrMore(mapping_entry)).setResultsName('mappings')
-
-        normal_condition = Suppress(':') + originalTextFor(SDKConfig.get_expression_grammar())
-        default_condition = Optional(Suppress(':') + Literal(DeprecatedMapping.DEFAULT_CONDITION))
-
-        normal_group = Group(normal_condition.setResultsName('condition') + mapping_entries)
-        default_group = Group(default_condition + mapping_entries).setResultsName('default_group')
-
-        normal_groups = Group(ZeroOrMore(normal_group)).setResultsName('normal_groups')
-
-        # Any mapping fragment definition can have zero or more normal group and only one default group as a last entry.
-        archive = Suppress('archive') + Suppress(':') + Fragment.ENTITY.setResultsName('archive')
-        entries = Suppress('entries') + Suppress(':') + (normal_groups + default_group).setResultsName('entries')
-
-        mapping = Group(header + archive + entries)
-        mapping.ignore('#' + restOfLine)
-
-        def parsed_deprecated_mapping(pstr, loc, toks):
-            fragment = Mapping()
-            fragment.archive = toks[0].archive
-            fragment.name = re.sub(r'[^0-9a-zA-Z]+', '_', fragment.archive)
-            fragment.deprecated = True
-
-            fragment.entries = set()
-            condition_true = False
-            for entries in toks[0].entries[0]:
-                condition = next(iter(entries.condition.asList())).strip()
-                condition_val = sdkconfig.evaluate_expression(condition)
-
-                if condition_val:
-                    for entry in entries[1]:
-                        fragment.entries.add(
-                            (entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
-                    condition_true = True
-                    break
-
-            if not fragment.entries and not condition_true:
-                try:
-                    entries = toks[0].entries[1][1]
-                except IndexError:
-                    entries = toks[0].entries[1][0]
-                for entry in entries:
-                    fragment.entries.add((entry.object, None if entry.symbol == '' else entry.symbol, entry.scheme))
-
-            if not fragment.entries:
-                fragment.entries.add(('*', None, 'default'))
-
-            dep_warning = str(ParseFatalException(pstr, loc,
-                              'Warning: Deprecated old-style mapping fragment parsed in file %s.' % fragment_file))
-
-            print(dep_warning)
-            return fragment
-
-        mapping.setParseAction(parsed_deprecated_mapping)
-        return mapping
-
-
-FRAGMENT_TYPES = {
-    'sections': Sections,
-    'scheme': Scheme,
-    'mapping': Mapping
-}
+    _any = Literal('*')
+    _obj = Word(alphas + '_', alphanums + '-_').set_results_name('object')
+    _sym = Fragment.IDENTIFIER.set_results_name('symbol')
+
+    # There are three possible patterns for mapping entries:
+    #   obj:symbol (scheme)
+    #   obj (scheme)
+    #   * (scheme)
+    _entry = (((_obj + Opt(Suppress(':') + _sym)) | _any.set_results_name('object'))
+              + Suppress('(')
+              + Fragment.IDENTIFIER.set_results_name('section')
+              + Suppress(')'))
+
+    ENTRY = _entry + LineEnd().suppress()
+    ARCHIVE = (Word(alphanums + '.-_$+') | Literal('*')) + LineEnd().suppress()
+
+    # Flags can be specified for section->target in the scheme specified, ex:
+    #   obj (scheme);
+    #       section->target SURROUND(symbol),
+    #       section2->target2 ALIGN(4)
+    ENTRY_WITH_FLAG = (_entry + Suppress(';')
+                       + delimited_list(Flag.FLAG.set_parse_action(Flag.parse)))
+
+    def __init__(self, archive: str, flags: Dict[Any, Flag], *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.archive = archive
+        self.flags = flags
+
+    @staticmethod
+    def parse_archive(s, loc, toks):
+        this = toks[0][0]
+        if len(this) != 1:
+            raise ParseFatalException(s, loc, 'Could only specify one archive file in one mapping fragment')
+
+        return this[0]
+
+    @staticmethod
+    def parse_entry(toks):
+        return toks.object, toks.symbol or None, toks.section
+
+    @staticmethod
+    def parse_entry_with_flag(toks):
+        entry = toks.object, toks.symbol or None, toks.section
+        return {
+            entry: [tok for tok in toks if isinstance(tok, Flag)]
+        }
+
+    @staticmethod
+    def parse_entries(toks):
+        return toks[0]
+
+    @staticmethod
+    def parse(toks):
+        this = toks[0]
+
+        name = this[0]
+        archive = this[1]
+        entries_or_dict_with_flags = this[2]
+
+        entries = set()
+        flags = dict()
+        for item in entries_or_dict_with_flags:
+            if isinstance(item, Empty):
+                continue
+            elif isinstance(item, dict):  # entry with flags
+                for k, v in item.items():
+                    entries.add(k)
+                    if k in flags:
+                        flags[k].extend(v)
+                    else:
+                        flags[k] = v
+            else:
+                entries.add(item)
+
+        return Mapping(archive=archive, name=name, entries=entries, flags=flags)
+
+
+class FragmentFile:
+    """
+    Processes a fragment file and stores all parsed fragments. For
+    more information on how this class interacts with classes for the different fragment types,
+    see description of Fragment.
+    """
+
+    def __init__(self, fragments: List[Fragment]):
+        self.path = None  # assign later, couldn't pass extra argument while parsing
+        self.fragments: List[Fragment] = fragments
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+def parse_fragment_file(path, sdkconfig):
+    def parse_conditional(toks):
+        this = toks[0]
+        for stmt in this:
+            if stmt[0] in ['if', 'elif']:  # if/elif
+                if sdkconfig.evaluate_expression(stmt.condition):
+                    return stmt[-1]
+            else:  # else
+                return stmt[-1]
+
+        return Empty()
+
+    def get_conditional_stmt(_stmt):
+        condition = SkipTo(':').set_results_name('condition') + Suppress(':')
+        _suite = IndentedBlock(_stmt)
+
+        if_decl = Literal('if') + condition
+        elif_decl = Literal('elif') + condition
+        else_decl = Literal('else:')
+        if_ = Group(if_decl + _suite)
+        elif_ = Group(elif_decl + _suite)
+        else_ = Group(else_decl + _suite)
+        return Group(if_ + Opt(OneOrMore(elif_)) + Opt(else_)).set_parse_action(parse_conditional)
+
+    def get_suite(_stmt):
+        __stmt = Forward()
+        __conditional = get_conditional_stmt(__stmt)
+        __stmt <<= (comment
+                    | _stmt
+                    | __conditional)
+        return IndentedBlock(__stmt)
+
+    def parse(toks):
+        return FragmentFile([tok for tok in toks if not isinstance(tok, Empty)])
+
+    # comment
+    comment = (Literal('#') + rest_of_line).set_parse_action(lambda s, l, t: Empty())
+
+    # section
+    section_entry = Sections.ENTRY.set_parse_action(Sections.parse_entry)
+    section_entries_suite = get_suite(section_entry)
+    section_header = Suppress('[sections:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
+    section = Group(section_header
+                    + Suppress('entries:')
+                    + section_entries_suite).set_parse_action(Sections.parse)
+
+    # scheme
+    scheme_entry = Scheme.ENTRY.set_parse_action(Scheme.parse_entry)
+    scheme_entries_suite = get_suite(scheme_entry)
+    scheme_header = Suppress('[scheme:') + Fragment.IDENTIFIER + Suppress(']') + LineEnd().suppress()
+    scheme = Group(scheme_header
+                   + Suppress('entries:')
+                   + scheme_entries_suite).set_parse_action(Scheme.parse)
+
+    # mapping
+    mapping_archive = Mapping.ARCHIVE
+    mapping_archive_suite = get_suite(mapping_archive)
+
+    mapping_entry = Mapping.ENTRY.set_parse_action(Mapping.parse_entry)
+    mapping_entry_with_flag = Mapping.ENTRY_WITH_FLAG.set_parse_action(Mapping.parse_entry_with_flag)
+    mapping_entries_suite = get_suite(mapping_entry | mapping_entry_with_flag)
+
+    mapping_header = Suppress('[mapping:') + Fragment.IDENTIFIER + Suppress(']')
+    mapping = Group(mapping_header
+                    + Group(Suppress('archive:')
+                            + mapping_archive_suite).set_parse_action(Mapping.parse_archive)
+                    + Group(Suppress('entries:')
+                            + mapping_entries_suite).set_parse_action(Mapping.parse_entries)
+                    ).set_parse_action(Mapping.parse)
+
+    # highest level
+    fragment = (section
+                | scheme
+                | mapping
+                | get_conditional_stmt(section | scheme | mapping))
+    parser = ZeroOrMore(fragment).ignore(comment).set_parse_action(parse)
+    fragment_file = parser.parse_file(path, parse_all=True)[0]
+    fragment_file.path = path
+
+    return fragment_file
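On the pyparsing side, the rewrite replaces the deprecated indentedBlock(expr, indent_stack) helper with pyparsing 3's IndentedBlock class, which tracks indentation internally instead of through a shared mutable stack. A minimal sketch of the new style (toy grammar, not the one above):

from pyparsing import IndentedBlock, Literal, Word, alphas

entries = Literal('entries:') + IndentedBlock(Word(alphas + '._+'))
print(entries.parse_string('entries:\n    .text+\n    .rodata\n'))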
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -9,7 +9,7 @@ import itertools
 from collections import namedtuple
 
 from .entity import Entity
-from .fragments import Mapping, Scheme, Sections
+from .fragments import Keep, Scheme, Sections, Sort, Surround
 from .ldgen_common import LdGenFailure
 from .output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 
@@ -97,7 +97,7 @@ class Placement:
         self.subplacements.add(subplacement)
 
 
-class EntityNode():
+class EntityNode:
     """
     Node in entity tree. An EntityNode
     is created from an Entity (see entity.py).
@@ -183,16 +183,16 @@ class EntityNode():
         placement_flags = placement.flags if placement.flags is not None else []
 
         for flag in placement_flags:
-            if isinstance(flag, Mapping.Keep):
+            if isinstance(flag, Keep):
                 keep = True
-            elif isinstance(flag, Mapping.Sort):
+            elif isinstance(flag, Sort):
                 sort = (flag.first, flag.second)
             else:  # SURROUND or ALIGN
                 surround_type.append(flag)
 
         for flag in surround_type:
             if flag.pre:
-                if isinstance(flag, Mapping.Surround):
+                if isinstance(flag, Surround):
                     commands[placement.target].append(SymbolAtAddress('_%s_start' % flag.symbol))
                 else:  # ALIGN
                     commands[placement.target].append(AlignAtAddress(flag.alignment))
@@ -206,8 +206,8 @@ class EntityNode():
                                        [e.node.entity for e in placement.exclusions], keep, sort)
             commands[placement.target].append(command)
 
-            # Generate commands for intermediate, non-explicit exclusion placements here, so that they can be enclosed by
-            # flags that affect the parent placement.
+            # Generate commands for intermediate, non-explicit exclusion placements here,
+            # so that they can be enclosed by flags that affect the parent placement.
             for subplacement in placement.subplacements:
                 if not subplacement.flags and not subplacement.explicit:
                     command = InputSectionDesc(subplacement.node.entity, subplacement.sections,
@@ -216,7 +216,7 @@ class EntityNode():
 
         for flag in surround_type:
             if flag.post:
-                if isinstance(flag, Mapping.Surround):
+                if isinstance(flag, Surround):
                     commands[placement.target].append(SymbolAtAddress('_%s_end' % flag.symbol))
                 else:  # ALIGN
                     commands[placement.target].append(AlignAtAddress(flag.alignment))
@@ -387,7 +387,7 @@ class Generation:
 
         for (sections_name, target_name) in scheme.entries:
             # Get the sections under the bucket 'target_name'. If this bucket does not exist
-            # is is created automatically
+            # is created automatically
             sections_in_bucket = sections_bucket[target_name]
 
             try:
@@ -449,10 +449,11 @@ class Generation:
                     flags = mapping.flags[(obj, symbol, scheme_name)]
                     # Check if all section->target defined in the current
                     # scheme.
-                    for (s, t, f) in flags:
-                        if (t not in scheme_dictionary[scheme_name].keys()
-                                or s not in [_s.name for _s in scheme_dictionary[scheme_name][t]]):
-                            message = "%s->%s not defined in scheme '%s'" % (s, t, scheme_name)
+                    for flag in flags:
+                        if (flag.target not in scheme_dictionary[scheme_name].keys()
+                                or flag.section not in
+                                [_s.name for _s in scheme_dictionary[scheme_name][flag.target]]):
+                            message = "%s->%s not defined in scheme '%s'" % (flag.section, flag.target, scheme_name)
                             raise GenerationException(message, mapping)
                 else:
                     flags = None
@@ -464,9 +465,9 @@ class Generation:
                 _flags = []
 
                 if flags:
-                    for (s, t, f) in flags:
-                        if (s, t) == (section.name, target):
-                            _flags.extend(f)
+                    for flag in flags:
+                        if (flag.section, flag.target) == (section.name, target):
+                            _flags.extend(flag.flags)
 
                 sections_str = get_section_strs(section)
 
@@ -481,18 +482,18 @@ class Generation:
                     entity_mappings[key] = Generation.EntityMapping(entity, sections_str, target, _flags)
                 else:
                     # Check for conflicts.
-                    if (target != existing.target):
+                    if target != existing.target:
                         raise GenerationException('Sections mapped to multiple targets.', mapping)
 
                     # Combine flags here if applicable, to simplify
                     # insertion logic.
-                    if (_flags or existing.flags):
-                        if ((_flags and not existing.flags) or (not _flags and existing.flags)):
+                    if _flags or existing.flags:
+                        if (_flags and not existing.flags) or (not _flags and existing.flags):
                             _flags.extend(existing.flags)
                             entity_mappings[key] = Generation.EntityMapping(entity,
                                                                             sections_str,
                                                                             target, _flags)
-                        elif (_flags == existing.flags):
+                        elif _flags == existing.flags:
                             pass
                         else:
                             raise GenerationException('Conflicting flags specified.', mapping)
|
|||||||
 
     def add_fragments_from_file(self, fragment_file):
         for fragment in fragment_file.fragments:
-            if isinstance(fragment, Mapping) and fragment.deprecated and fragment.name in self.mappings.keys():
-                self.mappings[fragment.name].entries |= fragment.entries
+            if isinstance(fragment, Scheme):
+                dict_to_append_to = self.schemes
+            elif isinstance(fragment, Sections):
+                dict_to_append_to = self.placements
             else:
-                if isinstance(fragment, Scheme):
-                    dict_to_append_to = self.schemes
-                elif isinstance(fragment, Sections):
-                    dict_to_append_to = self.placements
-                else:
-                    dict_to_append_to = self.mappings
+                dict_to_append_to = self.mappings
 
             # Raise exception when the fragment of the same type is already in the stored fragments
-            if fragment.name in dict_to_append_to.keys():
+            if fragment.name in dict_to_append_to:
                 stored = dict_to_append_to[fragment.name].path
                 new = fragment.path
                 message = "Duplicate definition of fragment '%s' found in %s and %s." % (
                     fragment.name, stored, new)
                 raise GenerationException(message)
 
             dict_to_append_to[fragment.name] = fragment
 
 
 class GenerationException(LdGenFailure):
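For illustration, the refactored add_fragments_from_file buckets every fragment by type and then runs a single duplicate check. A self-contained sketch of that logic, using namedtuple stand-ins (an assumption, not the real ldgen fragment classes):

from collections import namedtuple

# Hypothetical stand-ins: only .name and .path matter for this sketch.
Scheme = namedtuple('Scheme', 'name path')
Sections = namedtuple('Sections', 'name path')
Mapping = namedtuple('Mapping', 'name path')

schemes, placements, mappings = {}, {}, {}

def add_fragment(fragment):
    if isinstance(fragment, Scheme):
        dict_to_append_to = schemes
    elif isinstance(fragment, Sections):
        dict_to_append_to = placements
    else:
        dict_to_append_to = mappings
    # Same-name fragments of the same type are rejected; reusing a name
    # across different fragment types is fine.
    if fragment.name in dict_to_append_to:
        raise ValueError("Duplicate definition of fragment '%s' found in %s and %s."
                         % (fragment.name, dict_to_append_to[fragment.name].path, fragment.path))
    dict_to_append_to[fragment.name] = fragment

add_fragment(Scheme('default', 'a.lf'))
add_fragment(Mapping('default', 'a.lf'))  # OK: different fragment type
add_fragment(Scheme('default', 'b.lf'))   # raises: duplicate scheme name

Note that the legacy special case, merging the entries of same-named deprecated mapping fragments, is gone: every same-type duplicate now fails fast.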
@@ -1,13 +1,9 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
 
 class LdGenFailure(RuntimeError):
     """
     Parent class for any ldgen runtime failure which is due to input data
     """
-
-    def __init__(self, message):
-        super(LdGenFailure, self).__init__(message)
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -33,24 +33,21 @@ class LinkerScript:
         lines = template_file.readlines()
 
         target = Fragment.IDENTIFIER
-        reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
-        pattern = White(' \t').setResultsName('indent') + reference
+        reference = Suppress('mapping') + Suppress('[') + target + Suppress(']')
+        pattern = White(' \t') + reference
 
         # Find the markers in the template file line by line. If line does not match marker grammar,
         # set it as a literal to be copied as is to the output file.
         for line in lines:
             try:
-                parsed = pattern.parseString(line)
-
-                indent = parsed.indent
-                target = parsed.target
-
-                marker = LinkerScript.Marker(target, indent, [])
-
-                self.members.append(marker)
+                parsed = pattern.parse_string(line)
             except ParseException:
                 # Does not match marker syntax
                 self.members.append(line)
+            else:
+                indent, target = parsed
+                marker = LinkerScript.Marker(target, indent, [])
+                self.members.append(marker)
 
     def fill(self, mapping_rules):
         for member in self.members:
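For context, the marker grammar now yields a plain two-token result (indent, target) instead of named results, and pyparsing's PEP 8 parse_string replaces parseString. A minimal runnable sketch, assuming pyparsing 3, a Word stand-in for Fragment.IDENTIFIER, and made-up sample lines:

from pyparsing import ParseException, Suppress, White, Word, alphanums

target = Word(alphanums + '_')  # stand-in for Fragment.IDENTIFIER
reference = Suppress('mapping') + Suppress('[') + target + Suppress(']')
pattern = White(' \t') + reference

for line in ['    mapping[flash_text]', 'ENTRY(call_start_cpu0);']:
    try:
        parsed = pattern.parse_string(line)
    except ParseException:
        print('literal:', line.rstrip())  # copied to the output as-is
    else:
        indent, target_name = parsed      # tuple unpacking, as in the diff
        print('marker :', target_name, 'indented by', len(indent))

The try/except/else restructuring also keeps the marker handling out of the except-guarded block, so a ParseException raised while building the marker can no longer be silently misclassified as "not a marker".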
@@ -1,5 +1,5 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -9,7 +9,7 @@ from .entity import Entity
 # https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#OUTPUT-SECTION-DESCRIPTION.
 
 
-class AlignAtAddress():
+class AlignAtAddress:
     """
     Outputs assignment of builtin function ALIGN to current
     position:
@@ -31,7 +31,7 @@ class AlignAtAddress():
                 self.alignment == other.alignment)
 
 
-class SymbolAtAddress():
+class SymbolAtAddress:
     """
     Outputs assignment of builtin function ABSOLUTE to a symbol
     for current position:
@@ -54,7 +54,7 @@ class SymbolAtAddress():
                 self.symbol == other.symbol)
 
 
-class InputSectionDesc():
+class InputSectionDesc:
     """
     Outputs an input section description as described in
     https://www.acrc.bris.ac.uk/acrc/RedHat/rhel-ld-en-4/sections.html#INPUT-SECTION.
@@ -1,35 +1,19 @@
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
 import kconfiglib
-from pyparsing import (Combine, Group, Literal, Optional, Word, alphanums, hexnums, infixNotation, nums, oneOf,
-                       opAssoc, printables, quotedString, removeQuotes)
 
 
 class SDKConfig:
     """
     Evaluates conditional expressions based on the build's sdkconfig and Kconfig files.
-    This also defines the grammar of conditional expressions.
     """
 
-    # A configuration entry is in the form CONFIG=VALUE. Definitions of components of that grammar
-    IDENTIFIER = Word(alphanums.upper() + '_')
-
-    HEX = Combine('0x' + Word(hexnums)).setParseAction(lambda t: int(t[0], 16))
-    DECIMAL = Combine(Optional(Literal('+') | Literal('-')) + Word(nums)).setParseAction(lambda t: int(t[0]))
-    LITERAL = Word(printables.replace(':', ''))
-    QUOTED_LITERAL = quotedString.setParseAction(removeQuotes)
-
-    VALUE = HEX | DECIMAL | LITERAL | QUOTED_LITERAL
-
-    # Operators supported by the expression evaluation
-    OPERATOR = oneOf(['=', '!=', '>', '<', '<=', '>='])
-
     def __init__(self, kconfig_file, sdkconfig_file):
         self.config = kconfiglib.Kconfig(kconfig_file)
         self.config.load_config(sdkconfig_file)
+        self.config.warn = False  # eval_string may contain un-declared symbol
 
     def evaluate_expression(self, expression):
         result = self.config.eval_string(expression)
@@ -40,23 +24,3 @@ class SDKConfig:
             return True
         else:  # m
             raise Exception('unsupported config expression result')
-
-    @staticmethod
-    def get_expression_grammar():
-        identifier = SDKConfig.IDENTIFIER.setResultsName('identifier')
-        operator = SDKConfig.OPERATOR.setResultsName('operator')
-        value = SDKConfig.VALUE.setResultsName('value')
-
-        test_binary = identifier + operator + value
-        test_single = identifier
-
-        test = test_binary | test_single
-
-        condition = Group(Optional('(').suppress() + test + Optional(')').suppress())
-
-        grammar = infixNotation(condition, [
-            ('!', 1, opAssoc.RIGHT),
-            ('&&', 2, opAssoc.LEFT),
-            ('||', 2, opAssoc.LEFT)])
-
-        return grammar
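With the hand-rolled pyparsing grammar gone, condition evaluation is delegated entirely to kconfiglib. A hedged sketch of the resulting call pattern (the file names and the sample expression are placeholders):

import kconfiglib

config = kconfiglib.Kconfig('Kconfig')  # placeholder Kconfig tree
config.load_config('sdkconfig')         # placeholder build configuration
config.warn = False  # expressions may reference undeclared symbols

# eval_string evaluates Kconfig expression syntax and returns a tristate:
# 0 (n), 1 (m) or 2 (y); evaluate_expression above maps y to True.
result = config.eval_string('PERFORMANCE_LEVEL = 3')
print(result == 2)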
@@ -1,17 +1,18 @@
 #!/usr/bin/env python
 # coding=utf-8
 #
-# SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
+import os
 import sys
 import unittest
 
 try:
     from ldgen.entity import Entity, EntityDB
 except ImportError:
-    sys.path.append('../')
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
     from ldgen.entity import Entity, EntityDB
 
 
(File diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -10,20 +10,23 @@ import os
 import sys
 import tempfile
 import unittest
 
-try:
-    from ldgen.generation import Generation, GenerationException
-except ImportError:
-    sys.path.append('../')
-    from ldgen.generation import Generation, GenerationException
-
 from io import StringIO
 
-from ldgen.entity import Entity, EntityDB
-from ldgen.fragments import FragmentFile
-from ldgen.linker_script import LinkerScript
-from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
-from ldgen.sdkconfig import SDKConfig
+try:
+    from ldgen.entity import Entity, EntityDB
+    from ldgen.fragments import parse_fragment_file
+    from ldgen.generation import Generation, GenerationException
+    from ldgen.linker_script import LinkerScript
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+    from ldgen.sdkconfig import SDKConfig
+except ImportError:
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+    from ldgen.entity import Entity, EntityDB
+    from ldgen.fragments import parse_fragment_file
+    from ldgen.generation import Generation, GenerationException
+    from ldgen.linker_script import LinkerScript
+    from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
+    from ldgen.sdkconfig import SDKConfig
 
 ROOT = Entity('*')
 
@@ -58,9 +61,8 @@ class GenerationTest(unittest.TestCase):
 
         self.sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')
 
-        with open('data/base.lf') as fragment_file_obj:
-            fragment_file = FragmentFile(fragment_file_obj, self.sdkconfig)
-            self.generation.add_fragments_from_file(fragment_file)
+        fragment_file = parse_fragment_file('data/base.lf', self.sdkconfig)
+        self.generation.add_fragments_from_file(fragment_file)
 
         self.entities = EntityDB()
 
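The updated setUp also documents the new parser entry point: parse_fragment_file replaces the FragmentFile constructor and no longer needs a pre-opened file object. A usage sketch grounded in the lines above (printing .name and .path follows the fragment fields this diff touches elsewhere):

from ldgen.fragments import parse_fragment_file
from ldgen.sdkconfig import SDKConfig

sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')  # paths as in setUp
fragment_file = parse_fragment_file('data/base.lf', sdkconfig)

# The result exposes .fragments, consumed by Generation.add_fragments_from_file.
for fragment in fragment_file.fragments:
    print(type(fragment).__name__, fragment.name, fragment.path)

The add_fragments helper below passes the object returned by create_fragment_file rather than a path, so the parser presumably accepts both; treat that as an assumption rather than documented API.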
@@ -78,7 +80,7 @@ class GenerationTest(unittest.TestCase):
 
     def add_fragments(self, text):
         fragment_file = self.create_fragment_file(text)
-        fragment_file = FragmentFile(fragment_file, self.sdkconfig)
+        fragment_file = parse_fragment_file(fragment_file, self.sdkconfig)
         self.generation.add_fragments_from_file(fragment_file)
 
     def write(self, expected, actual):
@@ -1062,43 +1064,6 @@ entries:
         with self.assertRaises(GenerationException):
             self.generation.generate(self.entities)
 
-    def test_disambiguated_obj(self):
-        # Test command generation for disambiguated entry. Should produce similar
-        # results to test_nondefault_mapping_symbol.
-        mapping = u"""
-[mapping:test]
-archive: libfreertos.a
-entries:
-    port.c:xPortGetTickRateHz (noflash) #1
-"""
-        port = Entity('libfreertos.a', 'port.c')
-        self.add_fragments(mapping)
-        actual = self.generation.generate(self.entities)
-        expected = self.generate_default_rules()
-
-        flash_text = expected['flash_text']
-        iram0_text = expected['iram0_text']
-
-        # Generate exclusion in flash_text A
-        flash_text[0].exclusions.add(port)
-
-        # Generate intermediate command B
-        # List all relevant sections except the symbol
-        # being mapped
-        port_sections = self.entities.get_sections('libfreertos.a', 'port.c')
-        filtered_sections = fnmatch.filter(port_sections, '.literal.*')
-        filtered_sections.extend(fnmatch.filter(port_sections, '.text.*'))
-
-        filtered_sections = [s for s in filtered_sections if not s.endswith('xPortGetTickRateHz')]
-        filtered_sections.append('.text')
-
-        flash_text.append(InputSectionDesc(port, set(filtered_sections), []))
-
-        # Input section commands in iram_text for #1 C
-        iram0_text.append(InputSectionDesc(port, set(['.text.xPortGetTickRateHz', '.literal.xPortGetTickRateHz']), []))
-
-        self.compare_rules(expected, actual)
-
     def test_root_mapping_fragment_conflict(self):
         # Test that root mapping fragments are also checked for
         # conflicts.
@@ -1258,84 +1223,6 @@ entries:
 
         self.compare_rules(expected, actual)
 
-    def test_conditional_on_scheme_legacy_mapping_00(self):
-        # Test use of conditional scheme on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping]
-archive: lib.a
-entries:
-    * (cond_noflash)
-"""
-        self._test_conditional_on_scheme(0, mapping)
-
-    def test_conditional_on_scheme_legacy_mapping_01(self):
-        # Test use of conditional scheme on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping]
-archive: lib.a
-entries:
-    * (cond_noflash)
-"""
-        self._test_conditional_on_scheme(0, mapping)
-
-    def test_conditional_entries_legacy_mapping_fragment(self):
-        # Test conditional entries on legacy mapping fragment grammar.
-        mapping = u"""
-[mapping:default]
-archive: *
-entries:
-    * (default)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 0
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 2
-    obj1 (noflash)
-    obj2 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj1 (noflash)
-    obj2 (noflash)
-    obj3 (noflash)
-"""
-        self.test_conditional_mapping(mapping)
-
-    def test_multiple_fragment_same_lib_conditional_legacy(self):
-        # Test conditional entries on legacy mapping fragment grammar
-        # across multiple fragments.
-        mapping = u"""
-[mapping:default]
-archive: *
-entries:
-    * (default)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 0
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 2
-    obj1 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj1 (noflash)
-
-[mapping]
-archive: lib.a
-entries:
-    : PERFORMANCE_LEVEL = 1
-    obj1 (noflash) # ignore duplicate definition
-    : PERFORMANCE_LEVEL = 2
-    obj2 (noflash)
-    : PERFORMANCE_LEVEL = 3
-    obj2 (noflash)
-    obj3 (noflash)
-"""
-
-        self.test_conditional_mapping(mapping)
-
     def test_multiple_fragment_same_lib_conditional(self):
         # Test conditional entries on new mapping fragment grammar.
         # across multiple fragments.
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 #
-# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
+# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 # SPDX-License-Identifier: Apache-2.0
 #
 
+import os
 import sys
 import unittest
 
@@ -11,7 +12,7 @@ try:
     from ldgen.entity import Entity
     from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 except ImportError:
-    sys.path.append('../')
+    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
     from ldgen.entity import Entity
     from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
 