#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import collections
import itertools
import os
import fnmatch

from fragments import Sections, Scheme, Mapping, Fragment
from pyparsing import Suppress, White, ParseException, Literal, Group, ZeroOrMore
from pyparsing import Word, OneOrMore, nums, alphanums, alphas, Optional, restOfLine
from ldgen_common import LdGenFailure


class PlacementRule():
    """
    Encapsulates a generated placement rule placed under a target.
    """
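
    # Illustrative sketch (hypothetical archive/object names, not executed):
    # a rule such as
    #   PlacementRule("libfreertos.a", "timers", None, entries, "iram0_text")
    # is rendered by __str__ as a linker input-section description along the
    # lines of:
    #   *libfreertos.a:timers.*(.literal .literal.* .text .text.*)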

    DEFAULT_SPECIFICITY = 0
    ARCHIVE_SPECIFICITY = 1
    OBJECT_SPECIFICITY = 2
    SYMBOL_SPECIFICITY = 3

    class __container():
        def __init__(self, content):
            self.content = content

    __metadata = collections.namedtuple("__metadata", "excludes expansions expanded")

    def __init__(self, archive, obj, symbol, sections, target):
        if archive == "*":
            archive = None

        if obj == "*":
            obj = None

        self.archive = archive
        self.obj = obj
        self.symbol = symbol
        self.target = target
        self.sections = dict()

        self.specificity = 0
        self.specificity += 1 if self.archive else 0
        self.specificity += 1 if (self.obj and not self.obj == '*') else 0
        self.specificity += 1 if self.symbol else 0

        for section in sections:
            section_data = Sections.get_section_data_from_entry(section, self.symbol)

            if not self.symbol:
                for s in section_data:
                    metadata = self.__metadata(self.__container([]), self.__container([]), self.__container(False))
                    self.sections[s] = metadata
            else:
                (section, expansion) = section_data
                if expansion:
                    metadata = self.__metadata(self.__container([]), self.__container([expansion]), self.__container(True))
                    self.sections[section] = metadata

    def get_section_names(self):
        return self.sections.keys()

    def add_exclusion(self, other, sections_infos=None):
        # Utility functions for this method
        def do_section_expansion(rule, section):
            if section in rule.get_section_names():
                sections_in_obj = sections_infos.get_obj_sections(rule.archive, rule.obj)

                expansions = [n for n in sections_in_obj or [] if fnmatch.fnmatch(n, section)]
                return expansions

        def remove_section_expansions(rule, section, expansions):
            existing_expansions = self.sections[section].expansions.content
            self.sections[section].expansions.content = [e for e in existing_expansions if e not in expansions]

        # Exit immediately if the exclusion to be added is more general than this rule.
        if not other.is_more_specific_rule_of(self):
            return

        for section in self.get_sections_intersection(other):
            if other.specificity == PlacementRule.SYMBOL_SPECIFICITY:
                # If this section has not been expanded previously, expand now and keep track.
                previously_expanded = self.sections[section].expanded.content
                if not previously_expanded:
                    expansions = do_section_expansion(self, section)
                    if expansions:
                        self.sections[section].expansions.content = expansions
                        self.sections[section].expanded.content = True
                        previously_expanded = True

                # Remove the sections corresponding to the symbol name
                remove_section_expansions(self, section, other.sections[section].expansions.content)

                # If it has been expanded previously but now the expansions list is empty,
                # it means adding exclusions has exhausted the list. Remove the section entirely.
                if previously_expanded and not self.sections[section].expansions.content:
                    del self.sections[section]
            else:
                # A rule section can have multiple rule sections excluded from it. Get the
                # most specific rule from the list, and if an even more specific rule is found,
                # replace it entirely. Otherwise, keep appending.
                exclusions = self.sections[section].excludes
                exclusions_list = exclusions.content if exclusions.content is not None else []
                # Materialize the filter so the membership tests below do not consume it (Python 3).
                exclusions_to_remove = list(filter(lambda r: r.is_more_specific_rule_of(other), exclusions_list))

                remaining_exclusions = [e for e in exclusions_list if e not in exclusions_to_remove]
                remaining_exclusions.append(other)

                self.sections[section].excludes.content = remaining_exclusions
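
    # For illustration (hypothetical names): excluding a symbol-specific rule
    # for libfoo.a:bar:my_func from an object-level rule expands the object's
    # '.text.*' sections using the objdump info, drops '.text.my_func' from
    # the expansions, and the remaining sections are emitted individually.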

    def get_sections_intersection(self, other):
        return set(self.sections.keys()).intersection(set(other.sections.keys()))

    def is_more_specific_rule_of(self, other):
        if self.specificity <= other.specificity:
            return False

        # Compare archive, obj and symbol
        for entity_index in range(1, other.specificity + 1):
            if self[entity_index] != other[entity_index] and other[entity_index] is not None:
                return False

        return True

    def maps_same_entities_as(self, other):
        if self.specificity != other.specificity:
            return False

        # Compare archive, obj and symbol
        for entity_index in range(1, other.specificity + 1):
            if self[entity_index] != other[entity_index] and other[entity_index] is not None:
                return False

        return True

    def __getitem__(self, key):
        if key == PlacementRule.ARCHIVE_SPECIFICITY:
            return self.archive
        elif key == PlacementRule.OBJECT_SPECIFICITY:
            return self.obj
        elif key == PlacementRule.SYMBOL_SPECIFICITY:
            return self.symbol
        else:
            return None

    def __str__(self):
        sorted_sections = sorted(self.get_section_names())

        sections_string = list()

        for section in sorted_sections:
            exclusions = self.sections[section].excludes.content

            exclusion_string = None

            if exclusions:
                exclusion_string = " ".join(map(lambda e: "*" + e.archive + (":" + e.obj + ".*" if e.obj else ""), exclusions))
                exclusion_string = "EXCLUDE_FILE(" + exclusion_string + ")"
            else:
                exclusion_string = ""

            section_string = None
            exclusion_section_string = None

            section_expansions = self.sections[section].expansions.content
            section_expanded = self.sections[section].expanded.content

            if section_expansions and section_expanded:
                section_string = " ".join(section_expansions)
                exclusion_section_string = section_string
            else:
                section_string = section
                exclusion_section_string = exclusion_string + " " + section_string

            sections_string.append(exclusion_section_string)

        sections_string = " ".join(sections_string)

        archive = str(self.archive) if self.archive else ""
        obj = (str(self.obj) + ".*") if self.obj else ""

        # Handle output string generation based on information available
        if self.specificity == PlacementRule.DEFAULT_SPECIFICITY:
            rule_string = "*(%s)" % (sections_string)
        elif self.specificity == PlacementRule.ARCHIVE_SPECIFICITY:
            rule_string = "*%s:(%s)" % (archive, sections_string)
        else:
            rule_string = "*%s:%s(%s)" % (archive, obj, sections_string)

        return rule_string

    def __eq__(self, other):
        if id(self) == id(other):
            return True

        def exclusions_set(exclusions):
            exclusions_set = {(e.archive, e.obj, e.symbol, e.target) for e in exclusions}
            return exclusions_set

        if self.archive != other.archive:
            return False

        if self.obj != other.obj:
            return False

        if self.symbol != other.symbol:
            return False

        if set(self.sections.keys()) != set(other.sections.keys()):
            return False

        for (section, metadata) in self.sections.items():
            self_meta = metadata
            other_meta = other.sections[section]

            if exclusions_set(self_meta.excludes.content) != exclusions_set(other_meta.excludes.content):
                return False

            if set(self_meta.expansions.content) != set(other_meta.expansions.content):
                return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __iter__(self):
        yield self.archive
        yield self.obj
        yield self.symbol


class GenerationModel:
    """
    Implements generation of placement rules based on collected sections, scheme and mapping fragments.
    """
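
    # Typical flow (illustrative sketch; variable names are hypothetical,
    # the methods are the ones defined below):
    #
    #   model = GenerationModel()
    #   model.add_fragments_from_file(fragment_file)
    #   placements = model.generate_rules(sections_infos)
    #   # 'placements' maps each target name to its list of PlacementRule objects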

    DEFAULT_SCHEME = "default"

    def __init__(self):
        self.schemes = {}
        self.sections = {}
        self.mappings = {}

    def _add_mapping_rules(self, archive, obj, symbol, scheme_name, scheme_dict, rules):
        # Use an ordinary dictionary to raise exception on non-existing keys
        temp_dict = dict(scheme_dict)

        sections_bucket = temp_dict[scheme_name]

        for (target, sections) in sections_bucket.items():
            section_entries = []

            for section in sections:
                section_entries.extend(section.entries)

            rule = PlacementRule(archive, obj, symbol, section_entries, target)

            if rule not in rules:
                rules.append(rule)

    def _build_scheme_dictionary(self):
        scheme_dictionary = collections.defaultdict(dict)

        # Collect sections into buckets based on target name
        for scheme in self.schemes.values():
            sections_bucket = collections.defaultdict(list)

            for (sections_name, target_name) in scheme.entries:
                # Get the sections under the bucket 'target_name'. If this bucket does not exist
                # it is created automatically.
                sections_in_bucket = sections_bucket[target_name]

                try:
                    sections = self.sections[sections_name]
                except KeyError:
                    message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections_name + "'."
                    raise GenerationException(message, scheme)

                sections_in_bucket.append(sections)

            scheme_dictionary[scheme.name] = sections_bucket

        # Search for and raise exception on first instance of sections mapped to multiple targets
        for (scheme_name, sections_bucket) in scheme_dictionary.items():
            for sections_a, sections_b in itertools.combinations(sections_bucket.values(), 2):
                set_a = set()
                set_b = set()

                for sections in sections_a:
                    set_a.update(sections.entries)

                for sections in sections_b:
                    set_b.update(sections.entries)

                intersection = set_a.intersection(set_b)

                # If the intersection is a non-empty set, it means sections are mapped to multiple
                # targets. Raise exception.
                if intersection:
                    scheme = self.schemes[scheme_name]
                    message = "Sections " + str(intersection) + " mapped to multiple targets."
                    raise GenerationException(message, scheme)

        return scheme_dictionary

    def generate_rules(self, sections_infos):
        scheme_dictionary = self._build_scheme_dictionary()

        # Generate default rules
        default_rules = list()
        self._add_mapping_rules(None, None, None, GenerationModel.DEFAULT_SCHEME, scheme_dictionary, default_rules)

        all_mapping_rules = collections.defaultdict(list)

        # Generate rules based on mapping fragments
        for mapping in self.mappings.values():
            archive = mapping.archive
            mapping_rules = all_mapping_rules[archive]
            for (obj, symbol, scheme_name) in mapping.entries:
                try:
                    if not (obj == Mapping.MAPPING_ALL_OBJECTS and symbol is None and
                            scheme_name == GenerationModel.DEFAULT_SCHEME):
                        self._add_mapping_rules(archive, obj, symbol, scheme_name, scheme_dictionary, mapping_rules)
                except KeyError:
                    message = GenerationException.UNDEFINED_REFERENCE + " to scheme '" + scheme_name + "'."
                    raise GenerationException(message, mapping)

        # Detect rule conflicts
        for mapping_rules in all_mapping_rules.items():
            self._detect_conflicts(mapping_rules)

        # Add exclusions
        for mapping_rules in all_mapping_rules.values():
            self._create_exclusions(mapping_rules, default_rules, sections_infos)

        placement_rules = collections.defaultdict(list)

        # Add the default rules grouped by target
        for default_rule in default_rules:
            existing_rules = placement_rules[default_rule.target]
            if default_rule.get_section_names():
                existing_rules.append(default_rule)

        archives = sorted(all_mapping_rules.keys())

        for archive in archives:
            # Add the mapping rules grouped by target
            mapping_rules = sorted(all_mapping_rules[archive], key=lambda m: (m.specificity, str(m)))
            for mapping_rule in mapping_rules:
                existing_rules = placement_rules[mapping_rule.target]
                if mapping_rule.get_section_names():
                    existing_rules.append(mapping_rule)

        return placement_rules

    def _detect_conflicts(self, rules):
        (archive, rules_list) = rules

        for specificity in range(0, PlacementRule.OBJECT_SPECIFICITY + 1):
            rules_with_specificity = filter(lambda r: r.specificity == specificity, rules_list)

            for rule_a, rule_b in itertools.combinations(rules_with_specificity, 2):
                intersections = rule_a.get_sections_intersection(rule_b)

                if intersections and rule_a.maps_same_entities_as(rule_b):
                    rules_string = str([str(rule_a), str(rule_b)])
                    message = "Rules " + rules_string + " map sections " + str(list(intersections)) + " into multiple targets."
                    raise GenerationException(message)

    def _create_extra_rules(self, rules):
        # This function generates extra rules for symbol-specific rules. The reason for generating extra rules is to isolate,
        # as much as possible, rules that require expansion. Particularly, object-specific extra rules are generated.
        rules_to_process = sorted(rules, key=lambda r: r.specificity)
        symbol_specific_rules = list(filter(lambda r: r.specificity == PlacementRule.SYMBOL_SPECIFICITY, rules_to_process))

        extra_rules = dict()

        for symbol_specific_rule in symbol_specific_rules:
            extra_rule_candidate = {s: None for s in symbol_specific_rule.get_section_names()}

            super_rules = filter(lambda r: symbol_specific_rule.is_more_specific_rule_of(r), rules_to_process)

            # Take a look at the existing rules that are more general than the current symbol-specific rule.
            # Only generate an extra rule if there is no existing object-specific rule for that section.
            for super_rule in super_rules:
                intersections = symbol_specific_rule.get_sections_intersection(super_rule)
                for intersection in intersections:
                    if super_rule.specificity != PlacementRule.OBJECT_SPECIFICITY:
                        extra_rule_candidate[intersection] = super_rule
                    else:
                        extra_rule_candidate[intersection] = None

            # Generate the extra rules for the symbol-specific rule section, keeping track of the generated extra rules
            for (section, section_rule) in extra_rule_candidate.items():
                if section_rule:
                    extra_rule = None
                    extra_rules_key = (symbol_specific_rule.archive, symbol_specific_rule.obj, section_rule.target)

                    try:
                        extra_rule = extra_rules[extra_rules_key]

                        if section not in extra_rule.get_section_names():
                            new_rule = PlacementRule(extra_rule.archive, extra_rule.obj, extra_rule.symbol,
                                                     list(extra_rule.get_section_names()) + [section], extra_rule.target)
                            extra_rules[extra_rules_key] = new_rule
                    except KeyError:
                        extra_rule = PlacementRule(symbol_specific_rule.archive, symbol_specific_rule.obj, None, [section], section_rule.target)
                        extra_rules[extra_rules_key] = extra_rule

        return extra_rules.values()

    def _create_exclusions(self, mapping_rules, default_rules, sections_info):
        rules = list(default_rules)
        rules.extend(mapping_rules)

        extra_rules = self._create_extra_rules(rules)

        mapping_rules.extend(extra_rules)
        rules.extend(extra_rules)

        # Sort the rules by how specific they are, from lowest to highest:
        # * -> lib:* -> lib:obj -> lib:obj:symbol
        sorted_rules = sorted(rules, key=lambda r: r.specificity)

        # Now that the rules have been sorted, loop through each rule, and then loop
        # through the rules below it (higher indices), adding exclusions whenever appropriate.
        for general_rule in sorted_rules:
            for specific_rule in reversed(sorted_rules):
                if (specific_rule.specificity > general_rule.specificity and
                        specific_rule.specificity != PlacementRule.SYMBOL_SPECIFICITY) or \
                        (specific_rule.specificity == PlacementRule.SYMBOL_SPECIFICITY and
                         general_rule.specificity == PlacementRule.OBJECT_SPECIFICITY):
                    general_rule.add_exclusion(specific_rule, sections_info)
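
    # For illustration (hypothetical archive/object names): when a mapping pins
    # libfoo.a:bar to a non-default scheme, the default catch-all rule picks up
    # an exclusion and renders as something like
    #   *(EXCLUDE_FILE(*libfoo.a:bar.*) .literal EXCLUDE_FILE(*libfoo.a:bar.*) .text)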

    def add_fragments_from_file(self, fragment_file):
        for fragment in fragment_file.fragments:
            dict_to_append_to = None

            if isinstance(fragment, Mapping) and fragment.deprecated and fragment.name in self.mappings.keys():
                self.mappings[fragment.name].entries |= fragment.entries
            else:
                if isinstance(fragment, Scheme):
                    dict_to_append_to = self.schemes
                elif isinstance(fragment, Sections):
                    dict_to_append_to = self.sections
                else:
                    dict_to_append_to = self.mappings

                # Raise exception when a fragment of the same type and name is already stored
                if fragment.name in dict_to_append_to.keys():
                    stored = dict_to_append_to[fragment.name].path
                    new = fragment.path
                    message = "Duplicate definition of fragment '%s' found in %s and %s." % (fragment.name, stored, new)
                    raise GenerationException(message)

                dict_to_append_to[fragment.name] = fragment


class TemplateModel:
    """
    Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
    final output.
    """
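
    # A marker line in the template looks like the following (indentation is
    # captured and preserved in the output; 'flash_text' is an illustrative
    # target name):
    #
    #     mapping[flash_text]
    #
    # and is replaced by the placement rules generated for that target.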

    Marker = collections.namedtuple("Marker", "target indent rules")

    def __init__(self, template_file):
        self.members = []
        self.file = os.path.realpath(template_file.name)

        self._generate_members(template_file)

    def _generate_members(self, template_file):
        lines = template_file.readlines()

        target = Fragment.IDENTIFIER
        reference = Suppress("mapping") + Suppress("[") + target.setResultsName("target") + Suppress("]")
        pattern = White(" \t").setResultsName("indent") + reference

        # Find the markers in the template file line by line. If a line does not match the marker grammar,
        # set it as a literal to be copied as-is to the output file.
        for line in lines:
            try:
                parsed = pattern.parseString(line)

                indent = parsed.indent
                target = parsed.target

                marker = TemplateModel.Marker(target, indent, [])

                self.members.append(marker)
            except ParseException:
                # Does not match marker syntax
                self.members.append(line)

    def fill(self, mapping_rules):
        for member in self.members:
            target = None
            try:
                target = member.target
                rules = member.rules

                del rules[:]

                rules.extend(mapping_rules[target])
            except KeyError:
                message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
                raise GenerationException(message)
            except AttributeError:
                pass

    def write(self, output_file):
        # Add information that this is a generated file.
        output_file.write("/* Automatically generated file; DO NOT EDIT */\n")
        output_file.write("/* Espressif IoT Development Framework Linker Script */\n")
        output_file.write("/* Generated from: %s */\n" % self.file)
        output_file.write("\n")

        # Do the text replacement
        for member in self.members:
            try:
                indent = member.indent
                rules = member.rules

                for rule in rules:
                    generated_line = "".join([indent, str(rule), '\n'])
                    output_file.write(generated_line)
            except AttributeError:
                output_file.write(member)


class GenerationException(LdGenFailure):
    """
    Exception for linker script generation failures such as undefined references, failure to
    evaluate conditions, duplicate mappings, etc.
    """

    UNDEFINED_REFERENCE = "Undefined reference"

    def __init__(self, message, fragment=None):
        self.fragment = fragment
        self.message = message

    def __str__(self):
        if self.fragment:
            return "%s\nIn fragment '%s' defined in '%s'." % (self.message, self.fragment.name, self.fragment.path)
        else:
            return self.message


class SectionsInfo(dict):
    """
    Encapsulates the output of objdump. Contains information about the sections
    and object files of a static library.
    """
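
    # The expected dump format is objdump output for a static library,
    # abridged here with hypothetical paths, names and values:
    #
    #   In archive /path/to/libfoo.a:
    #
    #   bar.o:     file format elf32-xtensa-le
    #
    #   Sections:
    #   Idx Name          Size      VMA       LMA       File off  Algn
    #     0 .text         0000019c  00000000  00000000  00000034  2**2
    #                     CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE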

    __info = collections.namedtuple("__info", "filename content")

    def __init__(self):
        self.sections = dict()

    def add_sections_info(self, sections_info_dump):
        first_line = sections_info_dump.readline()

        archive_path = (Literal("In archive").suppress() +
                        White().suppress() +
                        # trim the last character (:) from archive_path
                        restOfLine.setResultsName("archive_path").setParseAction(lambda t: t[0][:-1]))
        parser = archive_path

        results = None

        try:
            results = parser.parseString(first_line)
        except ParseException as p:
            raise ParseException("Parsing sections info for library " + sections_info_dump.name + " failed. " + p.msg)

        archive = os.path.basename(results.archive_path)
        self.sections[archive] = SectionsInfo.__info(sections_info_dump.name, sections_info_dump.read())

    def _get_infos_from_file(self, info):
        # Object file line: '{object}: file format elf32-xtensa-le'
        object = Fragment.ENTITY.setResultsName("object") + Literal(":").suppress() + Literal("file format elf32-xtensa-le").suppress()

        # Sections table
        header = Suppress(Literal("Sections:") + Literal("Idx") + Literal("Name") + Literal("Size") + Literal("VMA") +
                          Literal("LMA") + Literal("File off") + Literal("Algn"))
        entry = Word(nums).suppress() + Fragment.ENTITY + Suppress(OneOrMore(Word(alphanums, exact=8)) +
                                                                   Word(nums + "*") + ZeroOrMore(Word(alphas.upper()) +
                                                                                                 Optional(Literal(","))))

        # Content is object file line + sections table
        content = Group(object + header + Group(ZeroOrMore(entry)).setResultsName("sections"))

        parser = Group(ZeroOrMore(content)).setResultsName("contents")

        sections_info_text = info.content
        results = None

        try:
            results = parser.parseString(sections_info_text)
        except ParseException as p:
            raise ParseException("Unable to parse section info file " + info.filename + ". " + p.msg)

        return results

    def get_obj_sections(self, archive, obj):
        stored = self.sections[archive]

        # Parse the contents of the sections file
        if not isinstance(stored, dict):
            parsed = self._get_infos_from_file(stored)
            stored = dict()
            for content in parsed.contents:
                sections = list(content.sections)
                stored[content.object] = sections
            self.sections[archive] = stored

        for obj_key in stored.keys():
            if obj_key == obj + ".o" or obj_key == obj + ".c.obj":
                return stored[obj_key]