tools: overhaul csv handling for mfg_gen and nvs_partition_gen

This fixes the issue where multiline strings and strings with delimiters inside the nvs input csv file were incorrectly parsed, and adds back the ability to add comment lines anywhere in the CSV file.

The issue stems from replacing the Python built-in csv module with manual line parsing, a change originally introduced when mfg data parsing stopped using the csv module.

This reverts to using the csv module for parsing and writing CSV data in both mfg_gen and nvs_partition_gen, fixes the original issue in mfg_gen, and improves code quality, making the code more readable and maintainable.

Closes https://github.com/espressif/esp-idf/issues/7175
This commit is contained in:
Djordje Nedic 2022-06-02 15:29:35 +02:00 committed by BOT
parent 0b80546f8e
commit bbc3add0e3
9 changed files with 311 additions and 379 deletions

View File

@ -43,7 +43,7 @@ Each line of a .csv file should contain 4 parameters, separated by a comma. The
| | | | Any values in these cells are ignored. |
+-----+-----------+----------------------------------------------------------------------+-----------------------------------------------------+
.. note:: The first line of the CSV file should be the column header and it is not configurable. Comments (if provided) are allowed only as the first line of the CSV file; the line that follows them should always be the column header. Comments should always start with the `#` symbol.
.. note:: The first line of the CSV file should always be the column header and it is not configurable.
Below is an example dump of such a CSV file::
@ -308,5 +308,5 @@ Caveats
-------
- Utility does not check for duplicate keys and will write data pertaining to both keys. You need to make sure that the keys are distinct.
- Once a new page is created, no data will be written in the space left on the previous page. Fields in the CSV file need to be ordered in such a way as to optimize memory.
- Utility supports using multiline strings with ``file`` type and singleline strings with ``data`` type in the CSV file.
- 64-bit datatype is not yet supported.

View File

@ -3,19 +3,8 @@
# esp-idf NVS partition generation tool. Tool helps in generating NVS-compatible
# partition binary, with key-value pair entries provided via a CSV file.
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import division, print_function
@ -24,6 +13,7 @@ import argparse
import array
import binascii
import codecs
import csv
import datetime
import distutils.dir_util
import os
@ -34,8 +24,6 @@ import zlib
from builtins import bytes, int, range
from io import open
from future.moves.itertools import zip_longest
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
@ -547,23 +535,22 @@ class NVS(object):
We don't have to guard re-invocation with try-except since no entry can span multiple pages.
"""
def write_entry(self, key, value, encoding):
# Encoding-specific handling
if encoding == 'hex2bin':
value = value.strip()
if len(value) % 2 != 0:
raise InputError('%s: Invalid data length. Should be multiple of 2.' % key)
value = binascii.a2b_hex(value)
if encoding == 'base64':
elif encoding == 'base64':
value = binascii.a2b_base64(value)
if encoding == 'string':
elif encoding == 'string':
if type(value) == bytes:
value = value.decode()
value += '\0'
encoding = encoding.lower()
varlen_encodings = ['string', 'binary', 'hex2bin', 'base64']
primitive_encodings = ['u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64']
varlen_encodings = {'string', 'binary', 'hex2bin', 'base64'}
primitive_encodings = {'u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64'}
if encoding in varlen_encodings:
try:
@ -904,46 +891,23 @@ def generate(args, is_encr_enabled=False, encr_key=None):
if is_encr_enabled and not encr_key:
encr_key = generate_key(args)
input_file = open(args.input, 'rt', encoding='utf8')
output_file = open(args.output, 'wb')
with open(args.input, 'rt', encoding='utf8') as input_file,\
open(args.output, 'wb') as output_file,\
nvs_open(output_file, input_size, args.version, is_encrypt=is_encr_enabled, key=encr_key) as nvs_obj:
# Comments are skipped
reader = csv.DictReader(filter(lambda row: row[0] != '#',input_file), delimiter=',')
if nvs_obj.version == Page.VERSION1:
version_set = VERSION1_PRINT
else:
version_set = VERSION2_PRINT
print('\nCreating NVS binary with version:', version_set)
line = input_file.readline().strip()
# Comments are skipped
while line.startswith('#'):
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
header = line.split(',')
while True:
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
value = line.split(',')
if len(value) == 1 and '' in value:
break
data = dict(zip_longest(header, value))
for row in reader:
try:
# Check key length
if len(data['key']) > 15:
raise InputError('Length of key `{}` should be <= 15 characters.'.format(data['key']))
write_entry(nvs_obj, data['key'], data['type'], data['encoding'], data['value'])
max_key_len = 15
if len(row['key']) > max_key_len:
raise InputError('Length of key `%s` should be <= 15 characters.' % row['key'])
write_entry(nvs_obj, row['key'], row['type'], row['encoding'], row['value'])
except InputError as e:
print(e)
filedir, filename = os.path.split(args.output)

View File

@ -6,7 +6,11 @@ dummyI8Key,data,i8,-128
dummyU16Key,data,u16,32768
dummyU32Key,data,u32,4294967295
dummyI32Key,data,i32,-2147483648
dummyStringKey,data,string,0A:0B:0C:0D:0E:0F
dummyStringKey,data,string,"Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Fusce quis risus justo.
Suspendisse egestas in nisi sit amet auctor.
Pellentesque rhoncus dictum sodales.
In justo erat, viverra at interdum eget, interdum vel dui."
dummyHex2BinKey,data,hex2bin,010203abcdef
dummyBase64Key,data,base64,MTIzYWJj
hexFileKey,file,hex2bin,testdata/sample.hex

Can't render this file because it has a wrong number of fields in line 2.

View File

@ -6,7 +6,11 @@ dummyI8Key,data,i8,-128
dummyU16Key,data,u16,32768
dummyU32Key,data,u32,4294967295
dummyI32Key,data,i32,-2147483648
dummyStringKey,data,string,0A:0B:0C:0D:0E:0F
dummyStringKey,data,string,"Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Fusce quis risus justo.
Suspendisse egestas in nisi sit amet auctor.
Pellentesque rhoncus dictum sodales.
In justo erat, viverra at interdum eget, interdum vel dui."
dummyHex2BinKey,data,hex2bin,010203abcdef
dummyBase64Key,data,base64,MTIzYWJj
hexFileKey,file,hex2bin,testdata/sample.hex

Can't render this file because it has a wrong number of fields in line 2.

View File

@ -1,21 +1,14 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef intrusive_list_h
#define intrusive_list_h
#include <cassert>
#include <unordered_map>
#include <cstddef>
template <typename T>
class intrusive_list;

View File

@ -1,16 +1,8 @@
// Copyright 2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <cstring>
#include "nvs_encrypted_partition.hpp"
@ -105,7 +97,7 @@ esp_err_t NVSEncryptedPartition::write(size_t addr, const void* src, size_t size
data_unit,
buf + offset,
buf + offset) != 0) {
delete buf;
delete [] buf;
return ESP_ERR_NVS_XTS_ENCR_FAILED;
}
}
@ -113,7 +105,7 @@ esp_err_t NVSEncryptedPartition::write(size_t addr, const void* src, size_t size
// write data
esp_err_t result = esp_partition_write(mESPPartition, addr, buf, size);
delete buf;
delete [] buf;
return result;
}

View File

@ -2682,6 +2682,96 @@ static void check_nvs_part_gen_args(SpiFlashEmulator *spi_flash_emulator,
TEST_ESP_OK( nvs_get_i32(handle, "dummyI32Key", &i32v));
CHECK(i32v == -2147483648);
char string_buf[256];
const char test_str[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n"
"Fusce quis risus justo.\n"
"Suspendisse egestas in nisi sit amet auctor.\n"
"Pellentesque rhoncus dictum sodales.\n"
"In justo erat, viverra at interdum eget, interdum vel dui.";
size_t str_len = sizeof(test_str);
TEST_ESP_OK( nvs_get_str(handle, "dummyStringKey", string_buf, &str_len));
CHECK(strncmp(string_buf, test_str, str_len) == 0);
char buf[64] = {0};
uint8_t hexdata[] = {0x01, 0x02, 0x03, 0xab, 0xcd, 0xef};
size_t buflen = 64;
int j;
TEST_ESP_OK( nvs_get_blob(handle, "dummyHex2BinKey", buf, &buflen));
CHECK(memcmp(buf, hexdata, buflen) == 0);
uint8_t base64data[] = {'1', '2', '3', 'a', 'b', 'c'};
TEST_ESP_OK( nvs_get_blob(handle, "dummyBase64Key", buf, &buflen));
CHECK(memcmp(buf, base64data, buflen) == 0);
buflen = 64;
uint8_t hexfiledata[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
TEST_ESP_OK( nvs_get_blob(handle, "hexFileKey", buf, &buflen));
CHECK(memcmp(buf, hexfiledata, buflen) == 0);
buflen = 64;
uint8_t strfiledata[64] = "abcdefghijklmnopqrstuvwxyz\0";
TEST_ESP_OK( nvs_get_str(handle, "stringFileKey", buf, &buflen));
CHECK(memcmp(buf, strfiledata, buflen) == 0);
char bin_data[5200];
size_t bin_len = sizeof(bin_data);
char binfiledata[5200];
ifstream file;
file.open(filename);
file.read(binfiledata,5200);
TEST_ESP_OK( nvs_get_blob(handle, "binFileKey", bin_data, &bin_len));
CHECK(memcmp(bin_data, binfiledata, bin_len) == 0);
file.close();
nvs_close(handle);
TEST_ESP_OK(nvs_flash_deinit_partition(part_name));
}
static void check_nvs_part_gen_args_mfg(SpiFlashEmulator *spi_flash_emulator,
char const *part_name,
int size,
char const *filename,
bool is_encr,
nvs_sec_cfg_t* xts_cfg)
{
nvs_handle_t handle;
esp_partition_t esp_part;
esp_part.encrypted = false; // we're not testing generic flash encryption here, only the legacy NVS encryption
esp_part.address = 0;
esp_part.size = size * SPI_FLASH_SEC_SIZE;
strncpy(esp_part.label, part_name, PART_NAME_MAX_SIZE);
shared_ptr<Partition> part;
if (is_encr) {
NVSEncryptedPartition *enc_part = new NVSEncryptedPartition(&esp_part);
TEST_ESP_OK(enc_part->init(xts_cfg));
part.reset(enc_part);
} else {
part.reset(new PartitionEmulation(spi_flash_emulator, 0, size, part_name));
}
TEST_ESP_OK( NVSPartitionManager::get_instance()->init_custom(part.get(), 0, size) );
TEST_ESP_OK( nvs_open_from_partition(part_name, "dummyNamespace", NVS_READONLY, &handle));
uint8_t u8v;
TEST_ESP_OK( nvs_get_u8(handle, "dummyU8Key", &u8v));
CHECK(u8v == 127);
int8_t i8v;
TEST_ESP_OK( nvs_get_i8(handle, "dummyI8Key", &i8v));
CHECK(i8v == -128);
uint16_t u16v;
TEST_ESP_OK( nvs_get_u16(handle, "dummyU16Key", &u16v));
CHECK(u16v == 32768);
uint32_t u32v;
TEST_ESP_OK( nvs_get_u32(handle, "dummyU32Key", &u32v));
CHECK(u32v == 4294967295);
int32_t i32v;
TEST_ESP_OK( nvs_get_i32(handle, "dummyI32Key", &i32v));
CHECK(i32v == -2147483648);
char buf[64] = {0};
size_t buflen = 64;
TEST_ESP_OK( nvs_get_str(handle, "dummyStringKey", buf, &buflen));
@ -2723,7 +2813,6 @@ static void check_nvs_part_gen_args(SpiFlashEmulator *spi_flash_emulator,
TEST_ESP_OK(nvs_flash_deinit_partition(part_name));
}
TEST_CASE("check and read data from partition generated via partition generation utility with multipage blob support disabled", "[nvs_part_gen]")
{
int status;
@ -2886,10 +2975,10 @@ TEST_CASE("check and read data from partition generated via manufacturing utilit
}
SpiFlashEmulator emu1("../../../tools/mass_mfg/host_test/bin/Test-1.bin");
check_nvs_part_gen_args(&emu1, "test", 3, "mfg_testdata/sample_singlepage_blob.bin", false, NULL);
check_nvs_part_gen_args_mfg(&emu1, "test", 3, "mfg_testdata/sample_singlepage_blob.bin", false, NULL);
SpiFlashEmulator emu2("../nvs_partition_generator/Test-1-partition.bin");
check_nvs_part_gen_args(&emu2, "test", 3, "testdata/sample_singlepage_blob.bin", false, NULL);
check_nvs_part_gen_args_mfg(&emu2, "test", 3, "testdata/sample_singlepage_blob.bin", false, NULL);
childpid = fork();
@ -2967,10 +3056,10 @@ TEST_CASE("check and read data from partition generated via manufacturing utilit
}
SpiFlashEmulator emu1("../../../tools/mass_mfg/host_test/bin/Test-1.bin");
check_nvs_part_gen_args(&emu1, "test", 4, "mfg_testdata/sample_multipage_blob.bin", false, NULL);
check_nvs_part_gen_args_mfg(&emu1, "test", 4, "mfg_testdata/sample_multipage_blob.bin", false, NULL);
SpiFlashEmulator emu2("../nvs_partition_generator/Test-1-partition.bin");
check_nvs_part_gen_args(&emu2, "test", 4, "testdata/sample_multipage_blob.bin", false, NULL);
check_nvs_part_gen_args_mfg(&emu2, "test", 4, "testdata/sample_multipage_blob.bin", false, NULL);
childpid = fork();
if (childpid == 0) {
@ -3465,11 +3554,11 @@ TEST_CASE("check and read data from partition generated via manufacturing utilit
cfg.tky[count] = 0x22;
}
check_nvs_part_gen_args(&emu1, NVS_DEFAULT_PART_NAME, 4, "mfg_testdata/sample_multipage_blob.bin", true, &cfg);
check_nvs_part_gen_args_mfg(&emu1, NVS_DEFAULT_PART_NAME, 4, "mfg_testdata/sample_multipage_blob.bin", true, &cfg);
SpiFlashEmulator emu2("../nvs_partition_generator/Test-1-partition-encrypted.bin");
check_nvs_part_gen_args(&emu2, NVS_DEFAULT_PART_NAME, 4, "testdata/sample_multipage_blob.bin", true, &cfg);
check_nvs_part_gen_args_mfg(&emu2, NVS_DEFAULT_PART_NAME, 4, "testdata/sample_multipage_blob.bin", true, &cfg);
childpid = fork();
@ -3585,11 +3674,11 @@ TEST_CASE("check and read data from partition generated via manufacturing utilit
cfg.tky[count] = buffer[count+32] & 255;
}
check_nvs_part_gen_args(&emu1, NVS_DEFAULT_PART_NAME, 4, "mfg_testdata/sample_multipage_blob.bin", true, &cfg);
check_nvs_part_gen_args_mfg(&emu1, NVS_DEFAULT_PART_NAME, 4, "mfg_testdata/sample_multipage_blob.bin", true, &cfg);
SpiFlashEmulator emu2("../nvs_partition_generator/Test-1-partition-encrypted.bin");
check_nvs_part_gen_args(&emu2, NVS_DEFAULT_PART_NAME, 4, "testdata/sample_multipage_blob.bin", true, &cfg);
check_nvs_part_gen_args_mfg(&emu2, NVS_DEFAULT_PART_NAME, 4, "testdata/sample_multipage_blob.bin", true, &cfg);
childpid = fork();
if (childpid == 0) {

View File

@ -1036,10 +1036,7 @@ components/nvs_flash/host_test/fixtures/test_fixtures.hpp
components/nvs_flash/host_test/nvs_page_test/main/nvs_page_test.cpp
components/nvs_flash/include/nvs_flash.h
components/nvs_flash/include/nvs_handle.hpp
components/nvs_flash/nvs_partition_generator/nvs_partition_gen.py
components/nvs_flash/src/intrusive_list.h
components/nvs_flash/src/nvs_cxx_api.cpp
components/nvs_flash/src/nvs_encrypted_partition.cpp
components/nvs_flash/src/nvs_encrypted_partition.hpp
components/nvs_flash/src/nvs_handle_locked.cpp
components/nvs_flash/src/nvs_handle_locked.hpp
@ -2286,7 +2283,6 @@ tools/ldgen/output_commands.py
tools/ldgen/samples/template.ld
tools/ldgen/sdkconfig.py
tools/ldgen/test/data/linker_script.ld
tools/mass_mfg/mfg_gen.py
tools/mocks/esp_system/include/esp_task.h
tools/templates/sample_component/include/main.h
tools/templates/sample_component/main.c

456
tools/mass_mfg/mfg_gen.py Executable file → Normal file
View File

@ -1,106 +1,96 @@
#!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import print_function
import argparse
import csv
import distutils.dir_util
import os
import sys
from io import open
from future.moves.itertools import zip_longest
try:
sys.path.insert(0, os.getenv('IDF_PATH') + '/components/nvs_flash/nvs_partition_generator/')
idf_path = os.environ['IDF_PATH']
sys.path.insert(0, idf_path + '/components/nvs_flash/nvs_partition_generator/')
import nvs_partition_gen
except Exception as e:
print(e)
sys.exit('Please check IDF_PATH')
def verify_values_exist(input_values_file, values_file_data, key_count_in_values_file, line_no=1):
def verify_values_exist(input_values_file, keys_in_values_file):
""" Verify all keys have corresponding values in values file
"""
if len(values_file_data) != key_count_in_values_file:
raise SystemExit('\nError: Number of values is not equal to number of keys in file: %s at line No:%s\n'
% (str(input_values_file), str(line_no)))
with open(input_values_file, 'r') as values_file:
values_file_reader = csv.reader(values_file, delimiter=',')
next(values_file_reader)
for line_num, line in enumerate(values_file_reader, start=2):
key_count_in_values_file = len(keys_in_values_file)
if len(line) != key_count_in_values_file:
raise SystemExit('\nError: Number of values is not equal to number of keys in file: %s at line No:%s\n'
% (str(input_values_file), str(line_num)))
def verify_keys_exist(values_file_keys, config_file_data):
def verify_keys_exist(values_file_keys, input_config_file):
""" Verify all keys from config file are present in values file
"""
keys_missing = []
for line_no, config_data in enumerate(config_file_data,1):
if not isinstance(config_data, str):
config_data = config_data.encode('utf-8')
config_data_line = config_data.strip().split(',')
if 'namespace' not in config_data_line:
if values_file_keys:
if config_data_line[0] == values_file_keys[0]:
del values_file_keys[0]
with open(input_config_file,'r') as config_file:
config_file_reader = csv.reader(config_file, delimiter=',')
for line_num, line in enumerate(config_file_reader, start=1):
if 'namespace' not in line:
if values_file_keys:
if line[0] == values_file_keys[0]:
del values_file_keys[0]
else:
keys_missing.append([line_num, line[0]])
else:
keys_missing.append([config_data_line[0], line_no])
else:
keys_missing.append([config_data_line[0], line_no])
keys_missing.append([line_num, line[0]])
if keys_missing:
for key, line_no in keys_missing:
print('Key:`', str(key), '` at line no:', str(line_no),
' in config file is not found in values file.')
raise SystemExit(1)
if keys_missing:
for line_num, key in keys_missing:
print('Key:`', str(key), '` at line no:', str(line_num),
' in config file is not found in values file.')
raise SystemExit(1)
def verify_datatype_encoding(input_config_file, config_file_data):
def verify_datatype_encoding(input_config_file):
""" Verify datatype and encodings from config file is valid
"""
valid_encodings = ['string', 'binary', 'hex2bin','u8', 'i8', 'u16', 'u32', 'i32','base64']
valid_datatypes = ['file','data','namespace']
line_no = 0
valid_encodings = {'string', 'binary', 'hex2bin','u8', 'i8', 'u16', 'u32', 'i32','base64'}
valid_datatypes = {'file','data','namespace'}
for data in config_file_data:
line_no += 1
if not isinstance(data, str):
data = data.encode('utf-8')
line = data.strip().split(',')
if line[1] not in valid_datatypes:
raise SystemExit('Error: config file: %s has invalid datatype at line no:%s\n'
% (str(input_config_file), str(line_no)))
if 'namespace' not in line:
if line[2] not in valid_encodings:
raise SystemExit('Error: config file: %s has invalid encoding at line no:%s\n'
% (str(input_config_file), str(line_no)))
with open(input_config_file,'r') as config_file:
config_file_reader = csv.reader(config_file, delimiter=',')
for line_num, line in enumerate(config_file_reader, start=1):
if line[1] not in valid_datatypes:
raise SystemExit('Error: config file: %s has invalid datatype at line no:%s\n`'
% (str(input_config_file), str(line_num)))
if 'namespace' not in line and line[2] not in valid_encodings:
raise SystemExit('Error: config file: %s has invalid encoding at line no:%s\n`'
% (str(input_config_file), str(line_num)))
def verify_file_data_count(cfg_file_data, keys_repeat):
def verify_file_data_count(input_config_file, keys_repeat):
""" Verify count of data on each line in config file is equal to 3
(as format must be: <key,type and encoding>)
"""
line_no = 0
with open(input_config_file, 'r') as config_file:
config_file_reader = csv.reader(config_file, delimiter=',')
for data in cfg_file_data:
line_no += 1
if not isinstance(data, str):
data = data.encode('utf-8')
line = data.strip().split(',')
if len(line) != 3 and line[0] not in keys_repeat:
raise SystemExit('Error: data missing in config file at line no:%s <format needed:key,type,encoding>\n'
% str(line_no))
for line_num, line in enumerate(config_file_reader, start=1):
if len(line) != 3 and line[0] not in keys_repeat:
raise SystemExit('Error: data missing in config file at line no:%s <format needed:key,type,encoding>\n'
% str(line_num))
def verify_data_in_file(input_config_file, input_values_file, config_file_keys, keys_in_values_file, keys_repeat):
@ -111,36 +101,16 @@ def verify_data_in_file(input_config_file, input_values_file, config_file_keys,
Verify each key has corresponding value in values file
"""
try:
values_file_keys = []
values_file_line = None
verify_file_data_count(input_config_file, keys_repeat)
verify_datatype_encoding(input_config_file)
# Get keys from values file present in config files
values_file_keys = get_keys(keys_in_values_file, config_file_keys)
with open(input_config_file, 'r', newline='\n') as cfg_file:
cfg_file_data = cfg_file.readlines()
verify_file_data_count(cfg_file_data, keys_repeat)
verify_datatype_encoding(input_config_file, cfg_file_data)
verify_keys_exist(values_file_keys, cfg_file_data)
verify_keys_exist(values_file_keys, input_config_file)
with open(input_values_file, 'r', newline='\n') as values_file:
key_count_in_values_file = len(keys_in_values_file)
lineno = 0
# Read first keys(header) line
values_file_data = values_file.readline()
lineno += 1
while values_file_data:
# Read values line
values_file_line = values_file.readline()
if not isinstance(values_file_line, str):
values_file_line = values_file_line.encode('utf-8')
values_file_data = values_file_line.strip().split(',')
lineno += 1
if len(values_file_data) == 1 and '' in values_file_data:
break
verify_values_exist(input_values_file, values_file_data, key_count_in_values_file, line_no=lineno)
verify_values_exist(input_values_file, keys_in_values_file)
except Exception as err:
print(err)
@ -150,11 +120,7 @@ def verify_data_in_file(input_config_file, input_values_file, config_file_keys,
def get_keys(keys_in_values_file, config_file_keys):
""" Get keys from values file present in config file
"""
values_file_keys = []
for key in keys_in_values_file:
if key in config_file_keys:
values_file_keys.append(key)
values_file_keys = [key for key in keys_in_values_file if key in config_file_keys]
return values_file_keys
@ -164,35 +130,31 @@ def add_config_data_per_namespace(input_config_file):
config_data_to_write = []
config_data_per_namespace = []
with open(input_config_file, 'r', newline='\n') as cfg_file:
config_data = cfg_file.readlines()
with open(input_config_file,'r') as csv_config_file:
config_file_reader = csv.reader(csv_config_file, delimiter=',')
# `config_data_per_namespace` is added to `config_data_to_write` list after reading next namespace
for data in config_data:
if not isinstance(data, str):
data = data.encode('utf-8')
cfg_data = data.strip().split(',')
if 'REPEAT' in cfg_data:
cfg_data.remove('REPEAT')
if 'namespace' in cfg_data:
if config_data_per_namespace:
config_data_to_write.append(config_data_per_namespace)
config_data_per_namespace = []
config_data_per_namespace.append(cfg_data)
# `config_data_per_namespace` is added to `config_data_to_write` list after reading next namespace
for config_data in config_file_reader:
if 'REPEAT' in config_data:
config_data.remove('REPEAT')
if 'namespace' in config_data:
if config_data_per_namespace:
config_data_to_write.append(config_data_per_namespace)
config_data_per_namespace = []
config_data_per_namespace.append(config_data)
else:
config_data_per_namespace.append(config_data)
else:
config_data_per_namespace.append(cfg_data)
else:
config_data_per_namespace.append(cfg_data)
config_data_per_namespace.append(config_data)
# `config_data_per_namespace` is added to `config_data_to_write` list as EOF is reached
if (not config_data_to_write) or (config_data_to_write and config_data_per_namespace):
config_data_to_write.append(config_data_per_namespace)
# `config_data_per_namespace` is added to `config_data_to_write` list as EOF is reached
if (not config_data_to_write) or (config_data_to_write and config_data_per_namespace):
config_data_to_write.append(config_data_per_namespace)
return config_data_to_write
def get_fileid_val(file_identifier, keys_in_config_file, keys_in_values_file,
values_data_line, key_value_data, fileid_value):
def get_fileid_val(file_identifier, key_value_data, fileid_value):
""" Get file identifier value
"""
file_id_found = False
@ -213,36 +175,26 @@ def add_data_to_file(config_data_to_write, key_value_pair, output_csv_file):
"""
header = ['key', 'type', 'encoding', 'value']
data_to_write = []
newline = u'\n'
target_csv_file = open(output_csv_file, 'w', newline=None)
with open(output_csv_file, 'w', newline='') as target_csv_file:
output_file_writer = csv.writer(target_csv_file, delimiter=',')
output_file_writer.writerow(header)
line_to_write = u','.join(header)
target_csv_file.write(line_to_write)
target_csv_file.write(newline)
for namespace_config_data in config_data_to_write:
for data in namespace_config_data:
data_to_write = data[:]
if 'namespace' in data:
data_to_write.append('')
line_to_write = u','.join(data_to_write)
target_csv_file.write(line_to_write)
target_csv_file.write(newline)
else:
key = data[0]
while key not in key_value_pair[0]:
del key_value_pair[0]
if key in key_value_pair[0]:
value = key_value_pair[0][1]
data_to_write.append(value)
del key_value_pair[0]
line_to_write = u','.join(data_to_write)
target_csv_file.write(line_to_write)
target_csv_file.write(newline)
# Set index to start of file
target_csv_file.seek(0)
target_csv_file.close()
for namespace_config_data in config_data_to_write:
for data in namespace_config_data:
data_to_write = data[:]
if 'namespace' in data:
data_to_write.append('')
output_file_writer.writerow(data_to_write)
else:
key = data[0]
while key not in key_value_pair[0]:
del key_value_pair[0]
if key in key_value_pair[0]:
value = key_value_pair[0][1]
data_to_write.append(value)
del key_value_pair[0]
output_file_writer.writerow(data_to_write)
def create_dir(filetype, output_dir_path):
@ -256,72 +208,46 @@ def create_dir(filetype, output_dir_path):
def set_repeat_value(total_keys_repeat, keys, csv_file, target_filename):
key_val_pair = []
key_repeated = []
line = None
newline = u'\n'
with open(csv_file, 'r', newline=None) as read_from, open(target_filename,'w', newline=None) as write_to:
headers = read_from.readline()
values = read_from.readline()
write_to.write(headers)
write_to.write(values)
if not isinstance(values, str):
values = values.encode('utf-8')
values = values.strip().split(',')
total_keys_values = list(zip_longest(keys, values))
with open(csv_file, 'r') as read_from, open(target_filename,'w', newline='') as write_to:
csv_file_reader = csv.reader(read_from, delimiter=',')
headers = next(csv_file_reader)
values = next(csv_file_reader)
csv_file_writer = csv.writer(write_to, delimiter=',')
csv_file_writer.writerow(headers)
csv_file_writer.writerow(values)
# read new data, add value if key has repeat tag, write to new file
line = read_from.readline()
if not isinstance(line, str):
line = line.encode('utf-8')
row = line.strip().split(',')
while row:
index = -1
for row in csv_file_reader:
key_val_new = list(zip_longest(keys, row))
key_val_pair = total_keys_values[:]
key_val_pair = list(zip_longest(keys, values))
key_repeated = total_keys_repeat[:]
index = 0
while key_val_new and key_repeated:
index = index + 1
# if key has repeat tag, get its corresponding value, write to file
if key_val_new[0][0] == key_repeated[0]:
val = key_val_pair[0][1]
row[index] = val
row[index] = key_val_pair[0][1]
del key_repeated[0]
del key_val_new[0]
del key_val_pair[0]
line_to_write = u','.join(row)
write_to.write(line_to_write)
write_to.write(newline)
# Read next line
line = read_from.readline()
if not isinstance(line, str):
line = line.encode('utf-8')
row = line.strip().split(',')
if len(row) == 1 and '' in row:
break
index += 1
csv_file_writer.writerow(row)
return target_filename
def create_intermediate_csv(args, keys_in_config_file, keys_in_values_file, keys_repeat, is_encr=False):
def create_intermediate_csv(args, keys_in_values_file, keys_repeat, is_encr=False):
file_identifier_value = '0'
csv_str = 'csv'
bin_str = 'bin'
line = None
set_output_keyfile = False
# Add config data per namespace to `config_data_to_write` list
config_data_to_write = add_config_data_per_namespace(args.conf)
try:
with open(args.values, 'r', newline=None) as csv_values_file:
# first line must be keys in file
line = csv_values_file.readline()
if not isinstance(line, str):
line = line.encode('utf-8')
keys = line.strip().split(',')
with open(args.values, 'r') as csv_values_file:
values_file_reader = csv.reader(csv_values_file, delimiter=',')
keys = next(values_file_reader)
filename, file_ext = os.path.splitext(args.values)
target_filename = filename + '_created' + file_ext
@ -330,91 +256,77 @@ def create_intermediate_csv(args, keys_in_config_file, keys_in_values_file, keys
else:
target_values_file = args.values
csv_values_file = open(target_values_file, 'r', newline=None)
with open(target_values_file, 'r') as csv_values_file:
values_file_reader = csv.reader(csv_values_file, delimiter=',')
next(values_file_reader)
# Read header line
csv_values_file.readline()
# Create new directory(if doesn't exist) to store csv file generated
output_csv_target_dir = create_dir(csv_str, args.outdir)
# Create new directory(if doesn't exist) to store bin file generated
output_bin_target_dir = create_dir(bin_str, args.outdir)
if args.keygen:
set_output_keyfile = True
# Create new directory(if doesn't exist) to store csv file generated
output_csv_target_dir = create_dir(csv_str, args.outdir)
# Create new directory(if doesn't exist) to store bin file generated
output_bin_target_dir = create_dir(bin_str, args.outdir)
if args.keygen:
set_output_keyfile = True
for values_data_line in values_file_reader:
key_value_data = list(zip_longest(keys_in_values_file, values_data_line))
line = csv_values_file.readline()
if not isinstance(line, str):
line = line.encode('utf-8')
values_data_line = line.strip().split(',')
# Get file identifier value from values file
file_identifier_value = get_fileid_val(args.fileid, key_value_data, file_identifier_value)
while values_data_line:
key_value_data = list(zip_longest(keys_in_values_file, values_data_line))
key_value_pair = key_value_data[:]
# Get file identifier value from values file
file_identifier_value = get_fileid_val(args.fileid, keys_in_config_file,
keys_in_values_file, values_data_line, key_value_data,
file_identifier_value)
# Verify if output csv file does not exist
csv_filename = args.prefix + '-' + file_identifier_value + '.' + csv_str
output_csv_file = output_csv_target_dir + csv_filename
if os.path.isfile(output_csv_file):
raise SystemExit('Target csv file: %s already exists.`' % output_csv_file)
key_value_pair = key_value_data[:]
# Add values corresponding to each key to csv intermediate file
add_data_to_file(config_data_to_write, key_value_pair, output_csv_file)
print('\nCreated CSV file: ===>', output_csv_file)
# Verify if output csv file does not exist
csv_filename = args.prefix + '-' + file_identifier_value + '.' + csv_str
output_csv_file = output_csv_target_dir + csv_filename
if os.path.isfile(output_csv_file):
raise SystemExit('Target csv file: %s already exists.`' % output_csv_file)
# Verify if output bin file does not exist
bin_filename = args.prefix + '-' + file_identifier_value + '.' + bin_str
output_bin_file = output_bin_target_dir + bin_filename
if os.path.isfile(output_bin_file):
raise SystemExit('Target binary file: %s already exists.`' % output_bin_file)
# Add values corresponding to each key to csv intermediate file
add_data_to_file(config_data_to_write, key_value_pair, output_csv_file)
print('\nCreated CSV file: ===>', output_csv_file)
args.input = output_csv_file
args.output = os.path.join(bin_str, bin_filename)
if set_output_keyfile:
args.keyfile = 'keys-' + args.prefix + '-' + file_identifier_value
# Verify if output bin file does not exist
bin_filename = args.prefix + '-' + file_identifier_value + '.' + bin_str
output_bin_file = output_bin_target_dir + bin_filename
if os.path.isfile(output_bin_file):
raise SystemExit('Target binary file: %s already exists.`' % output_bin_file)
if is_encr:
nvs_partition_gen.encrypt(args)
else:
nvs_partition_gen.generate(args)
args.input = output_csv_file
args.output = os.path.join(bin_str, bin_filename)
if set_output_keyfile:
args.keyfile = 'keys-' + args.prefix + '-' + file_identifier_value
if is_encr:
nvs_partition_gen.encrypt(args)
else:
nvs_partition_gen.generate(args)
# Read next line
line = csv_values_file.readline()
if not isinstance(line, str):
line = line.encode('utf-8')
values_data_line = line.strip().split(',')
if len(values_data_line) == 1 and '' in values_data_line:
break
print('\nFiles generated in %s ...' % args.outdir)
print('\nFiles generated in %s ...' % args.outdir)
except Exception as e:
print(e)
exit(1)
finally:
csv_values_file.close()
def verify_empty_lines_exist(args, input_file):
    """Verify that an open CSV file contains no empty lines.

    A row is rejected when it is completely blank or when its first cell is
    empty (e.g. ``",value"``), mirroring the pre-existing line-based check.

    :param args: parsed CLI arguments; ``args.conf`` is only used as a
        fallback name in error messages if *input_file* has no ``name``.
    :param input_file: open text-mode file object positioned at the start.
    :raises SystemExit: if an empty line (or leading empty cell) is found.
    :return: a ``csv.reader`` over *input_file*, with the underlying file
        rewound to the beginning so the caller can re-iterate it.
    """
    input_file_reader = csv.reader(input_file, delimiter=',')
    # Report the file actually being validated; the previous version always
    # blamed args.conf even when it was checking the values file.
    file_name = getattr(input_file, 'name', args.conf)
    for file_data in input_file_reader:
        # A completely blank line parses to an empty row.
        if not file_data:
            raise SystemExit('Error: file: %s cannot have empty lines.' % file_name)
        # Only the first cell is inspected, as in the original logic.
        if len(file_data[0].strip()) == 0:
            raise SystemExit('Error: file: %s cannot have empty lines.' % file_name)
    # Rewind so callers can read the file again from the top.
    input_file.seek(0)
    return input_file_reader
def verify_file_format(args):
keys_in_config_file = []
keys_in_values_file = []
keys_repeat = []
file_data_keys = None
# Verify config file is not empty
if os.stat(args.conf).st_size == 0:
@ -425,41 +337,25 @@ def verify_file_format(args):
raise SystemExit('Error: values file: %s is empty.' % args.values)
# Verify config file does not have empty lines
with open(args.conf, 'r', newline='\n') as csv_config_file:
with open(args.conf, 'r') as csv_config_file:
try:
file_data = csv_config_file.readlines()
verify_empty_lines_exist(args.conf, file_data)
csv_config_file.seek(0)
config_file_reader = verify_empty_lines_exist(args, csv_config_file)
# Extract keys from config file
for data in file_data:
if not isinstance(data, str):
data = data.encode('utf-8')
line_data = data.strip().split(',')
if 'namespace' not in line_data:
keys_in_config_file.append(line_data[0])
if 'REPEAT' in line_data:
keys_repeat.append(line_data[0])
for config_data in config_file_reader:
if 'namespace' not in config_data:
keys_in_config_file.append(config_data[0])
if 'REPEAT' in config_data:
keys_repeat.append(config_data[0])
except Exception as e:
print(e)
# Verify values file does not have empty lines
with open(args.values, 'r', newline='\n') as csv_values_file:
with open(args.values, 'r') as csv_values_file:
try:
# Extract keys from values file (first line of file)
file_data = [csv_values_file.readline()]
file_data_keys = file_data[0]
if not isinstance(file_data_keys, str):
file_data_keys = file_data_keys.encode('utf-8')
keys_in_values_file = file_data_keys.strip().split(',')
while file_data:
verify_empty_lines_exist(args.values, file_data)
file_data = [csv_values_file.readline()]
if '' in file_data:
break
values_file_reader = verify_empty_lines_exist(args, csv_values_file)
# Extract keys from values file
keys_in_values_file = next(values_file_reader)
except Exception as e:
print(e)
@ -475,11 +371,6 @@ def verify_file_format(args):
def generate(args):
keys_in_config_file = []
keys_in_values_file = []
keys_repeat = []
encryption_enabled = False
args.outdir = os.path.join(args.outdir, '')
# Verify input config and values file format
keys_in_config_file, keys_in_values_file, keys_repeat = verify_file_format(args)
@ -488,13 +379,12 @@ def generate(args):
verify_data_in_file(args.conf, args.values, keys_in_config_file,
keys_in_values_file, keys_repeat)
encryption_enabled = False
if (args.keygen or args.inputkey):
encryption_enabled = True
print('\nGenerating encrypted NVS binary images...')
# Create intermediate csv file
create_intermediate_csv(args, keys_in_config_file, keys_in_values_file,
keys_repeat, is_encr=encryption_enabled)
create_intermediate_csv(args, keys_in_values_file, keys_repeat, is_encr=encryption_enabled)
def generate_key(args):