2021-11-06 05:23:21 -04:00
|
|
|
/*
|
2022-01-12 02:03:50 -05:00
|
|
|
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
|
2021-11-06 05:23:21 -04:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
2016-10-19 05:17:24 -04:00
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <assert.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
#include <freertos/FreeRTOS.h>
|
|
|
|
#include <freertos/task.h>
|
|
|
|
#include <freertos/semphr.h>
|
2022-05-31 22:14:48 -04:00
|
|
|
#include "soc/mmu.h"
|
2016-10-19 05:17:24 -04:00
|
|
|
#include "sdkconfig.h"
|
|
|
|
#include "esp_attr.h"
|
2022-07-21 07:14:41 -04:00
|
|
|
#include "esp_memory_utils.h"
|
2022-06-27 03:24:07 -04:00
|
|
|
#include "spi_flash_mmap.h"
|
2017-01-26 02:29:18 -05:00
|
|
|
#include "esp_flash_encrypt.h"
|
2016-10-19 05:17:24 -04:00
|
|
|
#include "esp_log.h"
|
2022-06-27 03:24:07 -04:00
|
|
|
#include "esp_private/cache_utils.h"
|
2022-04-18 03:04:10 -04:00
|
|
|
#include "hal/mmu_ll.h"
|
2022-06-27 03:24:07 -04:00
|
|
|
#include "esp_rom_spiflash.h"
|
2020-11-26 03:56:13 -05:00
|
|
|
|
2019-06-05 22:57:29 -04:00
|
|
|
#if CONFIG_IDF_TARGET_ESP32
|
2020-11-26 03:56:13 -05:00
|
|
|
#include "soc/dport_reg.h"
|
2019-06-05 22:57:29 -04:00
|
|
|
#include "esp32/rom/cache.h"
|
2020-01-16 22:47:08 -05:00
|
|
|
#elif CONFIG_IDF_TARGET_ESP32S2
|
|
|
|
#include "esp32s2/rom/cache.h"
|
2019-12-26 02:25:24 -05:00
|
|
|
#include "soc/extmem_reg.h"
|
2020-07-29 01:13:51 -04:00
|
|
|
#elif CONFIG_IDF_TARGET_ESP32S3
|
|
|
|
#include "esp32s3/rom/cache.h"
|
|
|
|
#include "soc/extmem_reg.h"
|
2020-11-26 03:56:13 -05:00
|
|
|
#elif CONFIG_IDF_TARGET_ESP32C3
|
|
|
|
#include "esp32c3/rom/cache.h"
|
2021-06-10 07:47:41 -04:00
|
|
|
#elif CONFIG_IDF_TARGET_ESP32H2
|
|
|
|
#include "esp32h2/rom/cache.h"
|
2022-01-17 21:32:56 -05:00
|
|
|
#elif CONFIG_IDF_TARGET_ESP32C2
|
|
|
|
#include "esp32c2/rom/cache.h"
|
2019-06-05 22:57:29 -04:00
|
|
|
#endif
|
2016-10-19 05:17:24 -04:00
|
|
|
|
2022-05-10 22:32:56 -04:00
|
|
|
#if CONFIG_SPIRAM
|
|
|
|
#include "esp_private/esp_psram_extram.h"
|
2022-08-19 05:31:32 -04:00
|
|
|
#include "esp_private/mmu_psram_flash.h"
|
2022-05-10 22:32:56 -04:00
|
|
|
#endif
|
|
|
|
|
2016-10-19 05:17:24 -04:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// Enable built-in checks in queue.h in debug builds
|
|
|
|
#define INVARIANTS
|
|
|
|
#endif
|
2019-03-14 05:29:32 -04:00
|
|
|
#include "sys/queue.h"
|
2016-10-19 05:17:24 -04:00
|
|
|
|
2020-12-08 23:22:55 -05:00
|
|
|
// Number of MMU pages in the instruction (IROM0) and data (DROM0) regions
#define IROM0_PAGES_NUM (SOC_MMU_IROM0_PAGES_END - SOC_MMU_IROM0_PAGES_START)
#define DROM0_PAGES_NUM (SOC_MMU_DROM0_PAGES_END - SOC_MMU_DROM0_PAGES_START)

// One past the highest MMU page index usable for flash mappings
// (whichever of the IROM0/DROM0 regions ends later)
#define PAGES_LIMIT ((SOC_MMU_IROM0_PAGES_END > SOC_MMU_DROM0_PAGES_END) ? SOC_MMU_IROM0_PAGES_END:SOC_MMU_DROM0_PAGES_END)

// Mask of the in-page offset bits for a given MMU page size; a physical
// address AND-ed with this must be 0 to be page-aligned
#define INVALID_PHY_PAGE(page_size) ((page_size) - 1)
|
2016-10-19 05:17:24 -04:00
|
|
|
|
2022-05-09 04:44:02 -04:00
|
|
|
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
|
|
|
|
extern int _instruction_reserved_start;
|
|
|
|
extern int _instruction_reserved_end;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if CONFIG_SPIRAM_RODATA
|
|
|
|
extern int _rodata_reserved_start;
|
|
|
|
extern int _rodata_reserved_end;
|
|
|
|
#endif
|
|
|
|
|
2020-12-15 22:50:13 -05:00
|
|
|
#if !CONFIG_SPI_FLASH_ROM_IMPL
|
|
|
|
|
2016-10-19 05:17:24 -04:00
|
|
|
// Bookkeeping record for one active mmap region, kept in s_mmap_entries_head.
typedef struct mmap_entry_{
    uint32_t handle;                  // unique handle returned to the caller (from s_mmap_last_handle)
    int page;                         // first MMU page index of this mapping
    int count;                        // number of consecutive MMU pages in this mapping
    LIST_ENTRY(mmap_entry_) entries;  // linkage in the global list of mappings
} mmap_entry_t;
|
|
|
|
|
|
|
|
|
|
|
|
// List of all currently active mmap regions (most recently created first)
static LIST_HEAD(mmap_entries_head, mmap_entry_) s_mmap_entries_head =
        LIST_HEAD_INITIALIZER(s_mmap_entries_head);
// Per-MMU-page reference count; a page may be shared by several mappings.
// Non-zero means the page's MMU entry is in use and must not be reconfigured.
static uint8_t s_mmap_page_refcnt[SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION] = {0};
// Monotonically increasing counter used to generate unique mmap handles
static uint32_t s_mmap_last_handle = 0;
|
|
|
|
|
|
|
|
|
2019-07-16 05:33:30 -04:00
|
|
|
/* One-time initialisation of the mmap bookkeeping.
 *
 * Walks the whole MMU table and seeds s_mmap_page_refcnt: pages that startup
 * code already mapped (the running app's IROM/DROM) get a reference count of
 * 1 so they are never handed out again; every other entry is invalidated.
 * Idempotent: returns immediately once the DROM0 start page is accounted for.
 */
static void IRAM_ATTR spi_flash_mmap_init(void)
{
    if (s_mmap_page_refcnt[SOC_MMU_DROM0_PAGES_START] != 0) {
        return; /* mmap data already initialised */
    }

    const int total_pages = SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION;
    for (int page = 0; page < total_pages; ++page) {
        uint32_t pro_entry = mmu_ll_read_entry(MMU_TABLE_CORE0, page);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
        uint32_t app_entry = mmu_ll_read_entry(MMU_TABLE_CORE1, page);

        if (pro_entry != app_entry) {
            // The two cores disagree: this entry was left behind by the boot
            // loader, so drop it from the PRO core table.
            mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, page);
        }
#endif
        bool pro_valid = !mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, page);
        if (pro_valid && (page == SOC_MMU_DROM0_PAGES_START ||
                          page == SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE ||
                          pro_entry != 0)) {
            // Page belongs to the running application image - pin it.
            s_mmap_page_refcnt[page] = 1;
        } else {
            mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, page);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
            mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, page);
#endif
        }
    }
}
|
|
|
|
|
2018-03-15 07:58:02 -04:00
|
|
|
/* Translate a mapping type into the MMU page range and virtual base address
 * that spi_flash_mmap may allocate from.
 */
static void IRAM_ATTR get_mmu_region(spi_flash_mmap_memory_t memory, int* out_begin, int* out_size,uint32_t* region_addr)
{
    if (memory == SPI_FLASH_MMAP_DATA) {
        /* data mappings live in VAddr0 (DROM) */
        *out_begin = SOC_MMU_DROM0_PAGES_START;
        *out_size = DROM0_PAGES_NUM;
        *region_addr = SOC_MMU_VADDR0_START_ADDR;
        return;
    }
    /* instruction mappings: only the tail of VAddr1 is free for use */
    *out_begin = SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE;
    *out_size = SOC_MMU_IROM0_PAGES_END - *out_begin;
    *region_addr = SOC_MMU_VADDR1_FIRST_USABLE_ADDR;
}
|
|
|
|
|
2016-12-21 20:28:08 -05:00
|
|
|
/**
 * Map a region of flash into the data or instruction address space.
 *
 * @param src_addr   Physical flash address; must be aligned to the MMU page size.
 * @param size       Length of the region in bytes (rounded up to whole pages).
 * @param memory     SPI_FLASH_MMAP_DATA or SPI_FLASH_MMAP_INST.
 * @param out_ptr    Receives the virtual address of the mapping.
 * @param out_handle Receives a handle for spi_flash_munmap().
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG for a misaligned or
 *         out-of-range region, ESP_ERR_NO_MEM if allocation fails or no
 *         contiguous MMU pages are available.
 */
esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                                   const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    if (src_addr & INVALID_PHY_PAGE(CONFIG_MMU_PAGE_SIZE)) {
        return ESP_ERR_INVALID_ARG;
    }
    // BUGFIX: overflow-safe form of "src_addr + size > chip_size".
    // On 32-bit size_t the addition could wrap around and defeat the check.
    if (src_addr > g_rom_flashchip.chip_size ||
        size > g_rom_flashchip.chip_size - src_addr) {
        return ESP_ERR_INVALID_ARG;
    }
    // region which should be mapped, expressed in whole MMU pages
    int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
    int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
    // prepare a linear pages array to feed into spi_flash_mmap_pages
    int *pages = heap_caps_malloc(sizeof(int)*page_count, MALLOC_CAP_INTERNAL);
    if (pages == NULL) {
        return ESP_ERR_NO_MEM;
    }
    for (int i = 0; i < page_count; i++) {
        pages[i] = (phys_page+i);
    }
    ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
    free(pages);
    return ret;
}
|
|
|
|
|
2018-07-30 15:49:09 -04:00
|
|
|
/**
 * Map an arbitrary (not necessarily contiguous in flash) list of physical
 * flash pages into one contiguous virtual region.
 *
 * Searches the MMU table for a window in which every entry is either unused
 * or already maps the wanted page, so overlapping mappings are shared via
 * the per-page reference counts.
 *
 * @param pages      Array of physical page indices; must reside in internal
 *                   RAM because it is read while the flash cache is disabled.
 * @param page_count Number of entries in pages; must be > 0.
 * @param memory     SPI_FLASH_MMAP_DATA or SPI_FLASH_MMAP_INST.
 * @param out_ptr    Receives the virtual address (NULL on failure).
 * @param out_handle Receives the handle for spi_flash_munmap() (0 on failure).
 * @return ESP_OK, ESP_ERR_INVALID_ARG, or ESP_ERR_NO_MEM.
 */
esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret;
    // Pre-set outputs to "failure" values so early returns leave them sane
    const void* temp_ptr = *out_ptr = NULL;
    spi_flash_mmap_handle_t temp_handle = *out_handle = (spi_flash_mmap_handle_t)NULL;
    bool need_flush = false;
    if (!page_count) {
        return ESP_ERR_INVALID_ARG;
    }
    if (!esp_ptr_internal(pages)) {
        return ESP_ERR_INVALID_ARG;
    }
    for (size_t i = 0; i < page_count; i++) {
        // every requested page must address flash that actually exists
        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
            return ESP_ERR_INVALID_ARG;
        }
    }
    // Allocate the bookkeeping record before disabling the cache: the heap
    // allocator may run code located in flash.
    mmap_entry_t* new_entry = (mmap_entry_t*) heap_caps_malloc(sizeof(mmap_entry_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (new_entry == NULL) {
        return ESP_ERR_NO_MEM;
    }

    spi_flash_disable_interrupts_caches_and_other_cpu();

    spi_flash_mmap_init();
    // figure out the memory region where we should look for pages
    int region_begin;       // first page to check
    int region_size;        // number of pages to check
    uint32_t region_addr;   // base address of memory region
    get_mmu_region(memory,&region_begin,&region_size,&region_addr);
    if (region_size < (int) page_count) {
        spi_flash_enable_interrupts_caches_and_other_cpu();
        free(new_entry);    // BUGFIX: this early-exit path leaked new_entry
        return ESP_ERR_NO_MEM;
    }
    // The following part searches for a range of MMU entries which can be used.
    // Algorithm is essentially naïve strstr, except that unused MMU entries
    // are treated as wildcards.
    int start;
    // the " + 1" is a fix when looping the MMU table pages, because the last
    // MMU page is valid as well if it has not been used
    int end = region_begin + region_size - page_count + 1;
    for (start = region_begin; start < end; ++start) {
        int pageno = 0;
        int pos;
        for (pos = start; pos < start + (int) page_count; ++pos, ++pageno) {
            int table_val = (int) mmu_ll_read_entry(MMU_TABLE_CORE0, pos);
            uint8_t refcnt = s_mmap_page_refcnt[pos];

#if !CONFIG_IDF_TARGET_ESP32 && SOC_SPIRAM_SUPPORTED
            // Page already mapped to PSRAM - this window cannot be shared
            if (table_val == SOC_MMU_PAGE_IN_PSRAM(pages[pageno])) {
                break;
            }
#endif //#if !CONFIG_IDF_TARGET_ESP32

            // An in-use entry is only acceptable if it already maps exactly
            // the flash page we want at this position
            if (refcnt != 0 && table_val != SOC_MMU_PAGE_IN_FLASH(pages[pageno])) {
                break;
            }
        }
        // whole mapping range matched, bail out
        if (pos - start == (int) page_count) {
            break;
        }
    }
    // checked all the region(s) and haven't found anything?
    if (start == end) {
        ret = ESP_ERR_NO_MEM;
    } else {
        // set up mapping using pages
        uint32_t pageno = 0;
        for (int i = start; i != start + (int) page_count; ++i, ++pageno) {
            // sanity check: we won't reconfigure entries with non-zero reference count
            uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
            uint32_t entry_app = mmu_ll_read_entry(MMU_TABLE_CORE1, i);
#endif
            if (s_mmap_page_refcnt[i] == 0) {
                assert(mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, i));
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
                assert(mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE1, i));
#endif
            }
            assert(s_mmap_page_refcnt[i] == 0 ||
                   (entry_pro == SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
                    && entry_app == SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#endif
                   ));
            if (s_mmap_page_refcnt[i] == 0) {
                if (entry_pro != SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
                    || entry_app != SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#endif
                   ) {
                    mmu_ll_write_entry(MMU_TABLE_CORE0, i, pages[pageno], 0);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
                    mmu_ll_write_entry(MMU_TABLE_CORE1, i, pages[pageno], 0);
#endif

#if !CONFIG_IDF_TARGET_ESP32
                    Cache_Invalidate_Addr(region_addr + (i - region_begin) * SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE);
#endif
                    need_flush = true;
                }
            }
            ++s_mmap_page_refcnt[i];
        }
        LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
        new_entry->page = start;
        new_entry->count = page_count;
        new_entry->handle = ++s_mmap_last_handle;
        temp_handle = new_entry->handle;
        temp_ptr = (void*) (region_addr + (start - region_begin) * SPI_FLASH_MMU_PAGE_SIZE);
        ret = ESP_OK;
    }

    /* This is a temporary fix for an issue where some
       cache reads may see stale data.

       Working on a long term fix that doesn't require invalidating
       entire cache.
    */
    if (need_flush) {
#if CONFIG_IDF_TARGET_ESP32
#if CONFIG_SPIRAM
        esp_psram_extram_writeback_cache();
#endif // CONFIG_SPIRAM
        Cache_Flush(0);
#if !CONFIG_FREERTOS_UNICORE
        Cache_Flush(1);
#endif // !CONFIG_FREERTOS_UNICORE
#endif // CONFIG_IDF_TARGET_ESP32
    }

    spi_flash_enable_interrupts_caches_and_other_cpu();
    if (temp_ptr == NULL) {
        // mapping failed - the bookkeeping record was never linked in
        free(new_entry);
    }
    *out_ptr = temp_ptr;
    *out_handle = temp_handle;
    return ret;
}
|
|
|
|
|
|
|
|
/* Release a mapping created by spi_flash_mmap()/spi_flash_mmap_pages().
 *
 * Decrements the refcount of every page in the region; pages whose refcount
 * reaches zero get their MMU entries invalidated, so a use-after-unmap faults
 * instead of silently reading a stale mapping. Asserts (debug builds) on an
 * unknown or already-unmapped handle.
 */
void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
{
    mmap_entry_t* found = NULL;

    spi_flash_disable_interrupts_caches_and_other_cpu();
    for (mmap_entry_t* cur = LIST_FIRST(&s_mmap_entries_head); cur != NULL; cur = LIST_NEXT(cur, entries)) {
        if (cur->handle != handle) {
            continue;
        }
        found = cur;
        // for each page, drop the reference; invalidate the MMU entry once
        // nobody references the page any more
        for (int page = cur->page; page < cur->page + cur->count; ++page) {
            assert(s_mmap_page_refcnt[page] > 0);
            if (--s_mmap_page_refcnt[page] == 0) {
                mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, page);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
                mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, page);
#endif
            }
        }
        LIST_REMOVE(cur, entries);
        break;
    }
    spi_flash_enable_interrupts_caches_and_other_cpu();

    if (found == NULL) {
        assert(0 && "invalid handle, or handle already unmapped");
    }
    free(found);   // free(NULL) is a no-op in release builds
}
|
|
|
|
|
2019-07-16 05:33:30 -04:00
|
|
|
/* Run spi_flash_mmap_init() with the flash cache and the other CPU suspended,
 * since it touches the MMU table.
 */
static void IRAM_ATTR NOINLINE_ATTR spi_flash_protected_mmap_init(void)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    spi_flash_mmap_init();
    spi_flash_enable_interrupts_caches_and_other_cpu();
}
|
|
|
|
|
|
|
|
/* Read one CORE0 MMU table entry with the flash cache and the other CPU
 * suspended, so the read cannot race a concurrent remap.
 */
static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int index)
{
    spi_flash_disable_interrupts_caches_and_other_cpu();
    uint32_t entry = mmu_ll_read_entry(MMU_TABLE_CORE0, index);
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return entry;
}
|
|
|
|
|
2019-07-16 05:33:30 -04:00
|
|
|
void spi_flash_mmap_dump(void)
|
2018-09-28 06:29:52 -04:00
|
|
|
{
|
|
|
|
spi_flash_protected_mmap_init();
|
|
|
|
|
2016-10-19 05:17:24 -04:00
|
|
|
mmap_entry_t* it;
|
|
|
|
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
|
|
|
|
printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
|
|
|
|
}
|
2020-12-08 23:22:55 -05:00
|
|
|
for (int i = 0; i < SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION; ++i) {
|
2016-10-19 05:17:24 -04:00
|
|
|
if (s_mmap_page_refcnt[i] != 0) {
|
2018-09-28 06:29:52 -04:00
|
|
|
uint32_t paddr = spi_flash_protected_read_mmu_entry(i);
|
|
|
|
printf("page %d: refcnt=%d paddr=%d\n", i, (int) s_mmap_page_refcnt[i], paddr);
|
2016-10-19 05:17:24 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 23:51:02 -05:00
|
|
|
|
2018-09-28 06:29:52 -04:00
|
|
|
/* Count the MMU pages in the given region that are currently free
 * (zero refcount and an invalid MMU entry), i.e. available to spi_flash_mmap().
 */
uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{
    int region_begin;       // first page of the region
    int region_size;        // number of pages in the region
    uint32_t region_addr;   // virtual base address (not needed for counting)
    int free_pages = 0;

    spi_flash_disable_interrupts_caches_and_other_cpu();
    spi_flash_mmap_init();
    get_mmu_region(memory,&region_begin,&region_size,&region_addr);
    for (int page = region_begin; page < region_begin + region_size; ++page) {
        if (s_mmap_page_refcnt[page] == 0 && mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, page)) {
            ++free_pages;
        }
    }
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return free_pages;
}
|
|
|
|
|
2020-12-15 22:50:13 -05:00
|
|
|
/* Translate a cached (virtual) address into the physical flash offset it is
 * mapped from. Returns SPI_FLASH_CACHE2PHYS_FAIL if the address is in IRAM,
 * outside the IROM/DROM windows, or its MMU page is not mapped.
 */
size_t spi_flash_cache2phys(const void *cached)
{
    intptr_t c = (intptr_t)cached;
    size_t cache_page;
    int offset = 0;  // page offset correction when the code/rodata was moved to PSRAM
    if (c >= SOC_MMU_VADDR1_START_ADDR && c < SOC_MMU_VADDR1_FIRST_USABLE_ADDR) {
        /* IRAM address, doesn't map to flash */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    if (c < SOC_MMU_VADDR1_FIRST_USABLE_ADDR) {
        /* expect cache is in DROM */
        cache_page = (c - SOC_MMU_VADDR0_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + SOC_MMU_DROM0_PAGES_START;
#if CONFIG_SPIRAM_RODATA
        // rodata relocated to PSRAM: translate back to its flash origin
        if (c >= (uint32_t)&_rodata_reserved_start && c <= (uint32_t)&_rodata_reserved_end) {
            offset = rodata_flash2spiram_offset();
        }
#endif
    } else {
        /* expect cache is in IROM */
        cache_page = (c - SOC_MMU_VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + SOC_MMU_IROM0_PAGES_START;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
        // instructions relocated to PSRAM: translate back to their flash origin
        if (c >= (uint32_t)&_instruction_reserved_start && c <= (uint32_t)&_instruction_reserved_end) {
            offset = instruction_flash2spiram_offset();
        }
#endif
    }

    if (cache_page >= PAGES_LIMIT) {
        /* cached address was not in IROM or DROM */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
    // NOTE(review): this validity check reads the MMU entry again outside the
    // cache-disabled section above - confirm a concurrent remap cannot race it
    bool entry_is_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, cache_page);
    if (entry_is_invalid) {
        /* page is not mapped */
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }
    // combine page number (plus any PSRAM relocation offset) with the in-page offset
    uint32_t phys_offs = ((phys_page & SOC_MMU_ADDR_MASK) + offset) * SPI_FLASH_MMU_PAGE_SIZE;
    return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
}
|
|
|
|
|
2020-11-26 03:56:13 -05:00
|
|
|
/**
 * Reverse lookup: find the cached (virtual) address at which a physical flash
 * offset is currently mapped, or NULL if it is not mapped in the requested
 * address space.
 *
 * @param phys_offs Physical flash offset.
 * @param memory    SPI_FLASH_MMAP_DATA to search the DROM window,
 *                  otherwise the IROM window is searched.
 */
const void *IRAM_ATTR spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memory)
{
    uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
    int start, end, page_delta;
    intptr_t base;

    if (memory == SPI_FLASH_MMAP_DATA) {
        start = SOC_MMU_DROM0_PAGES_START;
        end = SOC_MMU_DROM0_PAGES_END;
        base = SOC_MMU_VADDR0_START_ADDR;
        page_delta = SOC_MMU_DROM0_PAGES_START;
    } else {
        start = SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE;
        end = SOC_MMU_IROM0_PAGES_END;
        base = SOC_MMU_VADDR1_START_ADDR;
        page_delta = SOC_MMU_IROM0_PAGES_START;
    }
    spi_flash_disable_interrupts_caches_and_other_cpu();
    for (int i = start; i < end; i++) {
        uint32_t mmu_value = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
        if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
            if (mmu_value & MMU_ACCESS_SPIRAM) {
                // entry was relocated to PSRAM; map it back to the flash page
                mmu_value += instruction_flash2spiram_offset();
                mmu_value = (mmu_value & SOC_MMU_ADDR_MASK) | MMU_ACCESS_FLASH;
            }
        }
#endif
#if CONFIG_SPIRAM_RODATA
        // BUGFIX: the upper bound previously compared against
        // rodata_flash_start_page_get() (start page used twice), so only the
        // first rodata page was ever translated. Use the end page, mirroring
        // the instruction branch above.
        if (phys_page >= rodata_flash_start_page_get() && phys_page <= rodata_flash_end_page_get()) {
            if (mmu_value & MMU_ACCESS_SPIRAM) {
                mmu_value += rodata_flash2spiram_offset();
                mmu_value = (mmu_value & SOC_MMU_ADDR_MASK) | MMU_ACCESS_FLASH;
            }
        }
#endif
        if (mmu_value == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
            i -= page_delta;
            intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
            spi_flash_enable_interrupts_caches_and_other_cpu();
            return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
        }
    }
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return NULL;
}
|
2019-03-08 00:30:49 -05:00
|
|
|
|
2019-06-27 10:35:06 -04:00
|
|
|
/* Check whether a physical flash page is currently mapped anywhere in the
 * data or instruction windows. When found, on targets other than ESP32
 * *out_ptr receives the virtual address of the mapping (left NULL on ESP32,
 * which flushes the whole cache instead).
 */
static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_page, const void **out_ptr)
{
    /* index 0 = SPI_FLASH_MMAP_DATA window, index 1 = SPI_FLASH_MMAP_INST window */
    int start[2] = { SOC_MMU_DROM0_PAGES_START, SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE };
    int end[2]   = { SOC_MMU_DROM0_PAGES_END,   SOC_MMU_IROM0_PAGES_END };

    *out_ptr = NULL;

    for (int region = 0; region < 2; region++) {
        for (int page = start[region]; page < end[region]; page++) {
            if (mmu_ll_read_entry(MMU_TABLE_CORE0, page) == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
#if !CONFIG_IDF_TARGET_ESP32
                if (region == 0) { /* SPI_FLASH_MMAP_DATA */
                    *out_ptr = (const void *)(SOC_MMU_VADDR0_START_ADDR + SPI_FLASH_MMU_PAGE_SIZE * (page - start[0]));
                } else {           /* SPI_FLASH_MMAP_INST */
                    *out_ptr = (const void *)(SOC_MMU_VADDR1_FIRST_USABLE_ADDR + SPI_FLASH_MMU_PAGE_SIZE * (page - start[1]));
                }
#endif
                return true;
            }
        }
    }
    return false;
}
|
|
|
|
|
|
|
|
/* Validates if given flash address has corresponding cache mapping, if yes, flushes cache memories */
|
|
|
|
IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length)
|
|
|
|
{
|
2019-06-27 10:35:06 -04:00
|
|
|
bool ret = false;
|
2019-03-08 00:30:49 -05:00
|
|
|
/* align start_addr & length to full MMU pages */
|
|
|
|
uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
|
|
|
|
length += (start_addr - page_start_addr);
|
|
|
|
length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
|
|
|
|
for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
|
|
|
|
uint32_t page = addr / SPI_FLASH_MMU_PAGE_SIZE;
|
2022-04-18 03:04:10 -04:00
|
|
|
// TODO: IDF-4969
|
2019-03-08 00:30:49 -05:00
|
|
|
if (page >= 256) {
|
|
|
|
return false; /* invalid address */
|
|
|
|
}
|
|
|
|
|
2019-06-27 10:35:06 -04:00
|
|
|
const void *vaddr = NULL;
|
|
|
|
if (is_page_mapped_in_cache(page, &vaddr)) {
|
2019-06-10 03:07:12 -04:00
|
|
|
#if CONFIG_IDF_TARGET_ESP32
|
2019-06-05 00:34:19 -04:00
|
|
|
#if CONFIG_SPIRAM
|
2022-05-10 22:32:56 -04:00
|
|
|
esp_psram_extram_writeback_cache();
|
2019-03-08 00:30:49 -05:00
|
|
|
#endif
|
|
|
|
Cache_Flush(0);
|
|
|
|
#ifndef CONFIG_FREERTOS_UNICORE
|
|
|
|
Cache_Flush(1);
|
|
|
|
#endif
|
|
|
|
return true;
|
2020-11-26 03:56:13 -05:00
|
|
|
#else // CONFIG_IDF_TARGET_ESP32
|
2019-06-27 10:35:06 -04:00
|
|
|
if (vaddr != NULL) {
|
|
|
|
Cache_Invalidate_Addr((uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE);
|
|
|
|
ret = true;
|
|
|
|
}
|
2020-11-26 03:56:13 -05:00
|
|
|
#endif // CONFIG_IDF_TARGET_ESP32
|
2019-06-27 10:35:06 -04:00
|
|
|
|
2019-03-08 00:30:49 -05:00
|
|
|
}
|
|
|
|
}
|
2019-06-27 10:35:06 -04:00
|
|
|
return ret;
|
2019-03-08 00:30:49 -05:00
|
|
|
}
|
2020-12-15 22:50:13 -05:00
|
|
|
#endif //!CONFIG_SPI_FLASH_ROM_IMPL
|