/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdarg.h>
#include <assert.h>        // for assert()
#include <stdlib.h>        // for free()
#include <sys/param.h>     // for MAX/MIN
#include <sys/lock.h>      // for _lock_t, _lock_acquire(), _lock_release()
#include "esp_attr.h"
#include "esp_heap_caps.h" // for heap_caps_malloc(), heap_caps_get_largest_free_block()
#include "esp_private/system_internal.h"
#include "esp_spi_flash.h" // for ``g_flash_guard_default_ops``
#include "esp_flash.h"
#include "esp_flash_partitions.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "hal/spi_types.h"
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_compiler.h"
#include "esp_rom_sys.h"

#include "driver/spi_common_internal.h"

#define SPI_FLASH_CACHE_NO_DISABLE (CONFIG_SPI_FLASH_AUTO_SUSPEND || (CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_RODATA) || CONFIG_APP_BUILD_TYPE_ELF_RAM)

static const char TAG[] = "spi_flash";

#if SPI_FLASH_CACHE_NO_DISABLE
static _lock_t s_spi1_flash_mutex;
#endif // #if SPI_FLASH_CACHE_NO_DISABLE

/*
 * OS functions providing delay service and arbitration among chips, and with the cache.
 *
 * The cache needs to be disabled while a chip on the SPI1 bus is under operation, hence these functions need to be
 * placed in IRAM, and their data in DRAM.
 */

typedef struct {
    spi_bus_lock_dev_handle_t dev_lock;
} app_func_arg_t;

/*
 * Time yield algorithm:
 * Every time spi_flash_os_check_yield() is called:
 *
 * 1. If the time since the last end() call is longer than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS (time
 *    to yield), all counters are reset, as if a yield has just ended;
 * 2. If the time since the last yield() is longer than CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS, a
 *    yield request is returned. When yield() is called, all counters are reset.
 * Note: Short intervals between start() and end() after the last yield() will not reset the
 *       counter mentioned in #2, but are still counted into the time mentioned in #2.
 */
typedef struct {
    app_func_arg_t common_arg;  //shared args, must be the first item
    bool no_protect;            //to decide whether to check the protected region (for the main chip) or not
    uint32_t acquired_since_us; // Time since last explicit yield()
    uint32_t released_since_us; // Time since last end() (implicit yield)
} spi1_app_func_arg_t;

static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx);
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx);

#if !SPI_FLASH_CACHE_NO_DISABLE
IRAM_ATTR static void cache_enable(void* arg)
{
    g_flash_guard_default_ops.end();
}

IRAM_ATTR static void cache_disable(void* arg)
{
    g_flash_guard_default_ops.start();
}
#endif //#if !SPI_FLASH_CACHE_NO_DISABLE

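// Acquire the bus lock for this chip's device handle: block until other devices (and the cache
// background operation) finish, then mark that this device has touched (may have re-configured) the bus.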
static IRAM_ATTR esp_err_t acquire_spi_bus_lock(void *arg)
{
    spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t *)arg)->dev_lock;

    // wait for other devices (or cache) to finish their operation
    esp_err_t ret = spi_bus_lock_acquire_start(dev_lock, portMAX_DELAY);
    if (ret != ESP_OK) {
        return ret;
    }
    spi_bus_lock_touch(dev_lock);
    return ESP_OK;
}

static IRAM_ATTR esp_err_t release_spi_bus_lock(void *arg)
{
    return spi_bus_lock_acquire_end(((app_func_arg_t *)arg)->dev_lock);
}

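// start() hook for chips on the SPI1 bus: take the appropriate lock (see the comment inside)
// before the flash operation starts, and update the yield bookkeeping.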
static IRAM_ATTR esp_err_t spi1_start(void *arg)
{
    esp_err_t ret = ESP_OK;
    /**
     * There are three ways to lock the ESP Flash API:
     * 1. spi bus lock: used when SPI1 is shared with the GPSPI Master Driver
     * 2. mutex: used when the Cache doesn't need to be disabled
     * 3. cache lock (from cache_utils.h): used when we need to disable the Cache to avoid access from SPI0
     *
     * From 1 to 3, the lock efficiency decreases.
     */
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    //use the lock to disable the cache and interrupts before using the SPI bus
    ret = acquire_spi_bus_lock(arg);
#elif SPI_FLASH_CACHE_NO_DISABLE
    _lock_acquire(&s_spi1_flash_mutex);
#else
    //directly disable the cache and interrupts when the lock is not used
    cache_disable(NULL);
#endif
    on_spi1_acquired((spi1_app_func_arg_t*)arg);
    return ret;
}

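// end() hook for chips on the SPI1 bus: release the lock taken in spi1_start() and record the
// release time for the yield bookkeeping.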
static IRAM_ATTR esp_err_t spi1_end(void *arg)
{
    esp_err_t ret = ESP_OK;

    /**
     * There are three ways to lock the ESP Flash API, see `spi1_start`
     */
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    ret = release_spi_bus_lock(arg);
#elif SPI_FLASH_CACHE_NO_DISABLE
    _lock_release(&s_spi1_flash_mutex);
#else
    cache_enable(NULL);
#endif
    on_spi1_released((spi1_app_func_arg_t*)arg);
    return ret;
}

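// check_yield() hook: ask the time-based yield logic whether the ongoing operation should be
// interrupted to let other tasks run. Returns ESP_OK and sets SPI_FLASH_YIELD_REQ_YIELD in
// *out_request when a yield is due, ESP_ERR_TIMEOUT otherwise.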
static IRAM_ATTR esp_err_t spi1_flash_os_check_yield(void *arg, uint32_t chip_status, uint32_t* out_request)
{
    assert(chip_status == 0);   //TODO: support suspend
    esp_err_t ret = ESP_ERR_TIMEOUT;    //Nothing happened
    uint32_t request = 0;

    if (on_spi1_check_yield((spi1_app_func_arg_t *)arg)) {
        request = SPI_FLASH_YIELD_REQ_YIELD;
        ret = ESP_OK;
    }
    if (out_request) {
        *out_request = request;
    }
    return ret;
}

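// yield() hook: actually give up the CPU for at least one tick (when the scheduler is running) so
// lower-priority tasks get a chance to execute, then reset the yield bookkeeping.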
static IRAM_ATTR esp_err_t spi1_flash_os_yield(void *arg, uint32_t* out_status)
{
    if (likely(xTaskGetSchedulerState() == taskSCHEDULER_RUNNING)) {
#ifdef CONFIG_SPI_FLASH_ERASE_YIELD_TICKS
        vTaskDelay(CONFIG_SPI_FLASH_ERASE_YIELD_TICKS);
#else
        vTaskDelay(1);
#endif
    }
    on_spi1_yielded((spi1_app_func_arg_t*)arg);
    return ESP_OK;
}

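// delay_us() hook: busy-wait implemented in ROM code, so it stays usable while the cache is disabled.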
static IRAM_ATTR esp_err_t delay_us(void *arg, uint32_t us)
{
    esp_rom_delay_us(us);
    return ESP_OK;
}

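// get_temp_buffer() hook: allocate a temporary buffer in internal RAM, shrinking the request to the
// largest free block (rounded up to a word boundary) when the preferred size cannot be satisfied.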
static IRAM_ATTR void* get_buffer_malloc(void* arg, size_t request_size, size_t* out_size)
{
    /* Allocate temporary internal buffer to use for the actual read. If the preferred size
       doesn't fit in free internal memory, allocate the largest available free block.

       (May need to shrink read_chunk_size and retry due to race conditions with other tasks
       also allocating from the heap.)
    */
    void* ret = NULL;
    unsigned retries = 5;
    size_t read_chunk_size = request_size;
    while (ret == NULL && retries--) {
        read_chunk_size = MIN(read_chunk_size, heap_caps_get_largest_free_block(MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT));
        read_chunk_size = (read_chunk_size + 3) & ~3;
        ret = heap_caps_malloc(read_chunk_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    }
    ESP_LOGV(TAG, "allocate temp buffer: %p (%d)", ret, read_chunk_size);
    *out_size = (ret != NULL ? read_chunk_size : 0);
    return ret;
}

static IRAM_ATTR void release_buffer_malloc(void* arg, void *temp_buf)
{
    free(temp_buf);
}

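// region_protected() hook for the main flash chip: return ESP_OK when the operation on the region is
// allowed (either protection checks are disabled via no_protect, or the region is considered safe),
// ESP_ERR_NOT_SUPPORTED otherwise.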
static IRAM_ATTR esp_err_t main_flash_region_protected(void* arg, size_t start_addr, size_t size)
{
    if (((spi1_app_func_arg_t*)arg)->no_protect || esp_partition_main_flash_region_safe(start_addr, size)) {
        //ESP_OK = 0, also means protected == 0
        return ESP_OK;
    } else {
        return ESP_ERR_NOT_SUPPORTED;
    }
}

static DRAM_ATTR spi1_app_func_arg_t main_flash_arg = {};

//for SPI1, we have to disable the cache and interrupts before using the SPI bus
static const DRAM_ATTR esp_flash_os_functions_t esp_flash_spi1_default_os_functions = {
    .start = spi1_start,
    .end = spi1_end,
    .region_protected = main_flash_region_protected,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .check_yield = spi1_flash_os_check_yield,
    .yield = spi1_flash_os_yield,
};

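// For chips on SPI2/SPI3 the cache stays enabled; arbitration is done purely through the SPI bus
// lock, and no region protection or yield logic is needed.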
static const esp_flash_os_functions_t esp_flash_spi23_default_os_functions = {
    .start = acquire_spi_bus_lock,
    .end = release_spi_bus_lock,
    .delay_us = delay_us,
    .get_temp_buffer = get_buffer_malloc,
    .release_temp_buffer = release_buffer_malloc,
    .region_protected = NULL,
    .check_yield = NULL,
    .yield = NULL,
};

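// Register this flash chip as a device on the bus lock of the given host, so its accesses are
// arbitrated against other devices on the same bus. Returns NULL on failure.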
static spi_bus_lock_dev_handle_t register_dev(int host_id)
{
    spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
    spi_bus_lock_dev_handle_t dev_handle;
    spi_bus_lock_dev_config_t config = {.flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED};
    esp_err_t err = spi_bus_lock_register_dev(lock, &config, &dev_handle);
    if (err != ESP_OK) {
        return NULL;
    }
    return dev_handle;
}

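// Attach the default OS functions to `chip` according to the host it is connected to, allocate the
// per-chip argument structure, and report the bus lock device ID when a bus lock is used.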
esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, int* out_dev_id)
{
    spi_bus_lock_dev_handle_t dev_handle = NULL;

    // Skip initializing the bus lock when the bus is SPI1 and the bus is not shared with the SPI Master
    // driver, leaving dev_handle = NULL
    bool skip_register_dev = (host_id == SPI1_HOST);
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    skip_register_dev = false;
#endif
    if (!skip_register_dev) {
        dev_handle = register_dev(host_id);
    }

    switch (host_id)
    {
    case SPI1_HOST:
        //SPI1
        chip->os_func = &esp_flash_spi1_default_os_functions;
        chip->os_func_data = heap_caps_malloc(sizeof(spi1_app_func_arg_t),
                                              MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (chip->os_func_data == NULL) {
            return ESP_ERR_NO_MEM;
        }
        *(spi1_app_func_arg_t*) chip->os_func_data = (spi1_app_func_arg_t) {
            .common_arg = {
                .dev_lock = dev_handle,
            },
            .no_protect = true,
        };
        break;
    case SPI2_HOST:
#if SOC_SPI_PERIPH_NUM > 2
    case SPI3_HOST:
#endif
        //SPI2, SPI3
        chip->os_func = &esp_flash_spi23_default_os_functions;
        chip->os_func_data = heap_caps_malloc(sizeof(app_func_arg_t),
                                              MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (chip->os_func_data == NULL) {
            return ESP_ERR_NO_MEM;
        }
        *(app_func_arg_t*) chip->os_func_data = (app_func_arg_t) {
            .dev_lock = dev_handle,
        };
        break;
    default:
        return ESP_ERR_INVALID_ARG;
    }

    // When the bus lock is not used (dev_handle == NULL), the device ID has to be given directly
    // by the application instead of being assigned by the bus lock.
    if (dev_handle) {
        *out_dev_id = spi_bus_lock_get_dev_id(dev_handle);
    }

    return ESP_OK;
}

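// Undo esp_flash_init_os_functions(): unregister the bus lock device (if any) and free the
// per-chip argument structure.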
esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip)
{
    if (chip->os_func_data) {
        spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t*)chip->os_func_data)->dev_lock;
        // The SPI bus lock may not be used on the SPI1 bus
        if (dev_lock) {
            spi_bus_lock_unregister_dev(dev_lock);
        }
        free(chip->os_func_data);
    }
    chip->os_func = NULL;
    chip->os_func_data = NULL;
    return ESP_OK;
}

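// Initialize the bus lock of the main (SPI1) bus and hook the cache enable/disable callbacks as its
// background control. Only meaningful when CONFIG_SPI_FLASH_SHARE_SPI1_BUS is enabled.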
esp_err_t esp_flash_init_main_bus_lock(void)
{
    /* The following called functions are only defined if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
     * is set. Thus, we must not call them if the macro is not defined, else the linker
     * would trigger errors. */
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
    spi_bus_lock_init_main_bus();
    spi_bus_lock_set_bg_control(g_main_spi_bus_lock, cache_enable, cache_disable, NULL);

    esp_err_t err = spi_bus_lock_init_main_dev();
    if (err != ESP_OK) {
        return err;
    }
    return ESP_OK;
#else
    return ESP_ERR_NOT_SUPPORTED;
#endif
}

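// Attach the SPI1 OS functions to the main flash chip. Region protection stays enabled here
// (.no_protect = false), unlike chips registered through esp_flash_init_os_functions().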
esp_err_t esp_flash_app_enable_os_functions(esp_flash_t* chip)
{
    main_flash_arg = (spi1_app_func_arg_t) {
        .common_arg = {
            .dev_lock = g_spi_lock_main_flash_dev,   //for SPI1,
        },
        .no_protect = false,
    };
    chip->os_func = &esp_flash_spi1_default_os_functions;
    chip->os_func_data = &main_flash_arg;
    return ESP_OK;
}

// The goal of this part is to manually insert one valid task execution interval, if the time since
// the last valid interval exceeds the limit (CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS).
//
// Valid task execution interval: continuous time with the cache enabled, which is longer than
// CONFIG_SPI_FLASH_ERASE_YIELD_TICKS. Yield time shorter than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS is
// not treated as a valid interval.
static inline IRAM_ATTR bool on_spi1_check_yield(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    uint32_t time = esp_system_get_time();
    // We handle the reset here, instead of in `on_spi1_acquired()`, for the case where the time
    // between release() and the next acquire() is larger than CONFIG_SPI_FLASH_ERASE_YIELD_TICKS;
    // this saves one `esp_system_get_time()` call
    if ((time - ctx->released_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_TICKS * portTICK_PERIOD_MS * 1000) {
        // Reset the acquired time as if the yield has just happened.
        ctx->acquired_since_us = time;
    } else if ((time - ctx->acquired_since_us) >= CONFIG_SPI_FLASH_ERASE_YIELD_DURATION_MS * 1000) {
        return true;
    }
#endif
    return false;
}

static inline IRAM_ATTR void on_spi1_released(spi1_app_func_arg_t* ctx)
{
#ifdef CONFIG_SPI_FLASH_YIELD_DURING_ERASE
    ctx->released_since_us = esp_system_get_time();
#endif
}

static inline IRAM_ATTR void on_spi1_acquired(spi1_app_func_arg_t* ctx)
{
    // Ideally, when the time between `on_spi1_released()` and this call is larger than
    // CONFIG_SPI_FLASH_ERASE_YIELD_TICKS, the acquired time should be reset. We assume the time
    // between `on_spi1_check_yield()` and this call is so short that we can do the reset in that
    // function instead.
}

static inline IRAM_ATTR void on_spi1_yielded(spi1_app_func_arg_t* ctx)
{
    uint32_t time = esp_system_get_time();
    ctx->acquired_since_us = time;
}