mirror of
https://github.com/espressif/esp-idf.git
synced 2024-10-05 20:47:46 -04:00
Merge branch 'feat/spi_bus_lock' into 'master'
SPI: support running SPI master and esp_flash on the same bus See merge request espressif/esp-idf!6520
This commit is contained in:
commit
a304421124
@ -18,6 +18,7 @@ set(srcs
|
||||
"spi_common.c"
|
||||
"spi_master.c"
|
||||
"spi_slave.c"
|
||||
"spi_bus_lock.c"
|
||||
"timer.c"
|
||||
"touch_sensor_common.c"
|
||||
"uart.c")
|
||||
@ -45,7 +46,7 @@ idf_component_register(SRCS "${srcs}"
|
||||
INCLUDE_DIRS ${includes}
|
||||
PRIV_INCLUDE_DIRS "include/driver"
|
||||
PRIV_REQUIRES efuse esp_timer
|
||||
REQUIRES esp_ringbuf soc) #cannot totally hide soc headers, since there are a lot arguments in the driver are chip-dependent
|
||||
REQUIRES esp_ringbuf freertos soc) #cannot totally hide soc headers, since there are a lot arguments in the driver are chip-dependent
|
||||
|
||||
# uses C11 atomic feature
|
||||
set_source_files_properties(spi_master.c PROPERTIES COMPILE_FLAGS -std=gnu11)
|
||||
|
@ -102,7 +102,7 @@ typedef struct {
|
||||
*
|
||||
* @warning For now, only supports HSPI and VSPI.
|
||||
*
|
||||
* @param host SPI peripheral that controls this bus
|
||||
* @param host_id SPI peripheral that controls this bus
|
||||
* @param bus_config Pointer to a spi_bus_config_t struct specifying how the host should be initialized
|
||||
* @param dma_chan Either channel 1 or 2, or 0 in the case when no DMA is required. Selecting a DMA channel
|
||||
* for a SPI bus allows transfers on the bus to have sizes only limited by the amount of
|
||||
@ -123,20 +123,20 @@ typedef struct {
|
||||
* - ESP_ERR_NO_MEM if out of memory
|
||||
* - ESP_OK on success
|
||||
*/
|
||||
esp_err_t spi_bus_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, int dma_chan);
|
||||
esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *bus_config, int dma_chan);
|
||||
|
||||
/**
|
||||
* @brief Free a SPI bus
|
||||
*
|
||||
* @warning In order for this to succeed, all devices have to be removed first.
|
||||
*
|
||||
* @param host SPI peripheral to free
|
||||
* @param host_id SPI peripheral to free
|
||||
* @return
|
||||
* - ESP_ERR_INVALID_ARG if parameter is invalid
|
||||
* - ESP_ERR_INVALID_STATE if not all devices on the bus are freed
|
||||
* - ESP_OK on success
|
||||
*/
|
||||
esp_err_t spi_bus_free(spi_host_device_t host);
|
||||
esp_err_t spi_bus_free(spi_host_device_t host_id);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
@ -16,7 +16,11 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <esp_intr_alloc.h>
|
||||
#include "driver/spi_common.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "hal/spi_types.h"
|
||||
#include "esp_pm.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
@ -24,6 +28,57 @@ extern "C"
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CONFIG_SPI_MASTER_ISR_IN_IRAM
|
||||
#define SPI_MASTER_ISR_ATTR IRAM_ATTR
|
||||
#else
|
||||
#define SPI_MASTER_ISR_ATTR
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SPI_MASTER_IN_IRAM
|
||||
#define SPI_MASTER_ATTR IRAM_ATTR
|
||||
#else
|
||||
#define SPI_MASTER_ATTR
|
||||
#endif
|
||||
|
||||
|
||||
#define BUS_LOCK_DEBUG 0
|
||||
|
||||
#if BUS_LOCK_DEBUG
|
||||
#define BUS_LOCK_DEBUG_EXECUTE_CHECK(x) assert(x)
|
||||
#else
|
||||
#define BUS_LOCK_DEBUG_EXECUTE_CHECK(x)
|
||||
#endif
|
||||
|
||||
|
||||
struct spi_bus_lock_t;
|
||||
struct spi_bus_lock_dev_t;
|
||||
/// Handle to the lock of an SPI bus
|
||||
typedef struct spi_bus_lock_t* spi_bus_lock_handle_t;
|
||||
/// Handle to lock of one of the device on an SPI bus
|
||||
typedef struct spi_bus_lock_dev_t* spi_bus_lock_dev_handle_t;
|
||||
|
||||
/// Background operation control function
|
||||
typedef void (*bg_ctrl_func_t)(void*);
|
||||
|
||||
/// Attributes of an SPI bus
|
||||
typedef struct {
|
||||
spi_bus_config_t bus_cfg; ///< Config used to initialize the bus
|
||||
uint32_t flags; ///< Flags (attributes) of the bus
|
||||
int max_transfer_sz; ///< Maximum length of bytes available to send
|
||||
int dma_chan; ///< DMA channel used
|
||||
int dma_desc_num; ///< DMA descriptor number of dmadesc_tx or dmadesc_rx.
|
||||
lldesc_t *dmadesc_tx; ///< DMA descriptor array for TX
|
||||
lldesc_t *dmadesc_rx; ///< DMA descriptor array for RX
|
||||
spi_bus_lock_handle_t lock;
|
||||
#ifdef CONFIG_PM_ENABLE
|
||||
esp_pm_lock_handle_t pm_lock; ///< Power management lock
|
||||
#endif
|
||||
} spi_bus_attr_t;
|
||||
|
||||
/// Destructor called when a bus is deinitialized.
|
||||
typedef esp_err_t (*spi_destroy_func_t)(void*);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Try to claim a SPI peripheral
|
||||
*
|
||||
@ -262,6 +317,472 @@ void spicommon_dmaworkaround_idle(int dmachan);
|
||||
*/
|
||||
void spicommon_dmaworkaround_transfer_active(int dmachan);
|
||||
|
||||
/*******************************************************************************
|
||||
* Bus attributes
|
||||
******************************************************************************/
|
||||
/**
|
||||
* @brief Set bus lock for the main bus, called by startup code.
|
||||
*
|
||||
* @param lock The lock to be used by the main SPI bus.
|
||||
*/
|
||||
void spi_bus_main_set_lock(spi_bus_lock_handle_t lock);
|
||||
|
||||
/**
|
||||
* @brief Get the attributes of a specified SPI bus.
|
||||
*
|
||||
* @param host_id The specified host to get attribute
|
||||
* @return (Const) Pointer to the attributes
|
||||
*/
|
||||
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id);
|
||||
|
||||
/**
|
||||
* @brief Register a function to a initialized bus to make it called when deinitializing the bus.
|
||||
*
|
||||
* @param host_id The SPI bus to register the destructor.
|
||||
* @param f Destructor to register
|
||||
* @param arg The argument to call the destructor
|
||||
* @return Always ESP_OK.
|
||||
*/
|
||||
esp_err_t spi_bus_register_destroy_func(spi_host_device_t host_id,
|
||||
spi_destroy_func_t f, void *arg);
|
||||
|
||||
/*******************************************************************************
|
||||
* SPI Bus Lock for arbitration among SPI master (intr, polling) trans, SPI flash operations and
|
||||
* flash/psram cache access.
|
||||
*
|
||||
* NON-PUBLIC API. Don't use it directly in applications.
|
||||
*
|
||||
* There is the main lock corresponding to an SPI bus, of which several devices (holding child
|
||||
* locks) attaching to it. Each of the device is STRONGLY RECOMMENDED to be used in only one task
|
||||
* to avoid concurrency issues.
|
||||
*
|
||||
* Terms:
|
||||
* - BG operations (BackGround operations) means some transaction that will not immediately /
|
||||
* explicitly be sent in the task. It can be some cache access, or interrupt transactions.
|
||||
*
|
||||
* - Operation: usage of the bus, for example, do SPI transactions.
|
||||
*
|
||||
* - Acquiring processor: the task or the ISR that is allowed to use the bus. No operations will be
|
||||
* performed if there is no acquiring processor. A processor becomes the acquiring processor if
|
||||
* it ask for that when no acquiring processor exist, otherwise it has to wait for the acquiring
|
||||
* processor to handle over the role to it. The acquiring processor will and will only assign one
|
||||
* acquiring processor in the waiting list (if not empty) when it finishes its operation.
|
||||
*
|
||||
* - Acquiring device: the only device allowed to use the bus. Operations can be performed in
|
||||
* either the BG or the task. When there's no acquiring device, only the ISR is allowed to be the
|
||||
* acquiring processor and perform operations on the bus.
|
||||
*
|
||||
* When a device wants to perform operations, it either:
|
||||
* 1. Acquire the bus, and operate in the task (e.g. polling transactions of SPI master, and SPI flash
|
||||
* operations)
|
||||
*
|
||||
* 2. Request a BG operation. And the ISR will be enabled at proper time.
|
||||
*
|
||||
* For example if a task wants to send an interrupt transaction, it prepares the data in the task,
|
||||
* call `spi_bus_lock_bg_request`, and handle sending in the ISR.
|
||||
*
|
||||
* 3. When a device has already acquired the bus, BG operations are also allowed. After the
|
||||
* `spi_bus_lock_bg_request` is called, call `spi_bus_lock_wait_bg_done` before operations in task
|
||||
* again to wait until BG operations are done.
|
||||
*
|
||||
* Any device may try to invoke the ISR (by `spi_bus_lock_bg_request`). The ISR will be invoked and
|
||||
* become the acquiring processor immediately when the bus is not acquired by other processors. Any
|
||||
* device may also try to acquire the bus (by `spi_bus_lock_acquire_start`). The device will become
|
||||
* the acquiring processor immediately when the bus is not acquired and there is no request active.
|
||||
*
|
||||
* The acquiring processor must be aware of its acquiring role, and properly transfer the acquiring
|
||||
* processor to other tasks or ISR when they have nothing else to do. Before picking a new
|
||||
* acquiring processor, a new acquiring device must be picked first, if there are other devices,
|
||||
* asking to be acquiring device. After that, the new acquiring processor is picked by the sequence
|
||||
* below:
|
||||
*
|
||||
* 1. If there is an acquiring device:
|
||||
* 1.1 The ISR, if acquiring device has active BG requests
|
||||
* 1.2 The task of the device, if no active BG request for the device
|
||||
* 2. The ISR, if there's no acquiring device, but any BG request is active
|
||||
* 3. No one becomes the acquiring processor
|
||||
*
|
||||
* The API also helps on the arbitration of SPI cs lines. The bus is initialized with a cs_num
|
||||
* argument. When attaching devices onto the bus with `spi_bus_lock_register_dev`, it will allocate
|
||||
* devices with different device ID according to the flags given. If the ID is smaller than the
|
||||
* cs_num given when bus is initialized, error will be returned.
|
||||
*
|
||||
* Usage:
|
||||
* * Initialization:
|
||||
* 1. Call `spi_bus_init_lock` to register a lock for a bus.
|
||||
* 2. Call `spi_bus_lock_set_bg_control` to prepare BG enable/disable functions for
|
||||
* the lock.
|
||||
* 3. Call `spi_bus_lock_register_dev` for each devices that may make use of the
|
||||
* bus, properly store the returned handle, representing those devices.
|
||||
*
|
||||
* * Acquiring:
|
||||
* 1. Call `spi_bus_lock_acquire_start` when a device wants to use the bus
|
||||
* 2. Call `spi_bus_lock_touch` to mark the bus as touched by this device. Also check if the bus
|
||||
* has been touched by other devices.
|
||||
* 3. (optional) Do something on the bus...
|
||||
* 4. (optional) Call `spi_bus_lock_bg_request` to inform and invoke the BG. See ISR below about
|
||||
* ISR operations.
|
||||
* 5. (optional) If `spi_bus_lock_bg_request` is done, you have to call `spi_bus_lock_wait_bg_done`
|
||||
* before touching the bus again, or do the following steps.
|
||||
* 6. Call `spi_bus_lock_acquire_end` to release the bus to other devices.
|
||||
*
|
||||
* * ISR:
|
||||
* 1. Call `spi_bus_lock_bg_entry` when entering the ISR, run or skip the closure for the previous
|
||||
* operation according to the return value.
|
||||
* 2. Call `spi_bus_lock_get_acquiring_dev` to get the acquiring device. If there is no acquiring
|
||||
* device, call `spi_bus_lock_bg_check_dev_acq` to check and update a new acquiring device.
|
||||
* 3. Call `spi_bus_lock_bg_check_dev_req` to check for request of the desired device. If the
|
||||
* desired device is not requested, go to step 5.
|
||||
* 4. Check, start operation for the desired device and go to step 6; otherwise if no operations
|
||||
* can be performed, call `spi_bus_lock_bg_clear_req` to clear the request for this device. If
|
||||
* `spi_bus_lock_bg_clear_req` is called and there is no BG requests active, goto step 6.
|
||||
* 5. (optional) If the device is the acquiring device, go to step 6, otherwise
|
||||
* find another desired device, and go back to step 3.
|
||||
* 6. Call `spi_bus_lock_bg_exit` to try quitting the ISR. If failed, go back to step 2 to look for
|
||||
* a new request again. Otherwise, quit the ISR.
|
||||
*
|
||||
* * Deinitialization (optional):
|
||||
* 1. Call `spi_bus_lock_unregister_dev` for each device when they are no longer needed.
|
||||
* 2. Call `spi_bus_deinit_lock` to release the resources occupied by the lock.
|
||||
*
|
||||
* Some technical details:
|
||||
*
|
||||
* The child-lock of each device will have its own Binary Semaphore, which allows the task serving
|
||||
* this device (task A) being blocked when it fail to become the acquiring processor while it's
|
||||
* calling `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`. If it is blocked, there
|
||||
* must be an acquiring processor (either the ISR or another task (task B)), is doing transaction
|
||||
* on the bus. After that, task A will get unblocked and become the acquiring processor when the
|
||||
* ISR call `spi_bus_lock_bg_resume_acquired_dev`, or task B call `spi_bus_lock_acquire_end`.
|
||||
*
|
||||
* When the device wants to send ISR transaction, it should call `spi_bus_lock_bg_request` after
|
||||
* the data is prepared. This function sets a request bit in the critical resource. The ISR will be
|
||||
* invoked and become the new acquiring processor, when:
|
||||
*
|
||||
* 1. A task calls `spi_bus_lock_bg_request` while there is no acquiring processor;
|
||||
* 2. A tasks calls `spi_bus_lock_bg_request` while the task is the acquiring processor. Then the
|
||||
* acquiring processor is handled over to the ISR;
|
||||
* 3. A tasks who is the acquiring processor release the bus by calling `spi_bus_lock_acquire_end`,
|
||||
* and the ISR happens to be the next acquiring processor.
|
||||
*
|
||||
* The ISR will check (by `spi_bus_lock_bg_check_dev_req`) and clear a request bit (by
|
||||
* `spi_bus_lock_bg_clear_req`) after it confirm that all the requests of the corresponding device
|
||||
* are served. The request bit supports being written to recursively, which means, the task don't
|
||||
* need to wait for `spi_bus_lock_bg_clear_req` before call another `spi_bus_lock_bg_request`. The
|
||||
* API will handle the concurrency conflicts properly.
|
||||
*
|
||||
* The `spi_bus_lock_bg_exit` (together with `spi_bus_lock_bg_entry` called before)` is responsible
|
||||
* to ensure ONE and ONLY ONE of the following will happen when the ISR try to give up its
|
||||
* acquiring processor rule:
|
||||
*
|
||||
* 1. ISR quit, no any task unblocked while the interrupt disabled, and none of the BG bits is
|
||||
* active.
|
||||
* 2. ISR quit, there is an acquiring device, and the acquiring processor is passed to the task
|
||||
* serving the acquiring device by unblocking the task.
|
||||
* 3. The ISR failed to quit and have to try again.
|
||||
******************************************************************************/
|
||||
|
||||
#define DEV_NUM_MAX 6 ///< Number of devices supported by this lock
|
||||
|
||||
/// Lock configuration struct
|
||||
typedef struct {
|
||||
int host_id; ///< SPI host id
|
||||
int cs_num; ///< Physical cs numbers of the host
|
||||
} spi_bus_lock_config_t;
|
||||
|
||||
/// Child-lock configuration struct
|
||||
typedef struct {
|
||||
uint32_t flags; ///< flags for the lock, OR-ed of `SPI_BUS_LOCK_DEV_*` flags.
|
||||
#define SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED BIT(0) ///< The device needs a physical CS pin.
|
||||
} spi_bus_lock_dev_config_t;
|
||||
|
||||
/************* Common *********************/
|
||||
/**
|
||||
* Initialize a lock for an SPI bus.
|
||||
*
|
||||
* @param out_lock Output of the handle to the lock
|
||||
* @return
|
||||
* - ESP_ERR_NO_MEM: if memory exhausted
|
||||
* - ESP_OK: if success
|
||||
*/
|
||||
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config);
|
||||
|
||||
/**
|
||||
* Free the resources used by an SPI bus lock.
|
||||
*
|
||||
* @note All attached devices should have been unregistered before calling this
|
||||
* funciton.
|
||||
*
|
||||
* @param lock Handle to the lock to free.
|
||||
*/
|
||||
void spi_bus_deinit_lock(spi_bus_lock_handle_t lock);
|
||||
|
||||
/**
|
||||
* @brief Get the corresponding lock according to bus id.
|
||||
*
|
||||
* @param host_id The bus id to get the lock
|
||||
* @return The lock handle
|
||||
*/
|
||||
spi_bus_lock_handle_t spi_bus_lock_get_by_id(spi_host_device_t host_id);
|
||||
|
||||
/**
|
||||
* @brief Configure how the SPI bus lock enable the background operation.
|
||||
*
|
||||
* @note The lock will not try to stop the background operations, but wait for
|
||||
* The background operations finished indicated by `spi_bus_lock_bg_resume_acquired_dev`.
|
||||
*
|
||||
* @param lock Handle to the lock to set
|
||||
* @param bg_enable The enabling function
|
||||
* @param bg_disable The disabling function, set to NULL if not required
|
||||
* @param arg Argument to pass to the enabling/disabling function.
|
||||
*/
|
||||
void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable,
|
||||
bg_ctrl_func_t bg_disable, void *arg);
|
||||
|
||||
/**
|
||||
* Attach a device onto an SPI bus lock. The returning handle is used to perform
|
||||
* following requests for the attached device.
|
||||
*
|
||||
* @param lock SPI bus lock to attach
|
||||
* @param out_dev_handle Output handle corresponding to the device
|
||||
* @param flags requirement of the device, bitwise OR of SPI_BUS_LOCK_FLAG_* flags
|
||||
*
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED: if there's no hardware resources for new devices.
|
||||
* - ESP_ERR_NO_MEM: if memory exhausted
|
||||
* - ESP_OK: if success
|
||||
*/
|
||||
esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock,
|
||||
spi_bus_lock_dev_config_t *config,
|
||||
spi_bus_lock_dev_handle_t *out_dev_handle);
|
||||
|
||||
/**
|
||||
* Detach a device from its bus and free the resources used
|
||||
*
|
||||
* @param dev_handle Handle to the device.
|
||||
*/
|
||||
void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/**
|
||||
* @brief Get the parent bus lock of the device
|
||||
*
|
||||
* @param dev_handle Handle to the device to get bus lock
|
||||
* @return The bus lock handle
|
||||
*/
|
||||
spi_bus_lock_handle_t spi_bus_lock_get_parent(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/**
|
||||
* @brief Get the device ID of a lock.
|
||||
*
|
||||
* The callers should allocate CS pins according to this ID.
|
||||
*
|
||||
* @param dev_handle Handle to the device to get ID
|
||||
* @return ID of the device
|
||||
*/
|
||||
int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/**
|
||||
* @brief The device request to touch bus registers. Can only be called by the acquiring processor.
|
||||
*
|
||||
* Also check if the registers has been touched by other devices.
|
||||
*
|
||||
* @param dev_handle Handle to the device to operate the registers
|
||||
* @return true if there has been other devices touching SPI registers.
|
||||
* The caller may need to do a full-configuration. Otherwise return
|
||||
* false.
|
||||
*/
|
||||
bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/************* Acquiring service *********************/
|
||||
/**
|
||||
* Acquiring the SPI bus for exclusive use. Will also wait for the BG to finish all requests of
|
||||
* this device before it returns.
|
||||
*
|
||||
* After successfully return, the caller becomes the acquiring processor.
|
||||
*
|
||||
* @note For the main flash bus, `bg_disable` will be called to disable the cache.
|
||||
*
|
||||
* @param dev_handle Handle to the device request for acquiring.
|
||||
* @param wait Time to wait until timeout or succeed, must be `portMAX_DELAY` for now.
|
||||
* @return
|
||||
* - ESP_OK: on success
|
||||
* - ESP_ERR_INVALID_ARG: timeout is not portMAX_DELAY
|
||||
*/
|
||||
esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait);
|
||||
|
||||
/**
|
||||
* Release the bus acquired. Will pass the acquiring processor to other blocked
|
||||
* processors (tasks or ISR), and cause them to be unblocked or invoked.
|
||||
*
|
||||
* The acquiring device may also become NULL if no device is asking for acquiring.
|
||||
* In this case, the BG may be invoked if there is any BG requests.
|
||||
*
|
||||
* If the new acquiring device has BG requests, the BG will be invoked before the
|
||||
* task is resumed later after the BG finishes all requests of the new acquiring
|
||||
* device. Otherwise the task of the new acquiring device will be resumed immediately.
|
||||
*
|
||||
* @param dev_handle Handle to the device releasing the bus.
|
||||
* @return
|
||||
* - ESP_OK: on success
|
||||
* - ESP_ERR_INVALID_STATE: the device hasn't acquired the lock yet
|
||||
*/
|
||||
esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/**
|
||||
* Get the device acquiring the bus.
|
||||
*
|
||||
* @note Return value is not stable as the acquiring processor may change
|
||||
* when this function is called.
|
||||
*
|
||||
* @param lock Lock of SPI bus to get the acquiring device.
|
||||
* @return The argument corresponding to the acquiring device, see
|
||||
* `spi_bus_lock_register_dev`.
|
||||
*/
|
||||
spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_handle_t lock);
|
||||
|
||||
/************* BG (Background, for ISR or cache) service *********************/
|
||||
/**
|
||||
* Call by a device to request a BG operation.
|
||||
*
|
||||
* Depending on the bus lock state, the BG operations may be resumed by this
|
||||
* call, or pending until BG operations allowed.
|
||||
*
|
||||
* Cleared by `spi_bus_lock_bg_clear_req` in the BG.
|
||||
*
|
||||
* @param dev_handle The device requesting BG operations.
|
||||
* @return always ESP_OK
|
||||
*/
|
||||
esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_handle_t dev_handle);
|
||||
|
||||
/**
|
||||
* Wait until the ISR has finished all the BG operations for the acquiring device.
|
||||
* If any `spi_bus_lock_bg_request` for this device has been called after
|
||||
* `spi_bus_lock_acquire_start`, this function must be called before any operation
|
||||
* in the task.
|
||||
*
|
||||
* @note Can only be called when bus acquired by this device.
|
||||
*
|
||||
* @param dev_handle Handle to the device acquiring the bus.
|
||||
* @param wait Time to wait until timeout or succeed, must be `portMAX_DELAY` for now.
|
||||
* @return
|
||||
* - ESP_OK: on success
|
||||
* - ESP_ERR_INVALID_STATE: The device is not the acquiring bus.
|
||||
* - ESP_ERR_INVALID_ARG: Timeout is not portMAX_DELAY.
|
||||
*/
|
||||
esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait);
|
||||
|
||||
/**
|
||||
* Handle interrupt and closure of last operation. Should be called at the beginning of the ISR,
|
||||
* when the ISR is acting as the acquiring processor.
|
||||
*
|
||||
* @param lock The SPI bus lock
|
||||
*
|
||||
* @return false if the ISR has already touched the HW, should run closure of the
|
||||
* last operation first; otherwise true if the ISR just start operating
|
||||
* on the HW, closure should be skipped.
|
||||
*/
|
||||
bool spi_bus_lock_bg_entry(spi_bus_lock_handle_t lock);
|
||||
|
||||
/**
|
||||
* Handle the scheduling of other acquiring devices, and control of HW operation
|
||||
* status.
|
||||
*
|
||||
* If no BG request is found, call with `wip=false`. This function will return false,
|
||||
* indicating there is incoming BG requests for the current acquiring device (or
|
||||
* for all devices if there is no acquiring device) and the ISR needs retry.
|
||||
* Otherwise may schedule a new acquiring processor (unblock the task) if there
|
||||
* is, and return true.
|
||||
*
|
||||
* Otherwise if a BG request is started in this ISR, call with `wip=true` and the
|
||||
* function will enable the interrupt to make the ISR be called again when the
|
||||
* request is done.
|
||||
*
|
||||
* This function is safe and should still be called when the ISR just lost its acquiring processor
|
||||
* role, but hasn't quit.
|
||||
*
|
||||
* @note This function will not change acquiring device. The ISR call
|
||||
* `spi_bus_lock_bg_update_acquiring` to check for new acquiring device,
|
||||
* when acquiring devices need to be served before other devices.
|
||||
*
|
||||
* @param lock The SPI bus lock.
|
||||
* @param wip Whether an operation is being executed when quitting the ISR.
|
||||
* @param do_yield[out] Not touched when no yielding required, otherwise set
|
||||
* to pdTRUE.
|
||||
* @return false if retry is required, indicating that there is pending BG request.
|
||||
* otherwise true and quit ISR is allowed.
|
||||
*/
|
||||
bool spi_bus_lock_bg_exit(spi_bus_lock_handle_t lock, bool wip, BaseType_t* do_yield);
|
||||
|
||||
/**
|
||||
* Check whether there is device asking for the acquiring device, and the desired
|
||||
* device for the next operation is also recommended.
|
||||
*
|
||||
* @note Must be called when the ISR is acting as the acquiring processor, and
|
||||
* there is no acquiring device.
|
||||
*
|
||||
* @param lock The SPI bus lock.
|
||||
* @param out_dev_lock The recommended device for hte next operation. It's the new
|
||||
* acquiring device when found, otherwise a device that has active BG request.
|
||||
*
|
||||
* @return true if the ISR need to quit (new acquiring device has no active BG
|
||||
* request, or no active BG requests for all devices when there is no
|
||||
* acquiring device), otherwise false.
|
||||
*/
|
||||
bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_handle_t lock, spi_bus_lock_dev_handle_t *out_dev_lock);
|
||||
|
||||
/**
|
||||
* Check if the device has BG requests. Must be called when the ISR is acting as
|
||||
* the acquiring processor.
|
||||
*
|
||||
* @note This is not stable, may become true again when a task request for BG
|
||||
* operation (by `spi_bus_lock_bg_request`).
|
||||
*
|
||||
* @param dev_lock The device to check.
|
||||
* @return true if the device has BG requests, otherwise false.
|
||||
*/
|
||||
bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_handle_t dev_lock);
|
||||
|
||||
/**
|
||||
* Clear the pending BG operation request of a device after served. Must be
|
||||
* called when the ISR is acting as the acquiring processor.
|
||||
*
|
||||
* @note When the return value is true, the ISR will lost the acquiring processor role. Then
|
||||
* `spi_bus_lock_bg_exit` must be called and checked before calling all other functions that
|
||||
* require to be called when the ISR is the acquiring processor again.
|
||||
*
|
||||
* @param dev_handle The device whose request is served.
|
||||
* @return True if no pending requests for the acquiring device, or for all devices
|
||||
* if there is no acquiring device. Otherwise false. When the return value is
|
||||
* true, the ISR is no longer the acquiring processor.
|
||||
*/
|
||||
bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_handle_t dev_lock);
|
||||
|
||||
/**
|
||||
* Check if there is any active BG requests.
|
||||
*
|
||||
* @param lock The SPI bus lock.
|
||||
* @return true if any device has active BG requst, otherwise false.
|
||||
*/
|
||||
bool spi_bus_lock_bg_req_exist(spi_bus_lock_handle_t lock);
|
||||
|
||||
/*******************************************************************************
|
||||
* Variable and APIs for the OS to initialize the locks for the main chip
|
||||
******************************************************************************/
|
||||
/// The lock for the main flash device
|
||||
extern const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev;
|
||||
|
||||
/// The lock for the main bus
|
||||
extern const spi_bus_lock_handle_t g_main_spi_bus_lock;
|
||||
|
||||
/**
|
||||
* @brief Initialize the main flash device, called during chip startup.
|
||||
*
|
||||
* @return
|
||||
* - ESP_OK: if success
|
||||
* - ESP_ERR_NO_MEM: memory exhausted
|
||||
*/
|
||||
esp_err_t spi_bus_lock_init_main_dev(void);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@ -171,7 +171,7 @@ typedef struct spi_device_t* spi_device_handle_t; ///< Handle for a device on a
|
||||
* @note While in general, speeds up to 80MHz on the dedicated SPI pins and 40MHz on GPIO-matrix-routed pins are
|
||||
* supported, full-duplex transfers routed over the GPIO matrix only support speeds up to 26MHz.
|
||||
*
|
||||
* @param host SPI peripheral to allocate device on
|
||||
* @param host_id SPI peripheral to allocate device on
|
||||
* @param dev_config SPI interface protocol config for the device
|
||||
* @param handle Pointer to variable to hold the device handle
|
||||
* @return
|
||||
@ -180,7 +180,7 @@ typedef struct spi_device_t* spi_device_handle_t; ///< Handle for a device on a
|
||||
* - ESP_ERR_NO_MEM if out of memory
|
||||
* - ESP_OK on success
|
||||
*/
|
||||
esp_err_t spi_bus_add_device(spi_host_device_t host, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle);
|
||||
esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle);
|
||||
|
||||
|
||||
/**
|
||||
|
826
components/driver/spi_bus_lock.c
Normal file
826
components/driver/spi_bus_lock.c
Normal file
@ -0,0 +1,826 @@
|
||||
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/semphr.h"
|
||||
#include <stdatomic.h>
|
||||
#include "sdkconfig.h"
|
||||
#include "spi_common_internal.h"
|
||||
#include "esp_intr_alloc.h"
|
||||
#include "soc/spi_caps.h"
|
||||
#include "stdatomic.h"
|
||||
#include "esp_log.h"
|
||||
#include <strings.h>
|
||||
|
||||
|
||||
/*
|
||||
* This lock is designed to solve the conflicts between SPI devices (used in tasks) and
|
||||
* the background operations (ISR or cache access).
|
||||
*
|
||||
* There are N (device/task) + 1 (BG) acquiring processer candidates that may touch the bus.
|
||||
*
|
||||
* The core of the lock is a `status` atomic variable, which is always available. No intermediate
|
||||
* status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
|
||||
* atomically read the status, and bitwisely write status value ORed / ANDed with given masks.
|
||||
*
|
||||
* Definitions of the status:
|
||||
* - [30] WEAK_BG_FLAG, active when the BG is the cache
|
||||
* - [29:20] LOCK bits, active when corresponding device is asking for acquiring
|
||||
* - [19:10] PENDING bits, active when the BG acknowledges the REQ bits, but hasn't fully handled them.
|
||||
* - [ 9: 0] REQ bits, active when corresponding device is requesting for BG operations.
|
||||
*
|
||||
* The REQ bits together PENDING bits are called BG bits, which represent the actual BG request
|
||||
* state of devices. Either one of REQ or PENDING being active indicates the device has pending BG
|
||||
* requests. Reason of having two bits instead of one is in the appendix below.
|
||||
*
|
||||
* Acquiring processer means the current processor (task or ISR) allowed to touch the critical
|
||||
* resources, or the SPI bus.
|
||||
*
|
||||
* States of the lock:
|
||||
* - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
|
||||
* operation is in progress.
|
||||
*
|
||||
* - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
|
||||
* acquiring the bus.
|
||||
*
|
||||
* - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
|
||||
*
|
||||
* - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
|
||||
*
|
||||
*
|
||||
* Whenever a bit is written to the status, it means the a device on a task is trying to acquire
|
||||
* the lock (either for the task, or the ISR). When there is no LOCK bits or BG bits active, the
|
||||
* caller immediately become the acquiring processor. Otherwise, the task has to block, and the ISR
|
||||
* will not be invoked until scheduled by the current acquiring processor.
|
||||
*
|
||||
* The acquiring processor is responsible to assign the next acquiring processor by calling the
|
||||
* scheduler, usually after it finishes some requests, and cleared the corresponding status bit.
|
||||
* But there is one exception, when the last bit is cleared from the status, after which there is
|
||||
* no other LOCK bits or BG bits active, the acquiring processor lost its role immediately, and
|
||||
* don't need to call the scheduler to assign the next acquiring processor.
|
||||
*
|
||||
* The acquiring processor may also choose to assign a new acquiring device when there is no, by
|
||||
* calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this case,
|
||||
* is still the ISR, until it calls the scheduler.
|
||||
*
|
||||
*
|
||||
* Transition of the FSM:
|
||||
*
|
||||
* - STATE_IDLE: no acquiring device, nor acquiring processor, no LOCK or BG bits active
|
||||
* -> STATE_BG: by `req_core`
|
||||
* -> STATE_ACQ: by `acquire_core`
|
||||
*
|
||||
* - STATE_BG:
|
||||
* * No acquiring device, the ISR is the acquiring processor, there is BG bits active, but no LOCK
|
||||
* bits
|
||||
* * The BG operation should be enabled while turning into this state.
|
||||
*
|
||||
* -> STATE_IDLE: by `bg_exit_core` after `clear_pend_core` for all BG bits
|
||||
* -> STATE_BG_ACQ: by `schedule_core`, when there is new LOCK bit set (by `acquire_core`)
|
||||
*
|
||||
* - STATE_BG_ACQ:
|
||||
* * There is acquiring device, the ISR is the acquiring processor, there may be BG bits active for
|
||||
* the acquiring device.
|
||||
* * The BG operation should be enabled while turning into this state.
|
||||
*
|
||||
* -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits for the acquiring
|
||||
* device.
|
||||
*
|
||||
* Should not go to the STATE_ACQ (unblock the acquiring task) until all requests of the
|
||||
* acquiring device are finished. This is to preserve the sequence of foreground (polling) and
|
||||
* background operations of the device. The background operations queued before the acquiring
|
||||
* should be completed first.
|
||||
*
|
||||
* - STATE_ACQ:
|
||||
* * There is acquiring device, the task is the acquiring processor, there is no BG bits active for
|
||||
* the acquiring device.
|
||||
* * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`)
|
||||
* should be resumed while turning into this state.
|
||||
*
|
||||
* -> STATE_BG_ACQ: by `req_core`
|
||||
* -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another
|
||||
* device, and the new acquiring device has active BG bits.
|
||||
* -> STATE_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another devices,
|
||||
* but the new acquiring device has no active BG bits.
|
||||
* -> STATE_BG: by `acquire_end_core` when there is no LOCK bit active, but there are active BG
|
||||
* bits.
|
||||
* -> STATE_IDLE: by `acquire_end_core` when there is no LOCK bit, nor BG bit active.
|
||||
*
|
||||
* The `req_core` used in the task is a little special. It asks for acquiring processor for the
|
||||
* ISR. When it succeed for the first time, it will invoke the ISR (hence passing the acquiring
|
||||
* role to the BG). Otherwise it will not block, the ISR will be automatically be invoked by other
|
||||
* acquiring processor. The caller of `req_core` will never become acquiring processor by this
|
||||
* function.
|
||||
*
|
||||
*
|
||||
* Appendix: The design, that having both request bit and pending bit, is to solve the
|
||||
* concurrency issue between tasks and the bg, when the task can queue several requests,
|
||||
* however the request bit cannot represent the number of requests queued.
|
||||
*
|
||||
* Here's the workflow of task and ISR work concurrently:
|
||||
* - Task: (a) Write to Queue -> (b) Write request bit
|
||||
* The Task have to write request bit (b) after the data is prepared in the queue (a),
|
||||
* otherwise the BG may fail to read from the queue when it sees the request bit set.
|
||||
*
|
||||
* - BG: (c) Read queue -> (d) Clear request bit
|
||||
* Since the BG cannot know the number of requests queued, it have to repeatedly check the
|
||||
* queue (c), until it find the data is empty, and then clear the request bit (d).
|
||||
*
|
||||
* The events are possible to happen in the order: (c) -> (a) -> (b) -> (d). This may cause a false
|
||||
* clear of the request bit. And there will be data prepared in the queue, but the request bit is
|
||||
* inactive.
|
||||
*
|
||||
* (e) move REQ bits to PEND bits, happen before (c) is introduced to solve this problem. In this
|
||||
* case (d) is changed to clear the PEND bit. Even if (e) -> (c) -> (a) -> (b) -> (d), only PEND
|
||||
* bit is cleared, while the REQ bit is still active.
|
||||
*/
|
||||
|
||||
struct spi_bus_lock_dev_t;
|
||||
typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;
|
||||
|
||||
typedef struct spi_bus_lock_t spi_bus_lock_t;
|
||||
|
||||
|
||||
#define MAX_DEV_NUM 10
|
||||
|
||||
// Bit 29-20: lock bits, Bit 19-10: pending bits
|
||||
// Bit 9-0: request bits, Bit 30:
|
||||
#define LOCK_SHIFT 20
|
||||
#define PENDING_SHIFT 10
|
||||
#define REQ_SHIFT 0
|
||||
|
||||
#define WEAK_BG_FLAG BIT(30) /**< The bus is permanently requested by background operations.
|
||||
* This flag is weak, will not prevent acquiring of devices. But will help the BG to be re-enabled again after the bus is release.
|
||||
*/
|
||||
|
||||
// get the bit mask where bits [high-1, low] are all 1'b1.
|
||||
#define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
|
||||
|
||||
#define LOCK_BIT(mask) ((mask) << LOCK_SHIFT)
|
||||
#define REQUEST_BIT(mask) ((mask) << REQ_SHIFT)
|
||||
#define PENDING_BIT(mask) ((mask) << PENDING_SHIFT)
|
||||
#define DEV_MASK(id) (LOCK_BIT(1<<id) | PENDING_BIT(1<<id) | REQUEST_BIT(1<<id))
|
||||
#define ID_DEV_MASK(mask) (ffs(mask) - 1)
|
||||
|
||||
#define REQ_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM, REQ_SHIFT)
|
||||
#define PEND_MASK BIT1_MASK(PENDING_SHIFT+MAX_DEV_NUM, PENDING_SHIFT)
|
||||
#define BG_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM*2, REQ_SHIFT)
|
||||
#define LOCK_MASK BIT1_MASK(LOCK_SHIFT+MAX_DEV_NUM, LOCK_SHIFT)
|
||||
|
||||
#define DEV_REQ_MASK(dev) ((dev)->mask & REQ_MASK)
|
||||
#define DEV_PEND_MASK(dev) ((dev)->mask & PEND_MASK)
|
||||
#define DEV_BG_MASK(dev) ((dev)->mask & BG_MASK)
|
||||
|
||||
struct spi_bus_lock_t {
    /**
     * The core of the lock. These bits are status of the lock, which should be always available.
     * No intermediate status is allowed. This is realized by atomic operations, mainly
     * `atomic_fetch_and`, `atomic_fetch_or`, which atomically read the status, and bitwise write
     * status value ORed / ANDed with given masks.
     *
     * The request bits together with pending bits represent the actual bg request state of one
     * device. Either one of them being active indicates the device has pending bg requests.
     *
     * Whenever a bit is written to the status, it means a device on a task is trying to
     * acquire the lock. But this will succeed only when no LOCK or BG bits are active.
     *
     * The acquiring processor is responsible to call the scheduler to pass its role to other tasks
     * or the BG, unless it clears the last bit in the status register.
     */
    //// Critical resources, they are only writable by the acquiring processor, and stable only when read by the acquiring processor.
    atomic_uint_fast32_t status;
    spi_bus_lock_dev_t* volatile acquiring_dev; ///< The acquiring device
    bool volatile acq_dev_bg_active;    ///< BG is the acquiring processor serving the acquiring device, used for the wait_bg to skip waiting quickly.
    bool volatile in_isr;               ///< ISR is touching HW
    //// End of critical resources

    atomic_intptr_t dev[DEV_NUM_MAX];   ///< Child locks, one slot per registered device (NULL when free).
    bg_ctrl_func_t bg_enable;           ///< Function to enable background operations.
    bg_ctrl_func_t bg_disable;          ///< Function to disable background operations
    void* bg_arg;                       ///< Argument for `bg_enable` and `bg_disable` functions.

    spi_bus_lock_dev_t* last_dev;       ///< Last used device, to decide whether to refresh all registers.
    int periph_cs_num;                  ///< Number of the CS pins the HW has.

    //debug information
    int host_id;                        ///< Host ID, for debug information printing
    uint32_t new_req;                   ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
};
|
||||
|
||||
struct spi_bus_lock_dev_t {
    SemaphoreHandle_t semphr;   ///< Binary semaphore used to notify the device that it claimed the bus
    spi_bus_lock_t* parent;     ///< Pointer to the parent spi_bus_lock_t
    uint32_t mask;              ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
};
|
||||
|
||||
static const char TAG[] = "bus_lock";
|
||||
|
||||
// Check `a`; on failure log an error (with function name and line) and return `ret_val`
// from the calling function. Wrapped in do/while(0) so the macro expands to a single
// statement and is safe inside unbraced if/else bodies (avoids the dangling-else hazard
// of the previous bare-`if` form).
#define LOCK_CHECK(a, str, ret_val, ...) do { \
        if (!(a)) { \
            ESP_LOGE(TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
            return (ret_val); \
        } \
    } while (0)
|
||||
|
||||
static inline uint32_t mask_get_id(uint32_t mask);
|
||||
static inline uint32_t dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
|
||||
|
||||
/*******************************************************************************
|
||||
* atomic operations to the status
|
||||
******************************************************************************/
|
||||
// Atomically OR `set` into the lock status word; returns the status value *before* the update.
SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
{
    return atomic_fetch_or(&lock->status, set);
}
|
||||
|
||||
// Atomically clear the bits in `clear` from the lock status word; returns the status value
// *before* the update.
IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
{
    return atomic_fetch_and(&lock->status, ~clear);
}
|
||||
|
||||
// Atomically read the current lock status word.
IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
{
    return atomic_load(&lock->status);
}
|
||||
|
||||
// Reset the status word to all-clear: no LOCK/PEND/REQ bits, no WEAK_BG flag.
SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
{
    atomic_store(&lock->status, 0);
}
|
||||
|
||||
// Clear the bits in `clear` and return the *remaining* status bits (after the clear).
// The fetch-and-clear must be one atomic step so the all-zero status (last bit cleared)
// is never missed by the caller.
IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
{
    //the fetch and clear should be atomic, avoid missing the all '0' status when all bits are clear.
    uint32_t state = lock_status_fetch_clear(lock, clear);
    return state & (~clear);
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Schedule service
|
||||
*
|
||||
* The modification to the status bits may cause rotating of the acquiring processor. It also have
|
||||
* effects to `acquired_dev` (the acquiring device), `in_isr` (HW used in BG), and
|
||||
* `acq_dev_bg_active` (wait_bg_end can be skipped) members of the lock structure.
|
||||
*
|
||||
* Most of them should be atomic, and special attention should be paid to the operation
|
||||
* sequence.
|
||||
******************************************************************************/
|
||||
// Wake the task blocked on this device's semaphore, from ISR context.
// `do_yield` is set to pdTRUE if a context switch should happen at ISR exit.
SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
{
    xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
}
|
||||
|
||||
// Wake the task blocked on this device's semaphore, from task context.
IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
{
    xSemaphoreGive(dev_lock->semphr);
}
|
||||
|
||||
// Disable background operations (ISR or cache) via the installed callback.
SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
    lock->bg_disable(lock->bg_arg);
}
|
||||
|
||||
// Enable background operations (ISR or cache) via the installed callback.
IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
    lock->bg_enable(lock->bg_arg);
}
|
||||
|
||||
// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass that to it.
// The caller will never become the acquiring processor after this function returns.
SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t *lock = dev_handle->parent;

    // Though `acquiring_dev` is a critical resource, `dev_handle == lock->acquiring_dev`
    // is a stable statement unless `acquire_start` or `acquire_end` is called by the current
    // device.
    if (dev_handle == lock->acquiring_dev){
        // Set the REQ bit and check BG bits if we are the acquiring processor.
        // If the BG bits were not active before, invoke the BG again.

        // Avoid competitive risk against `clear_pend_core`: `acq_dev_bg_active` must be set
        // before setting the REQ bit (ordering is significant here).
        lock->acq_dev_bg_active = true;
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if ((status & DEV_BG_MASK(dev_handle)) == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    } else {
        // Not the acquiring device: only invoke the BG if the whole status was idle,
        // i.e. there was no acquiring processor at all before this request.
        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
        if (status == 0) {
            bg_enable(lock); //acquiring processor passed to BG
        }
    }
}
|
||||
|
||||
//Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);

    // Check all bits except WEAK_BG: if no other LOCK or BG bits were active,
    // this device acquires the bus immediately.
    if ((status & (BG_MASK | LOCK_MASK)) == 0) {
        //succeed at once
        lock->acquiring_dev = dev_handle;
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        if (status & WEAK_BG_FLAG) {
            //Mainly to disable the cache (Weak_BG), that is not able to disable itself
            bg_disable(lock);
        }
        return true;
    }
    // Otherwise the caller must block; a previous acquiring processor will hand over the bus.
    return false;
}
|
||||
|
||||
/**
 * Find the next acquiring processor according to the status. Will directly change
 * the acquiring device if a new one is found.
 *
 * Cases:
 * - BG should still be the acquiring processor (Return false):
 *     1. Acquiring device has active BG bits: out_desired_dev = new acquiring device
 *     2. No acquiring device, but BG active: out_desired_dev = randomly pick one device with active BG bits
 * - BG should yield to the task (Return true):
 *     3. Acquiring device has no active BG bits: out_desired_dev = new acquiring device
 *     4. No acquiring device while no active BG bits: out_desired_dev=NULL
 *
 * The acquiring device task needs to be resumed only in case 3.
 *
 * This scheduling can happen in either task or ISR, so `in_isr` and `bg_active` are not touched.
 *
 * @param lock
 * @param status Current status
 * @param out_desired_dev Desired device to work next, see above.
 *
 * @return False if BG should still be the acquiring processor, otherwise True (yield to task).
 */
IRAM_ATTR static inline bool
schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
{
    spi_bus_lock_dev_t* desired_dev = NULL;
    uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
    // Collapse REQ and PEND bits into one per-device "has BG work" bitmap.
    uint32_t bg_bits = status & BG_MASK;
    bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;

    bool bg_yield;
    if (lock_bits) {
        // Some device is waiting to acquire: it becomes the acquiring device.
        int dev_id = mask_get_id(lock_bits);
        desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

        lock->acquiring_dev = desired_dev;
        // Yield to the task only if the new acquiring device has no pending BG work.
        bg_yield = ((bg_bits & desired_dev->mask) == 0);
        lock->acq_dev_bg_active = !bg_yield;
    } else {
        lock->acq_dev_bg_active = false;
        if (bg_bits) {
            // No acquiring device, but there is BG work: pick any device with active BG bits.
            int dev_id = mask_get_id(bg_bits);
            desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
            BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

            lock->acquiring_dev = NULL;
            bg_yield = false;
        } else {
            // Fully idle: nothing to serve.
            desired_dev = NULL;
            lock->acquiring_dev = NULL;
            bg_yield = true;
        }
    }
    *out_desired_dev = desired_dev;
    return bg_yield;
}
|
||||
|
||||
//Clear the LOCK bit and trigger a rescheduling.
IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
{
    spi_bus_lock_t* lock = dev_handle->parent;
    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
    spi_bus_lock_dev_t* desired_dev = NULL;

    bool invoke_bg = !schedule_core(lock, status, &desired_dev);
    if (invoke_bg) {
        // BG work remains: hand the bus to the ISR.
        bg_enable(lock);
    } else if (desired_dev) {
        // Another device was waiting to acquire and has no BG work: wake its task.
        resume_dev(desired_dev);
    } else if (status & WEAK_BG_FLAG) {
        // Bus fully idle: re-enable the weak BG (cache) that was disabled on acquire.
        bg_enable(lock);
    }
}
|
||||
|
||||
// Move the REQ bits to corresponding PEND bits. Must be called by the acquiring processor.
// Has no side effects on the acquiring device/processor.
SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
{
    uint32_t active_req_bits = status & REQ_MASK;
#if PENDING_SHIFT > REQ_SHIFT
    uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
#else
    uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
#endif
    // We have to set the PEND bits and then clear the REQ bits, since BG bits are using bitwise OR logic,
    // this will not influence the effectiveness of the BG bits of every device.
    lock_status_fetch_set(lock, pending_mask);
    lock_status_fetch_clear(lock, active_req_bits);
}
|
||||
|
||||
// Clear the PEND bit (not REQ bit!) of a device, return the suggestion whether we can try to quit the ISR.
// Loses the acquiring processor role immediately when the BG bits for the active device are inactive,
// indicated by the return value.
// Can be called only when the ISR is acting as the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
{
    bool finished;
    spi_bus_lock_t *lock = dev_handle->parent;
    uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);

    uint32_t status = lock_status_clear(lock, pend_mask);

    if (lock->acquiring_dev == dev_handle) {
        // Serving the acquiring device: done once its REQ bits are all clear too.
        finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
        if (finished) {
            lock->acq_dev_bg_active = false;
        }
    } else {
        // No acquiring device: done only when the whole status is clear.
        finished = (status == 0);
    }
    return finished;
}
|
||||
|
||||
// Return true if the ISR has already touched the HW, which means previous operations should
// be terminated first, before we use the HW again. Otherwise return false.
// In either case `in_isr` will be marked as true, until a call to `bg_exit_core` with `wip=false` succeeds.
SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
{
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
    /*
     * The interrupt is disabled at the entry of the ISR to avoid a competitive risk as below:
     *
     * `esp_intr_enable` will be called (b) after a new BG request is queued (a) in the task;
     * while `esp_intr_disable` should be called (c) if we check and find the sending queue is empty (d).
     * If (c) happens after (d), and things happen in this sequence:
     * (d) -> (a) -> (b) -> (c), the interrupt will be disabled while there's a pending BG request in the queue.
     *
     * To avoid this, the interrupt is disabled here, and re-enabled later if required:
     * (c) -> (d) -> (a) -> (b) -> revert (c) if !d
     */
    bg_disable(lock);
    if (lock->in_isr) {
        // A previous BG operation is still in progress on the HW.
        return false;
    } else {
        lock->in_isr = true;
        return true;
    }
}
|
||||
|
||||
// Handle the conditions of status and interrupt, avoiding the ISR being disabled when there are any newly coming BG requests.
// When called with `wip=true`, the ISR is performing some operations. Will enable the interrupt again and exit unconditionally.
// When called with `wip=false`, will only return `true` when there is no incoming BG request. If the return value is `false`, the ISR should try again.
// Will not change the acquiring device.
SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
{
    //See comments in `bg_entry_core`, re-enable the interrupt disabled in entry if we do need the interrupt
    if (wip) {
        bg_enable(lock);
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
        return true;
    }

    bool ret;
    uint32_t status = lock_status_fetch(lock);
    if (lock->acquiring_dev) {
        if (status & DEV_BG_MASK(lock->acquiring_dev)) {
            // Acquiring device still has BG work: stay in the ISR.
            BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
            ret = false;
        } else {
            // The request may happen at any time, even after we fetched the status.
            // The value of `acq_dev_bg_active` is random.
            resume_dev_in_isr(lock->acquiring_dev, do_yield);
            ret = true;
        }
    } else {
        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
        ret = !(status & BG_MASK);
    }
    if (ret) {
        //when successfully exiting, but no transaction done, mark BG as inactive
        lock->in_isr = false;
    }
    return ret;
}
|
||||
|
||||
// Drain the device's binary semaphore (non-blocking take) so a following `dev_wait`
// only returns after a fresh "give", not a stale token.
IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
{
    xSemaphoreTake(dev_handle->semphr, 0);
}
|
||||
|
||||
// Block on the device's semaphore for up to `wait` ticks.
// Returns ESP_OK when notified, ESP_ERR_TIMEOUT when the wait expires.
SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    return (xSemaphoreTake(dev_handle->semphr, wait) == pdFALSE) ? ESP_ERR_TIMEOUT : ESP_OK;
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Initialization & Deinitialization
|
||||
******************************************************************************/
|
||||
// Allocate and initialize a bus lock object.
// On success stores the new handle in `*out_lock` and returns ESP_OK;
// returns ESP_ERR_NO_MEM if the allocation fails.
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
{
    spi_bus_lock_t *new_lock = (spi_bus_lock_t *)calloc(1, sizeof(spi_bus_lock_t));
    if (!new_lock) {
        return ESP_ERR_NO_MEM;
    }

    lock_status_init(new_lock);
    new_lock->acquiring_dev = NULL;
    new_lock->last_dev = NULL;
    new_lock->periph_cs_num = config->cs_num;
    new_lock->host_id = config->host_id;

    *out_lock = new_lock;
    return ESP_OK;
}
|
||||
|
||||
// Destroy a bus lock. Every device must already be unregistered, otherwise the
// assertion fires (a registered device would be left with a dangling parent).
void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
{
    for (int slot = 0; slot < DEV_NUM_MAX; slot++) {
        assert(atomic_load(&lock->dev[slot]) == (intptr_t)NULL);
    }
    free(lock);
}
|
||||
|
||||
// Reserve a free device slot and return its index, or -1 when none is available.
// CS-required devices scan the hardware CS slots upwards [0, periph_cs_num);
// others scan all slots downwards [DEV_NUM_MAX-1, 0] so the two groups collide
// as late as possible. The slot is marked with the placeholder value 1; the real
// handle is stored later by the caller.
static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
{
    int begin = cs_required ? 0 : DEV_NUM_MAX - 1;
    int end   = cs_required ? lock->periph_cs_num : -1;
    int step  = cs_required ? 1 : -1;

    for (int slot = begin; slot != end; slot += step) {
        intptr_t expected = (intptr_t)NULL;
        //use 1 to occupy the slot, actual setup comes later
        if (atomic_compare_exchange_strong(&lock->dev[slot], &expected, (intptr_t)1)) {
            return slot;
        }
    }
    return -1;
}
|
||||
|
||||
esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
|
||||
spi_bus_lock_dev_handle_t *out_dev_handle)
|
||||
{
|
||||
int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
|
||||
if (id == -1) return ESP_ERR_NOT_SUPPORTED;
|
||||
|
||||
spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
if (dev_lock == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
dev_lock->semphr = xSemaphoreCreateBinary();
|
||||
if (dev_lock->semphr == NULL) {
|
||||
free(dev_lock);
|
||||
atomic_store(&lock->dev[id], (intptr_t)NULL);
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
dev_lock->parent = lock;
|
||||
dev_lock->mask = DEV_MASK(id);
|
||||
|
||||
ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
|
||||
atomic_store(&lock->dev[id], (intptr_t)dev_lock);
|
||||
*out_dev_handle = dev_lock;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
// Unregister a device: clear its slot, delete its semaphore, and free the handle.
// Also drops the cached `last_dev` pointer if it refers to the device going away,
// so the next touch refreshes all registers.
void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
{
    spi_bus_lock_t *bus_lock = dev_handle->parent;
    int slot = dev_lock_get_id(dev_handle);
    BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&bus_lock->dev[slot]) == (intptr_t)dev_handle);

    if (bus_lock->last_dev == dev_handle) {
        bus_lock->last_dev = NULL;
    }

    atomic_store(&bus_lock->dev[slot], (intptr_t)NULL);
    if (dev_handle->semphr) {
        vSemaphoreDelete(dev_handle->semphr);
    }

    free(dev_handle);
}
|
||||
|
||||
// Convert a one-bit-per-device mask into a device ID: the index of the lowest set bit.
IRAM_ATTR static inline uint32_t mask_get_id(uint32_t mask)
{
    return ID_DEV_MASK(mask);
}
|
||||
|
||||
// Device ID of a device lock, derived from its pre-computed bit mask.
IRAM_ATTR static inline uint32_t dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
{
    return mask_get_id(dev_lock->mask);
}
|
||||
|
||||
// Install the callbacks (and their shared argument) used by the lock to enable and
// disable background operations on this bus.
void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
{
    lock->bg_arg = arg;
    lock->bg_enable = bg_enable;
    lock->bg_disable = bg_disable;
}
|
||||
|
||||
// Return the ID of a device lock, or -1 for a NULL handle.
IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
{
    if (dev_handle == NULL) {
        return -1;
    }
    return dev_lock_get_id(dev_handle);
}
|
||||
|
||||
//will be called when cache disabled
|
||||
IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
|
||||
{
|
||||
spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
|
||||
dev_handle->parent->last_dev = dev_handle;
|
||||
ESP_EARLY_LOGD(TAG, "SPI dev changed from %d to %d",
|
||||
dev_lock_get_id(last_dev), dev_lock_get_id(dev_handle));
|
||||
return (dev_handle != last_dev);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Acquiring service
|
||||
******************************************************************************/
|
||||
// Acquire the bus for exclusive use by `dev_handle`. Blocks until this device becomes
// the acquiring processor. Only `wait == portMAX_DELAY` is currently supported.
IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
{
    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);

    spi_bus_lock_t* lock = dev_handle->parent;

    // Clear the semaphore before checking
    dev_wait_prepare(dev_handle);
    if (!acquire_core(dev_handle)) {
        //block until becoming the acquiring processor (helped by the previous acquiring processor)
        esp_err_t err = dev_wait(dev_handle, wait);
        //TODO: add timeout handling here.
        if (err != ESP_OK) return err;
    }

    ESP_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);

    //When we arrive here, requests of this device should already be handled
    uint32_t status = lock_status_fetch(lock);
    (void) status;
    BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);

    return ESP_OK;
}
|
||||
|
||||
// Release the bus previously acquired via `spi_bus_lock_acquire_start`.
// Only the current acquiring device may release; otherwise ESP_ERR_INVALID_STATE.
IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
{
    //release the bus
    spi_bus_lock_t* lock = dev_handle->parent;
    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot release a lock that hasn't been acquired.", ESP_ERR_INVALID_STATE);

    acquire_end_core(dev_handle);

    ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
    return ESP_OK;
}
|
||||
|
||||
// Return the handle of the device currently holding the bus, or NULL when none does.
SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
{
    return lock->acquiring_dev;
}
|
||||
|
||||
/*******************************************************************************
|
||||
* BG (background operation) service
|
||||
******************************************************************************/
|
||||
// Public wrapper of `bg_entry_core`: called at ISR entry; returns true when the HW is
// free for a new BG operation (see `bg_entry_core` for the interrupt-disable rationale).
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
{
    return bg_entry_core(lock);
}
|
||||
|
||||
// Public wrapper of `bg_exit_core`: called before leaving the ISR; returns true when the
// ISR may actually exit (no pending BG work), false when it should loop again.
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
{
    return bg_exit_core(lock, wip, do_yield);
}
|
||||
|
||||
// Request a background (ISR) operation for this device; non-blocking.
// The ISR is invoked here only when the request makes the bus non-idle (see `req_core`).
SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
{
    req_core(dev_handle);
    return ESP_OK;
}
|
||||
|
||||
// Block until all BG requests of the acquiring device are served.
// Must be called by the current acquiring device; only `wait == portMAX_DELAY` supported.
IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
{
    spi_bus_lock_t *lock = dev_handle->parent;
    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot wait for a device that is not acquired", ESP_ERR_INVALID_STATE);
    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);

    // If no BG bits are active, skip quickly. This is ensured by `spi_bus_lock_wait_bg_done`
    // not being executable with `bg_request` on the same device concurrently.
    if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
        // Clear the semaphore before checking
        dev_wait_prepare(dev_handle);
        // Re-check after draining the semaphore: the BG may have finished in between.
        if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
            //block until becoming the acquiring processor (helped by the previous acquiring processor)
            esp_err_t err = dev_wait(dev_handle, wait);
            //TODO: add timeout handling here.
            if (err != ESP_OK) return err;
        }
    }

    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
    BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
    return ESP_OK;
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
{
    // Clear this device's pending bit after its BG transaction was served;
    // the core helper reports whether all of its requests are now finished.
    const bool all_done = clear_pend_core(dev_handle);
    ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
    return all_done;
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
                                                       spi_bus_lock_dev_handle_t *out_dev_lock)
{
    // May only be called while no device owns the lock.
    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
    // Snapshot the status word and let the scheduler pick the next device.
    const uint32_t snapshot = lock_status_fetch(lock);
    return schedule_core(lock, snapshot, out_dev_lock);
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
{
    // Does this device have background work (REQ or PEND bits) outstanding?
    spi_bus_lock_t *lock = dev_lock->parent;
    const uint32_t status = lock_status_fetch(lock);
    const uint32_t dev_status = status & dev_lock->mask;

    // Promote the REQ bits of *all* devices into their PEND bits, but to keep
    // the ISR short, only do so when the calling device's own REQ bit is set.
    if (dev_status & REQ_MASK) {
        update_pend_core(lock, status);
        return true;
    }
    return dev_status & PEND_MASK;
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
{
    // True when any device on this bus still has BG bits set.
    return (lock_status_fetch(lock) & BG_MASK) != 0;
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Static variables of the locks of the main flash
|
||||
******************************************************************************/
|
||||
// Backing storage for the binary semaphore of the main flash device (SPI1 bus).
static StaticSemaphore_t main_flash_semphr;
// Forward declaration; defined below, after the bus lock it belongs to.
static spi_bus_lock_dev_t lock_main_flash_dev;

static spi_bus_lock_t main_spi_bus_lock = {
    /*
     * the main bus cache is permanently required, this flag is set here and never clear so that the
     * cache will always be enabled if acquiring devices yield.
     */
    .status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
    .acquiring_dev = NULL,
    // Device slot 0 is statically occupied by the main flash device.
    .dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
    .new_req = 0,
    .periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
};

static spi_bus_lock_dev_t lock_main_flash_dev = {
    // Created later, in spi_bus_lock_init_main_dev().
    .semphr = NULL,
    .parent = &main_spi_bus_lock,
    .mask = DEV_MASK(0),
};

// Public handles to the statically-allocated main bus lock and its flash device.
const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;
|
||||
|
||||
esp_err_t spi_bus_lock_init_main_dev(void)
|
||||
{
|
||||
spi_bus_main_set_lock(g_main_spi_bus_lock);
|
||||
g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
|
||||
if (g_spi_lock_main_flash_dev->semphr == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
|
||||
return ESP_OK;
|
||||
}
|
@ -31,6 +31,7 @@
|
||||
#include "stdatomic.h"
|
||||
#include "hal/spi_hal.h"
|
||||
|
||||
|
||||
static const char *SPI_TAG = "spi";
|
||||
|
||||
#define SPI_CHECK(a, str, ret_val) do { \
|
||||
@ -53,6 +54,23 @@ typedef struct spi_device_t spi_device_t;
|
||||
|
||||
#define DMA_CHANNEL_ENABLED(dma_chan) (BIT(dma_chan-1))
|
||||
|
||||
|
||||
// Context of one initialized SPI bus (one instance per host).
typedef struct {
    int host_id;                      // which SPI peripheral this context belongs to
    spi_destroy_func_t destroy_func;  // invoked from spi_bus_free() to tear down the attached driver
    void* destroy_arg;                // argument forwarded to destroy_func
    spi_bus_attr_t bus_attr;          // shared bus attributes (DMA descriptors, lock, pm lock, flags)
} spicommon_bus_context_t;
|
||||
|
||||
// Default context for the main (SPI1) bus: no DMA channel, so transfers are
// limited to the internal buffer size and no DMA descriptors are allocated.
#define MAIN_BUS_DEFAULT() { \
        .host_id = 0, \
        .bus_attr = { \
            .dma_chan = 0, \
            .max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE, \
            .dma_desc_num= 0, \
        }, \
    }
|
||||
|
||||
//Periph 1 is 'claimed' by SPI flash code.
|
||||
static atomic_bool spi_periph_claimed[SOC_SPI_PERIPH_NUM] = { ATOMIC_VAR_INIT(true), ATOMIC_VAR_INIT(false), ATOMIC_VAR_INIT(false),
|
||||
#if SOC_SPI_PERIPH_NUM >= 4
|
||||
@ -63,6 +81,9 @@ static const char* spi_claiming_func[3] = {NULL, NULL, NULL};
|
||||
// Bitmap of DMA channels already claimed by some bus; guarded by the spinlock below.
static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;

// Per-host bus contexts. Slot 0 (the main/SPI1 bus) is always present;
// the other slots are filled by spi_bus_initialize().
static spicommon_bus_context_t s_mainbus = MAIN_BUS_DEFAULT();
static spicommon_bus_context_t* bus_ctx[SOC_SPI_PERIPH_NUM] = {&s_mainbus};
|
||||
|
||||
|
||||
//Returns true if this peripheral is successfully claimed, false if otherwise.
|
||||
bool spicommon_periph_claim(spi_host_device_t host, const char* source)
|
||||
@ -416,6 +437,173 @@ bool spicommon_bus_using_iomux(spi_host_device_t host)
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void spi_bus_main_set_lock(spi_bus_lock_handle_t lock)
{
    // Install the bus lock into the static context of the main (SPI1) bus.
    spicommon_bus_context_t *main_ctx = bus_ctx[0];
    main_ctx->bus_attr.lock = lock;
}
|
||||
|
||||
spi_bus_lock_handle_t spi_bus_lock_get_by_id(spi_host_device_t host_id)
{
    // Fetch the lock of an initialized bus; the caller must ensure the
    // context for host_id exists.
    const spicommon_bus_context_t *ctx = bus_ctx[host_id];
    return ctx->bus_attr.lock;
}
|
||||
|
||||
static inline bool is_valid_host(spi_host_device_t host)
{
    // The master driver accepts SPI1 (shared with the main flash) through SPI3.
    return (host >= SPI1_HOST) && (host <= SPI3_HOST);
}
|
||||
|
||||
/**
 * Initialize an SPI bus: claim the peripheral (and optionally a DMA channel),
 * allocate the bus context and DMA descriptors, create the bus lock and the
 * power-management lock, then configure the IO pins.
 * On any failure, everything acquired so far is rolled back before returning.
 */
esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *bus_config, int dma_chan)
{
    esp_err_t err = ESP_OK;
    spicommon_bus_context_t *ctx = NULL;
    spi_bus_attr_t *bus_attr = NULL;
    SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
    SPI_CHECK(bus_ctx[host_id] == NULL, "SPI bus already initialized.", ESP_ERR_INVALID_STATE);
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK( dma_chan >= 0 && dma_chan <= 2, "invalid dma channel", ESP_ERR_INVALID_ARG );
#elif CONFIG_IDF_TARGET_ESP32S2
    // On ESP32-S2 each SPI peripheral is tied to the DMA channel of the same number.
    SPI_CHECK( dma_chan == 0 || dma_chan == host_id, "invalid dma channel", ESP_ERR_INVALID_ARG );
#endif
    SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH|ESP_INTR_FLAG_EDGE|ESP_INTR_FLAG_INTRDISABLED))==0, "intr flag not allowed", ESP_ERR_INVALID_ARG);
#ifndef CONFIG_SPI_MASTER_ISR_IN_IRAM
    SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM)==0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_MASTER_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG);
#endif

    bool spi_chan_claimed = spicommon_periph_claim(host_id, "spi master");
    SPI_CHECK(spi_chan_claimed, "host_id already in use", ESP_ERR_INVALID_STATE);

    if (dma_chan != 0) {
        bool dma_chan_claimed=spicommon_dma_chan_claim(dma_chan);
        if (!dma_chan_claimed) {
            // Release the already-claimed peripheral before bailing out.
            spicommon_periph_free(host_id);
            SPI_CHECK(false, "dma channel already in use", ESP_ERR_INVALID_STATE);
        }
    }

    //clean and initialize the context
    ctx = (spicommon_bus_context_t*)malloc(sizeof(spicommon_bus_context_t));
    if (!ctx) {
        err = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    *ctx = (spicommon_bus_context_t) {
        .host_id = host_id,
        .bus_attr = {
            .bus_cfg = *bus_config,
            .dma_chan = dma_chan,
        },
    };

    bus_attr = &ctx->bus_attr;
    if (dma_chan == 0) {
        // Without DMA, transfers are bounded by the peripheral's internal buffer.
        bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
        bus_attr->dma_desc_num = 0;
    } else {
        //See how many dma descriptors we need and allocate them
        int dma_desc_ct = lldesc_get_required_num(bus_config->max_transfer_sz);
        if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given

        bus_attr->max_transfer_sz = dma_desc_ct * LLDESC_MAX_NUM_PER_DESC;
        // Descriptor chains must live in DMA-capable memory.
        bus_attr->dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        bus_attr->dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        if (bus_attr->dmadesc_tx == NULL || bus_attr->dmadesc_rx == NULL) {
            err = ESP_ERR_NO_MEM;
            goto cleanup;
        }
        bus_attr->dma_desc_num = dma_desc_ct;
    }

    // The bus lock arbitrates between SPI master devices (and possibly the
    // flash driver) sharing this bus.
    spi_bus_lock_config_t lock_config = {
        .host_id = host_id,
        .cs_num = SOC_SPI_PERIPH_CS_NUM(host_id),
    };
    err = spi_bus_init_lock(&bus_attr->lock, &lock_config);
    if (err != ESP_OK) {
        goto cleanup;
    }

#ifdef CONFIG_PM_ENABLE
    // Keep the APB frequency stable while transactions are in flight.
    err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_master",
            &bus_attr->pm_lock);
    if (err != ESP_OK) {
        goto cleanup;
    }
#endif //CONFIG_PM_ENABLE

    err = spicommon_bus_initialize_io(host_id, bus_config, dma_chan, SPICOMMON_BUSFLAG_MASTER | bus_config->flags, &bus_attr->flags);
    if (err != ESP_OK) {
        goto cleanup;
    }

    bus_ctx[host_id] = ctx;
    return ESP_OK;

cleanup:
    // Roll back partial initialization (reverse order of acquisition).
    if (bus_attr) {
#ifdef CONFIG_PM_ENABLE
        esp_pm_lock_delete(bus_attr->pm_lock);
#endif
        if (bus_attr->lock) {
            spi_bus_deinit_lock(bus_attr->lock);
        }
        free(bus_attr->dmadesc_tx);
        free(bus_attr->dmadesc_rx);
    }
    free(ctx);
    if (dma_chan) {
        spicommon_dma_chan_free(dma_chan);
    }
    spicommon_periph_free(host_id);
    return err;
}
|
||||
|
||||
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
{
    // Returns NULL when the bus has not been initialized (or was freed).
    spicommon_bus_context_t *ctx = bus_ctx[host_id];
    return ctx ? &ctx->bus_attr : NULL;
}
|
||||
|
||||
/**
 * Free a bus previously set up by spi_bus_initialize(): run the registered
 * destroy callback, release IO pins, the pm lock, the bus lock, the DMA
 * descriptors and channel, and finally the peripheral and the context itself.
 *
 * @return the destroy callback's result, or ESP_ERR_INVALID_STATE when the
 *         bus was never initialized (or already freed).
 */
esp_err_t spi_bus_free(spi_host_device_t host_id)
{
    esp_err_t err = ESP_OK;
    spicommon_bus_context_t* ctx = bus_ctx[host_id];
    // Guard against freeing an uninitialized bus or a double free, which
    // would otherwise dereference a NULL context.
    if (ctx == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    spi_bus_attr_t* bus_attr = &ctx->bus_attr;

    // Let the attached driver (e.g. spi_master) tear itself down first.
    if (ctx->destroy_func) {
        err = ctx->destroy_func(ctx->destroy_arg);
    }

    spicommon_bus_free_io_cfg(&bus_attr->bus_cfg);

#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(bus_attr->pm_lock);
#endif
    spi_bus_deinit_lock(bus_attr->lock);

    free(bus_attr->dmadesc_rx);
    free(bus_attr->dmadesc_tx);

    if (bus_attr->dma_chan > 0) {
        spicommon_dma_chan_free (bus_attr->dma_chan);
    }
    spicommon_periph_free(host_id);

    free(ctx);
    bus_ctx[host_id] = NULL;
    return err;
}
|
||||
|
||||
esp_err_t spi_bus_register_destroy_func(spi_host_device_t host_id,
|
||||
spi_destroy_func_t f, void *arg)
|
||||
{
|
||||
bus_ctx[host_id]->destroy_func = f;
|
||||
bus_ctx[host_id]->destroy_arg = arg;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Code for workaround for DMA issue in ESP32 v0/v1 silicon
|
||||
*/
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -41,8 +41,6 @@ static const char *SPI_TAG = "spi_slave";
|
||||
return (ret_val); \
|
||||
}
|
||||
|
||||
#define VALID_HOST(x) (x > SPI1_HOST && x <= SPI3_HOST)
|
||||
|
||||
#ifdef CONFIG_SPI_SLAVE_ISR_IN_IRAM
|
||||
#define SPI_SLAVE_ISR_ATTR IRAM_ATTR
|
||||
#else
|
||||
@ -75,6 +73,16 @@ static spi_slave_t *spihost[SOC_SPI_PERIPH_NUM];
|
||||
|
||||
static void IRAM_ATTR spi_intr(void *arg);
|
||||
|
||||
// Check whether `host` can be driven by the SPI slave driver on this target.
static inline bool is_valid_host(spi_host_device_t host)
{
#if CONFIG_IDF_TARGET_ESP32
    return host >= SPI1_HOST && host <= SPI3_HOST;
#elif CONFIG_IDF_TARGET_ESP32S2
    // SPI_HOST (SPI1_HOST) is not supported by the SPI Slave driver on ESP32-S2
    return host >= SPI2_HOST && host <= SPI3_HOST;
#endif
}
|
||||
|
||||
static inline bool bus_is_iomux(spi_slave_t *host)
|
||||
{
|
||||
return host->flags&SPICOMMON_BUSFLAG_IOMUX_PINS;
|
||||
@ -102,7 +110,7 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
|
||||
esp_err_t ret = ESP_OK;
|
||||
esp_err_t err;
|
||||
//We only support HSPI/VSPI, period.
|
||||
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
#if defined(CONFIG_IDF_TARGET_ESP32)
|
||||
SPI_CHECK( dma_chan >= 0 && dma_chan <= 2, "invalid dma channel", ESP_ERR_INVALID_ARG );
|
||||
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
|
||||
@ -224,7 +232,7 @@ cleanup:
|
||||
|
||||
esp_err_t spi_slave_free(spi_host_device_t host)
|
||||
{
|
||||
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
|
||||
if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue);
|
||||
if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue);
|
||||
@ -248,7 +256,7 @@ esp_err_t spi_slave_free(spi_host_device_t host)
|
||||
esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait)
|
||||
{
|
||||
BaseType_t r;
|
||||
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(spihost[host]->dma_chan == 0 || trans_desc->tx_buffer==NULL || esp_ptr_dma_capable(trans_desc->tx_buffer),
|
||||
"txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
|
||||
@ -268,7 +276,7 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi
|
||||
esp_err_t SPI_SLAVE_ATTR spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transaction_t **trans_desc, TickType_t ticks_to_wait)
|
||||
{
|
||||
BaseType_t r;
|
||||
SPI_CHECK(VALID_HOST(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
|
||||
SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
|
||||
r = xQueueReceive(spihost[host]->ret_queue, (void *)trans_desc, ticks_to_wait);
|
||||
if (!r) return ESP_ERR_TIMEOUT;
|
||||
|
344
components/driver/test/test_spi_bus_lock.c
Normal file
344
components/driver/test/test_spi_bus_lock.c
Normal file
@ -0,0 +1,344 @@
|
||||
#include "sdkconfig.h"
|
||||
#include "esp_log.h"
|
||||
#include "driver/spi_master.h"
|
||||
#include "driver/gpio.h"
|
||||
#include "esp_flash_spi_init.h"
|
||||
|
||||
#include "test/test_common_spi.h"
|
||||
#include "unity.h"
|
||||
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32
|
||||
// The VSPI pins on UT_T1_ESP_FLASH are connected to a external flash
|
||||
#define TEST_BUS_PIN_NUM_MISO VSPI_IOMUX_PIN_NUM_MISO
|
||||
#define TEST_BUS_PIN_NUM_MOSI VSPI_IOMUX_PIN_NUM_MOSI
|
||||
#define TEST_BUS_PIN_NUM_CLK VSPI_IOMUX_PIN_NUM_CLK
|
||||
#define TEST_BUS_PIN_NUM_CS VSPI_IOMUX_PIN_NUM_CS
|
||||
#define TEST_BUS_PIN_NUM_WP VSPI_IOMUX_PIN_NUM_WP
|
||||
#define TEST_BUS_PIN_NUM_HD VSPI_IOMUX_PIN_NUM_HD
|
||||
|
||||
#elif CONFIG_IDF_TARGET_ESP32S2
|
||||
#define TEST_BUS_PIN_NUM_MISO FSPI_IOMUX_PIN_NUM_MISO
|
||||
#define TEST_BUS_PIN_NUM_MOSI FSPI_IOMUX_PIN_NUM_MOSI
|
||||
#define TEST_BUS_PIN_NUM_CLK FSPI_IOMUX_PIN_NUM_CLK
|
||||
#define TEST_BUS_PIN_NUM_CS FSPI_IOMUX_PIN_NUM_CS
|
||||
#define TEST_BUS_PIN_NUM_WP FSPI_IOMUX_PIN_NUM_WP
|
||||
#define TEST_BUS_PIN_NUM_HD FSPI_IOMUX_PIN_NUM_HD
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct {
|
||||
union {
|
||||
spi_device_handle_t handle;
|
||||
esp_flash_t* chip;
|
||||
};
|
||||
bool finished;
|
||||
} task_context_t;
|
||||
|
||||
#ifndef CONFIG_ESP32_SPIRAM_SUPPORT
|
||||
|
||||
const static char TAG[] = "test_spi";
|
||||
|
||||
void spi_task1(void* arg)
|
||||
{
|
||||
//task1 send 50 polling transactions, acquire the bus and send another 50
|
||||
int count=0;
|
||||
spi_transaction_t t = {
|
||||
.flags = SPI_TRANS_USE_TXDATA,
|
||||
.tx_data = { 0x80, 0x12, 0x34, 0x56 },
|
||||
.length = 4*8,
|
||||
};
|
||||
spi_device_handle_t handle = ((task_context_t*)arg)->handle;
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit( handle, &t ));
|
||||
ESP_LOGI(TAG, "task1:%d", count++ );
|
||||
}
|
||||
TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit( handle, &t ));
|
||||
ESP_LOGI(TAG, "task1:%d", count++ );
|
||||
}
|
||||
spi_device_release_bus(handle);
|
||||
ESP_LOGI(TAG, "task1 terminates");
|
||||
((task_context_t*)arg)->finished = true;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
// Task 2: acquire the bus first, then send 50 polling transactions followed
// by 50 interrupt (queued) transactions while still holding it.
void spi_task2(void* arg)
{
    int count=0;
    //task2 acquire the bus, send 50 polling transactions and then 50 non-polling
    spi_transaction_t t = {
        .flags = SPI_TRANS_USE_TXDATA,
        .tx_data = { 0x80, 0x12, 0x34, 0x56 },
        .length = 4*8,
    };
    spi_transaction_t *ret_t;
    spi_device_handle_t handle = ((task_context_t*)arg)->handle;
    TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));

    for (int i = 0; i < 50; i ++) {
        TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
        ESP_LOGI( TAG, "task2: %d", count++ );
    }

    // Enqueue all 50 interrupt transactions first, then collect the results
    // (needs queue_size >= 50 on the device).
    for( int j = 0; j < 50; j ++ ) {
        TEST_ESP_OK(spi_device_queue_trans(handle, &t, portMAX_DELAY));
    }
    for( int j = 0; j < 50; j ++ ) {
        TEST_ESP_OK(spi_device_get_trans_result(handle, &ret_t, portMAX_DELAY));
        // The same descriptor was queued every time, so it must come back.
        assert(ret_t == &t);
        ESP_LOGI( TAG, "task2: %d", count++ );
    }
    spi_device_release_bus(handle);
    // Yield one tick before terminating.
    vTaskDelay(1);
    ESP_LOGI(TAG, "task2 terminates");
    ((task_context_t*)arg)->finished = true;
    vTaskDelete(NULL);
}
|
||||
|
||||
// Task 3: mix all access styles — 30 polling transactions without the bus,
// then acquire it and send 20 polling plus 50 interrupt transactions.
void spi_task3(void* arg)
{
    //task3 send 30 polling transactions, acquire the bus, send 20 polling transactions and then 50 non-polling
    int count=0;
    spi_transaction_t t = {
        .flags = SPI_TRANS_USE_TXDATA,
        .tx_data = { 0x80, 0x12, 0x34, 0x56 },
        .length = 4*8,
    };
    spi_transaction_t *ret_t;
    spi_device_handle_t handle = ((task_context_t*)arg)->handle;

    for (int i = 0; i < 30; i ++) {
        TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
        ESP_LOGI( TAG, "task3: %d", count++ );
    }

    TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));
    for (int i = 0; i < 20; i ++) {
        TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
        ESP_LOGI( TAG, "task3: %d", count++ );
    }

    // Enqueue 50 interrupt transactions, then drain the results while still
    // holding the bus (needs queue_size >= 50 on the device).
    for (int j = 0; j < 50; j++) {
        TEST_ESP_OK(spi_device_queue_trans(handle, &t, portMAX_DELAY));
    }
    for (int j = 0; j < 50; j++) {
        TEST_ESP_OK(spi_device_get_trans_result(handle, &ret_t, portMAX_DELAY));
        // The same descriptor was queued every time, so it must come back.
        assert(ret_t == &t);
        ESP_LOGI(TAG, "task3: %d", count++);
    }
    spi_device_release_bus(handle);

    ESP_LOGI(TAG, "task3 terminates");
    ((task_context_t*)arg)->finished = true;
    vTaskDelete(NULL);
}
||||
|
||||
static void write_large_buffer(esp_flash_t *chip, const esp_partition_t *part, const uint8_t *source, size_t length)
{
    // Erase whole sectors covering the region, then write `length` bytes
    // starting at a deliberately unaligned (address + 1) offset.
    printf("Erasing chip %p, %d bytes\n", chip, length);
    size_t erase_len = (length + SPI_FLASH_SEC_SIZE) & ~(SPI_FLASH_SEC_SIZE - 1);
    TEST_ESP_OK(esp_flash_erase_region(chip, part->address, erase_len));

    printf("Writing chip %p, %d bytes from source %p\n", chip, length, source);
    // note writing to unaligned address
    TEST_ESP_OK(esp_flash_write(chip, source, part->address + 1, length));

    printf("Write done.\n");
}
|
||||
|
||||
// Verify the payload written by write_large_buffer(): it must read back
// identically, and the bytes immediately before/after it must be untouched
// (still in the erased 0xFF state).
static void read_and_check(esp_flash_t *chip, const esp_partition_t *part, const uint8_t *source, size_t length)
{
    printf("Checking chip %p, %d bytes\n", chip, length);
    uint8_t *buf = malloc(length);
    TEST_ASSERT_NOT_NULL(buf);
    // Data was written at part->address + 1 (unaligned on purpose).
    TEST_ESP_OK(esp_flash_read(chip, buf, part->address + 1, length) );
    TEST_ASSERT_EQUAL_HEX8_ARRAY(source, buf, length);
    free(buf);

    // check nothing was written at beginning or end
    uint8_t ends[8];

    TEST_ESP_OK(esp_flash_read(chip, ends, part->address, sizeof(ends)) );
    TEST_ASSERT_EQUAL_HEX8(0xFF, ends[0]);
    TEST_ASSERT_EQUAL_HEX8(source[0], ends[1]);

    // The last data byte sits at part->address + length; everything after is erased.
    TEST_ESP_OK(esp_flash_read(chip, ends, part->address + length, sizeof(ends)) );

    TEST_ASSERT_EQUAL_HEX8(source[length - 1], ends[0]);
    TEST_ASSERT_EQUAL_HEX8(0xFF, ends[1]);
    TEST_ASSERT_EQUAL_HEX8(0xFF, ends[2]);
    TEST_ASSERT_EQUAL_HEX8(0xFF, ends[3]);
}
|
||||
|
||||
void spi_task4(void* arg)
|
||||
{
|
||||
esp_flash_t *chip = ((task_context_t*)arg)->chip;
|
||||
|
||||
// buffer in RAM
|
||||
const int test_len = 16400;
|
||||
uint8_t *source_buf = heap_caps_malloc(test_len, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(source_buf);
|
||||
|
||||
srand(676);
|
||||
for (int i = 0; i < test_len; i++) {
|
||||
source_buf[i] = rand();
|
||||
}
|
||||
|
||||
ESP_LOGI(TAG, "Testing chip %p...", chip);
|
||||
const esp_partition_t *part = get_test_data_partition();
|
||||
TEST_ASSERT(part->size > test_len + 2 + SPI_FLASH_SEC_SIZE);
|
||||
|
||||
write_large_buffer(chip, part, source_buf, test_len);
|
||||
read_and_check(chip, part, source_buf, test_len);
|
||||
|
||||
free(source_buf);
|
||||
|
||||
ESP_LOGI(TAG, "task4 terminates");
|
||||
((task_context_t*)arg)->finished = true;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
// Core scenario of the bus-lock tests: three SPI-master tasks (polling,
// acquired-polling, and mixed interrupt transactions) run concurrently on one
// bus; when `test_flash` is set, a fourth task drives an esp_flash chip
// attached to the same bus. The function blocks until all tasks report done,
// then tears everything down.
static void test_bus_lock(bool test_flash)
{
    task_context_t context1={};
    task_context_t context2={};
    task_context_t context3={};
    task_context_t context4={};
    TaskHandle_t task1, task2, task3, task4;
    esp_err_t ret;
    spi_bus_config_t buscfg=SPI_BUS_TEST_DEFAULT_CONFIG();
    buscfg.miso_io_num = TEST_BUS_PIN_NUM_MISO;
    buscfg.mosi_io_num = TEST_BUS_PIN_NUM_MOSI;
    buscfg.sclk_io_num = TEST_BUS_PIN_NUM_CLK;

    spi_device_interface_config_t devcfg=SPI_DEVICE_TEST_DEFAULT_CONFIG();
    // Large queue so the tasks can enqueue all 50 transactions at once.
    devcfg.queue_size = 100;

    //Initialize the SPI bus and 3 devices
    ret=spi_bus_initialize(TEST_SPI_HOST, &buscfg, 1);
    TEST_ESP_OK(ret);
    ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context1.handle);
    TEST_ESP_OK(ret);
    ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context2.handle);
    TEST_ESP_OK(ret);

    //only have 3 cs pins, leave one for the flash
    devcfg.spics_io_num = -1;
    ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context3.handle);
    TEST_ESP_OK(ret);
    esp_flash_spi_device_config_t flash_cfg = {
        .host_id = TEST_SPI_HOST,
        .cs_id = 2,
        .cs_io_num = TEST_BUS_PIN_NUM_CS,
        .io_mode = SPI_FLASH_DIO,
        .speed = ESP_FLASH_5MHZ,
        .input_delay_ns = 0,
    };

    //Clamp the WP and HD pins to VDD to make it work in DIO mode
    gpio_set_direction(TEST_BUS_PIN_NUM_HD, GPIO_MODE_OUTPUT);
    gpio_set_direction(TEST_BUS_PIN_NUM_WP, GPIO_MODE_OUTPUT);
    gpio_set_level(TEST_BUS_PIN_NUM_HD, 1);
    gpio_set_level(TEST_BUS_PIN_NUM_WP, 1);

    esp_flash_t *chip;
    (void) chip;
    if (test_flash) {
        ret = spi_bus_add_flash_device(&chip, &flash_cfg);
        TEST_ESP_OK(ret);
        ret = esp_flash_init(chip);
        TEST_ESP_OK(ret);
        context4.chip = chip;
    }
    ESP_LOGI(TAG, "Start testing...");

    xTaskCreate( spi_task1, "task1", 2048, &context1, 0, &task1 );
    xTaskCreate( spi_task2, "task2", 2048, &context2, 0, &task2 );
    xTaskCreate( spi_task3, "task3", 2048, &context3, 0, &task3 );
    if (test_flash) {
        xTaskCreate( spi_task4, "task4", 2048, &context4, 0, &task4 );
    } else {
        // No flash task in this configuration: mark its slot done so the
        // wait loop below can still terminate.
        context4.finished = true;
    }

    // Poll until every task has flagged completion.
    for(;;){
        vTaskDelay(10);
        if (context1.finished && context2.finished && context3.finished && context4.finished) break;
    }

    TEST_ESP_OK(spi_bus_remove_device(context1.handle));
    TEST_ESP_OK(spi_bus_remove_device(context2.handle));
    TEST_ESP_OK(spi_bus_remove_device(context3.handle));
    if (test_flash) {
        TEST_ESP_OK(spi_bus_remove_flash_device(chip));
    }
    TEST_ESP_OK(spi_bus_free(TEST_SPI_HOST) );
}
|
||||
|
||||
#if !TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2)
//no runners
// Requires the UT_T1_ESP_FLASH runner: an external flash chip wired to the
// test bus pins, so the esp_flash task can share the bus with SPI master.
TEST_CASE("spi bus lock, with flash","[spi][test_env=UT_T1_ESP_FLASH]")
{
    test_bus_lock(true);
}
#endif //!TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2)
|
||||
|
||||
|
||||
// Same scenario without the esp_flash task; runs on any board.
TEST_CASE("spi bus lock","[spi]")
{
    test_bus_lock(false);
}
|
||||
|
||||
#if !TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2)
|
||||
//SPI1 not supported by driver
|
||||
static IRAM_ATTR esp_err_t test_polling_send(spi_device_handle_t handle)
{
    // Fire 10 small polling transactions; stop at the first failure.
    for (int attempt = 0; attempt < 10; attempt++) {
        spi_transaction_t trans = {
            .flags = SPI_TRANS_USE_TXDATA | SPI_TRANS_USE_RXDATA,
            .length = 16,
        };
        esp_err_t ret = spi_device_polling_transmit(handle, &trans);
        if (ret != ESP_OK) {
            return ret;
        }
    }
    return ESP_OK;
}
|
||||
|
||||
static IRAM_ATTR NOINLINE_ATTR void test_acquire(spi_device_handle_t handle)
{
    // Hold the bus while doing polling sends, then release it.
    esp_err_t ret = spi_device_acquire_bus(handle, portMAX_DELAY);
    if (ret == ESP_OK) {
        ret = test_polling_send(handle);
        spi_device_release_bus(handle);
    }

    TEST_ESP_OK(ret);
}
|
||||
|
||||
// SPI1 is permanently shared with the main flash, so no bus initialization is
// needed — a master device can attach directly to the pre-existing bus.
TEST_CASE("spi master can be used on SPI1", "[spi]")
{
    spi_device_interface_config_t dev_cfg = {
        .mode = 1,
        .clock_speed_hz = 1*1000*1000,
        .spics_io_num = -1,
        .queue_size = 1,
    };
    spi_device_handle_t handle;
    esp_err_t err;
    err = spi_bus_add_device(SPI1_HOST, &dev_cfg, &handle);
    TEST_ESP_OK(err);

    err = test_polling_send(handle);
    TEST_ESP_OK(err);
    test_acquire(handle);

    // Detach the device again so repeated runs don't leak the CS slot on the
    // always-alive SPI1 bus (the original test never removed it).
    TEST_ESP_OK(spi_bus_remove_device(handle));
}
|
||||
#endif //!TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2)
|
||||
|
||||
//TODO: add a case when a non-polling transaction happened in the bus-acquiring time and then release the bus then queue a new trans
|
||||
|
||||
#endif
|
@ -26,6 +26,7 @@
|
||||
#include "soc/soc_memory_layout.h"
|
||||
#include "driver/spi_common_internal.h"
|
||||
|
||||
|
||||
const static char TAG[] = "test_spi";
|
||||
|
||||
static void check_spi_pre_n_for(int clk, int pre, int n)
|
||||
@ -773,7 +774,7 @@ void test_cmd_addr(spi_slave_task_context_t *slave_context, bool lsb_first)
|
||||
};
|
||||
|
||||
ESP_LOGI( MASTER_TAG, "===== test%d =====", i );
|
||||
ESP_LOGI(MASTER_TAG, "cmd_bits: %d, addr_bits: %d", cmd_bits, addr_bits);
|
||||
ESP_LOGI(MASTER_TAG, "cmd_bits: %d, addr_bits: %d", cmd_bits, addr_bits);
|
||||
TEST_ESP_OK(spi_device_transmit(spi, (spi_transaction_t*)&trans));
|
||||
//wait for both master and slave end
|
||||
|
||||
@ -984,7 +985,7 @@ static void sorted_array_insert(uint32_t* array, int* size, uint32_t item)
|
||||
|
||||
#define TEST_TIMES 11
|
||||
|
||||
static IRAM_ATTR void spi_transmit_measure(spi_device_handle_t spi, spi_transaction_t* trans, uint32_t* t_flight)
|
||||
static IRAM_ATTR NOINLINE_ATTR void spi_transmit_measure(spi_device_handle_t spi, spi_transaction_t* trans, uint32_t* t_flight)
|
||||
{
|
||||
RECORD_TIME_PREPARE();
|
||||
spi_device_transmit(spi, trans); // prime the flash cache
|
||||
@ -993,7 +994,7 @@ static IRAM_ATTR void spi_transmit_measure(spi_device_handle_t spi, spi_transact
|
||||
RECORD_TIME_END(t_flight);
|
||||
}
|
||||
|
||||
static IRAM_ATTR void spi_transmit_polling_measure(spi_device_handle_t spi, spi_transaction_t* trans, uint32_t* t_flight)
|
||||
static IRAM_ATTR NOINLINE_ATTR void spi_transmit_polling_measure(spi_device_handle_t spi, spi_transaction_t* trans, uint32_t* t_flight)
|
||||
{
|
||||
spi_flash_disable_interrupts_caches_and_other_cpu(); //this can test the code are all in the IRAM at the same time
|
||||
RECORD_TIME_PREPARE();
|
||||
@ -1031,7 +1032,9 @@ TEST_CASE("spi_speed","[spi]")
|
||||
for (int i = 0; i < TEST_TIMES; i++) {
|
||||
ESP_LOGI(TAG, "%.2lf", GET_US_BY_CCOUNT(t_flight_sorted[i]));
|
||||
}
|
||||
#ifndef CONFIG_SPIRAM_SUPPORT
|
||||
TEST_PERFORMANCE_LESS_THAN(SPI_PER_TRANS_NO_POLLING, "%d us", (int)GET_US_BY_CCOUNT(t_flight_sorted[(TEST_TIMES+1)/2]));
|
||||
#endif
|
||||
|
||||
//acquire the bus to send polling transactions faster
|
||||
ret = spi_device_acquire_bus(spi, portMAX_DELAY);
|
||||
@ -1046,7 +1049,9 @@ TEST_CASE("spi_speed","[spi]")
|
||||
for (int i = 0; i < TEST_TIMES; i++) {
|
||||
ESP_LOGI(TAG, "%.2lf", GET_US_BY_CCOUNT(t_flight_sorted[i]));
|
||||
}
|
||||
#ifndef CONFIG_SPIRAM_SUPPORT
|
||||
TEST_PERFORMANCE_LESS_THAN(SPI_PER_TRANS_POLLING, "%d us", (int)GET_US_BY_CCOUNT(t_flight_sorted[(TEST_TIMES+1)/2]));
|
||||
#endif
|
||||
|
||||
//release the bus
|
||||
spi_device_release_bus(spi);
|
||||
@ -1064,7 +1069,9 @@ TEST_CASE("spi_speed","[spi]")
|
||||
for (int i = 0; i < TEST_TIMES; i++) {
|
||||
ESP_LOGI(TAG, "%.2lf", GET_US_BY_CCOUNT(t_flight_sorted[i]));
|
||||
}
|
||||
#ifndef CONFIG_SPIRAM_SUPPORT
|
||||
TEST_PERFORMANCE_LESS_THAN(SPI_PER_TRANS_NO_POLLING_NO_DMA, "%d us", (int)GET_US_BY_CCOUNT(t_flight_sorted[(TEST_TIMES+1)/2]));
|
||||
#endif
|
||||
|
||||
//acquire the bus to send polling transactions faster
|
||||
ret = spi_device_acquire_bus(spi, portMAX_DELAY);
|
||||
@ -1078,150 +1085,12 @@ TEST_CASE("spi_speed","[spi]")
|
||||
for (int i = 0; i < TEST_TIMES; i++) {
|
||||
ESP_LOGI(TAG, "%.2lf", GET_US_BY_CCOUNT(t_flight_sorted[i]));
|
||||
}
|
||||
#ifndef CONFIG_SPIRAM_SUPPORT
|
||||
TEST_PERFORMANCE_LESS_THAN(SPI_PER_TRANS_POLLING_NO_DMA, "%d us", (int)GET_US_BY_CCOUNT(t_flight_sorted[(TEST_TIMES+1)/2]));
|
||||
#endif
|
||||
|
||||
//release the bus
|
||||
spi_device_release_bus(spi);
|
||||
master_free_device_bus(spi);
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
spi_device_handle_t handle;
|
||||
bool finished;
|
||||
} task_context_t;
|
||||
|
||||
void spi_task1(void* arg)
|
||||
{
|
||||
//task1 send 50 polling transactions, acquire the bus and send another 50
|
||||
int count=0;
|
||||
spi_transaction_t t = {
|
||||
.flags = SPI_TRANS_USE_TXDATA,
|
||||
.tx_data = { 0x80, 0x12, 0x34, 0x56 },
|
||||
.length = 4*8,
|
||||
};
|
||||
spi_device_handle_t handle = ((task_context_t*)arg)->handle;
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit( handle, &t ));
|
||||
ESP_LOGI( TAG, "task1:%d", count++ );
|
||||
}
|
||||
TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit( handle, &t ));
|
||||
ESP_LOGI( TAG, "task1:%d", count++ );
|
||||
}
|
||||
spi_device_release_bus(handle);
|
||||
ESP_LOGI(TAG, "task1 terminates");
|
||||
((task_context_t*)arg)->finished = true;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
void spi_task2(void* arg)
|
||||
{
|
||||
int count=0;
|
||||
//task2 acquire the bus, send 50 polling transactions and then 50 non-polling
|
||||
spi_transaction_t t = {
|
||||
.flags = SPI_TRANS_USE_TXDATA,
|
||||
.tx_data = { 0x80, 0x12, 0x34, 0x56 },
|
||||
.length = 4*8,
|
||||
};
|
||||
spi_transaction_t *ret_t;
|
||||
spi_device_handle_t handle = ((task_context_t*)arg)->handle;
|
||||
TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));
|
||||
|
||||
for (int i = 0; i < 50; i ++) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
|
||||
ESP_LOGI( TAG, "task2: %d", count++ );
|
||||
}
|
||||
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_queue_trans( handle, &t, portMAX_DELAY ));
|
||||
}
|
||||
for( int j = 0; j < 50; j ++ ) {
|
||||
TEST_ESP_OK(spi_device_get_trans_result(handle, &ret_t, portMAX_DELAY));
|
||||
assert(ret_t == &t);
|
||||
ESP_LOGI( TAG, "task2: %d", count++ );
|
||||
}
|
||||
spi_device_release_bus(handle);
|
||||
vTaskDelay(1);
|
||||
ESP_LOGI(TAG, "task2 terminates");
|
||||
((task_context_t*)arg)->finished = true;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
void spi_task3(void* arg)
|
||||
{
|
||||
//task3 send 30 polling transactions, acquire the bus, send 20 polling transactions and then 50 non-polling
|
||||
int count=0;
|
||||
spi_transaction_t t = {
|
||||
.flags = SPI_TRANS_USE_TXDATA,
|
||||
.tx_data = { 0x80, 0x12, 0x34, 0x56 },
|
||||
.length = 4*8,
|
||||
};
|
||||
spi_transaction_t *ret_t;
|
||||
spi_device_handle_t handle = ((task_context_t*)arg)->handle;
|
||||
|
||||
for (int i = 0; i < 30; i ++) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
|
||||
ESP_LOGI( TAG, "task3: %d", count++ );
|
||||
}
|
||||
|
||||
TEST_ESP_OK(spi_device_acquire_bus( handle, portMAX_DELAY ));
|
||||
for (int i = 0; i < 20; i ++) {
|
||||
TEST_ESP_OK(spi_device_polling_transmit(handle, &t));
|
||||
ESP_LOGI( TAG, "task3: %d", count++ );
|
||||
}
|
||||
|
||||
for (int j = 0; j < 50; j++) {
|
||||
TEST_ESP_OK(spi_device_queue_trans(handle, &t, portMAX_DELAY));
|
||||
}
|
||||
for (int j = 0; j < 50; j++) {
|
||||
TEST_ESP_OK(spi_device_get_trans_result(handle, &ret_t, portMAX_DELAY));
|
||||
assert(ret_t == &t);
|
||||
ESP_LOGI(TAG, "task3: %d", count++);
|
||||
}
|
||||
spi_device_release_bus(handle);
|
||||
|
||||
ESP_LOGI(TAG, "task3 terminates");
|
||||
((task_context_t*)arg)->finished = true;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
TEST_CASE("spi poll tasks","[spi]")
|
||||
{
|
||||
task_context_t context1={};
|
||||
task_context_t context2={};
|
||||
task_context_t context3={};
|
||||
TaskHandle_t task1, task2, task3;
|
||||
esp_err_t ret;
|
||||
spi_bus_config_t buscfg=SPI_BUS_TEST_DEFAULT_CONFIG();
|
||||
spi_device_interface_config_t devcfg=SPI_DEVICE_TEST_DEFAULT_CONFIG();
|
||||
devcfg.queue_size = 100;
|
||||
|
||||
//Initialize the SPI bus and 3 devices
|
||||
ret=spi_bus_initialize(TEST_SPI_HOST, &buscfg, 1);
|
||||
TEST_ASSERT(ret==ESP_OK);
|
||||
ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context1.handle);
|
||||
TEST_ASSERT(ret==ESP_OK);
|
||||
ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context2.handle);
|
||||
TEST_ASSERT(ret==ESP_OK);
|
||||
ret=spi_bus_add_device(TEST_SPI_HOST, &devcfg, &context3.handle);
|
||||
TEST_ASSERT(ret==ESP_OK);
|
||||
|
||||
xTaskCreate( spi_task1, "task1", 2048, &context1, 0, &task1 );
|
||||
xTaskCreate( spi_task2, "task2", 2048, &context2, 0, &task2 );
|
||||
xTaskCreate( spi_task3, "task3", 2048, &context3, 0, &task3 );
|
||||
|
||||
for(;;){
|
||||
vTaskDelay(10);
|
||||
if (context1.finished && context2.finished && context3.finished) break;
|
||||
}
|
||||
|
||||
TEST_ESP_OK( spi_bus_remove_device(context1.handle) );
|
||||
TEST_ESP_OK( spi_bus_remove_device(context2.handle) );
|
||||
TEST_ESP_OK( spi_bus_remove_device(context3.handle) );
|
||||
TEST_ESP_OK( spi_bus_free(TEST_SPI_HOST) );
|
||||
}
|
||||
|
||||
|
||||
//TODO: add a case when a non-polling transaction happened in the bus-acquiring time and then release the bus then queue a new trans
|
||||
|
@ -204,7 +204,7 @@ menu "FreeRTOS"
|
||||
|
||||
config FREERTOS_SUPPORT_STATIC_ALLOCATION
|
||||
bool "Enable FreeRTOS static allocation API"
|
||||
default n
|
||||
default y
|
||||
help
|
||||
FreeRTOS gives the application writer the ability to instead provide the memory
|
||||
themselves, allowing the following objects to optionally be created without any
|
||||
|
@ -21,12 +21,10 @@
|
||||
* @brief Enum with the three SPI peripherals that are software-accessible in it
|
||||
*/
|
||||
typedef enum {
|
||||
// SPI_HOST (SPI1_HOST) is not supported by the SPI Master and SPI Slave driver on ESP32-S2
|
||||
SPI1_HOST=0, ///< SPI1
|
||||
SPI2_HOST=1, ///< SPI2
|
||||
SPI3_HOST=2, ///< SPI3
|
||||
#if SOC_SPI_PERIPH_NUM > 3
|
||||
SPI4_HOST=3, ///< SPI4
|
||||
#endif
|
||||
} spi_host_device_t;
|
||||
|
||||
//alias for different chips
|
||||
@ -35,6 +33,7 @@ typedef enum {
|
||||
#define HSPI_HOST SPI2_HOST
|
||||
#define VSPI_HOST SPI3_HOST
|
||||
#elif CONFIG_IDF_TARGET_ESP32S2
|
||||
// SPI_HOST (SPI1_HOST) is not supported by the SPI Master and SPI Slave driver on ESP32-S2
|
||||
#define SPI_HOST SPI1_HOST
|
||||
#define FSPI_HOST SPI2_HOST
|
||||
#define HSPI_HOST SPI3_HOST
|
||||
|
@ -10,17 +10,17 @@ entries:
|
||||
rtc_sleep (noflash_text)
|
||||
rtc_time (noflash_text)
|
||||
rtc_wdt (noflash_text)
|
||||
spi_hal_iram (noflash_text)
|
||||
spi_slave_hal_iram (noflash_text)
|
||||
spi_hal_iram (noflash)
|
||||
spi_slave_hal_iram (noflash)
|
||||
if UART_ISR_IN_IRAM = y:
|
||||
uart_hal_iram (noflash_text)
|
||||
uart_hal_iram (noflash)
|
||||
else:
|
||||
uart_hal_iram (default)
|
||||
spi_flash_hal_iram (noflash)
|
||||
ledc_hal_iram (noflash_text)
|
||||
ledc_hal_iram (noflash)
|
||||
i2c_hal_iram (noflash)
|
||||
spi_flash_hal_gpspi (noflash)
|
||||
lldesc (noflash_text)
|
||||
lldesc (noflash)
|
||||
cpu_hal (noflash)
|
||||
soc_hal (noflash)
|
||||
wdt_hal_iram (noflash)
|
||||
|
@ -16,8 +16,7 @@
|
||||
|
||||
#define SOC_SPI_PERIPH_NUM 3
|
||||
#define SOC_SPI_DMA_CHAN_NUM 2
|
||||
|
||||
#define SPI_PERIPH_NUM 3
|
||||
#define SOC_SPI_PERIPH_CS_NUM(i) 3
|
||||
|
||||
#define SPI_FUNC_NUM 1
|
||||
#define SPI_IOMUX_PIN_NUM_MISO 7
|
||||
|
@ -14,8 +14,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#define SOC_SPI_PERIPH_NUM 4
|
||||
#define SOC_SPI_PERIPH_NUM 3
|
||||
#define SOC_SPI_DMA_CHAN_NUM 3
|
||||
#define SOC_SPI_PERIPH_CS_NUM(i) 3
|
||||
|
||||
#define SPI_FUNC_NUM 0
|
||||
#define SPI_IOMUX_PIN_NUM_HD 27
|
||||
@ -35,7 +36,7 @@
|
||||
#define FSPI_IOMUX_PIN_NUM_WP 14
|
||||
//TODO: add the next slot
|
||||
|
||||
//HSPI and VSPI have no iomux pins
|
||||
//HSPI has no iomux pins
|
||||
|
||||
#define SOC_SPI_MAXIMUM_BUFFER_SIZE 72
|
||||
|
||||
|
@ -132,7 +132,10 @@ esp_err_t spi_bus_add_flash_device(esp_flash_t **out_chip, const esp_flash_spi_d
|
||||
.read_mode = config->io_mode,
|
||||
.host = host,
|
||||
};
|
||||
esp_err_t err = esp_flash_init_os_functions(chip, config->host_id);
|
||||
|
||||
int dev_id;
|
||||
esp_err_t err = esp_flash_init_os_functions(chip, config->host_id, &dev_id);
|
||||
assert(dev_id < SOC_SPI_PERIPH_CS_NUM(config->host_id) && dev_id >= 0);
|
||||
if (err != ESP_OK) {
|
||||
ret = err;
|
||||
goto fail;
|
||||
@ -141,7 +144,7 @@ esp_err_t spi_bus_add_flash_device(esp_flash_t **out_chip, const esp_flash_spi_d
|
||||
bool use_iomux = spicommon_bus_using_iomux(config->host_id);
|
||||
memspi_host_config_t host_cfg = {
|
||||
.host_id = config->host_id,
|
||||
.cs_num = config->cs_id,
|
||||
.cs_num = dev_id,
|
||||
.iomux = use_iomux,
|
||||
.input_delay_ns = config->input_delay_ns,
|
||||
.speed = config->speed,
|
||||
@ -165,6 +168,7 @@ esp_err_t spi_bus_remove_flash_device(esp_flash_t *chip)
|
||||
if (chip==NULL) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
esp_flash_deinit_os_functions(chip);
|
||||
if (chip->host) {
|
||||
free(chip->host->driver_data);
|
||||
free(chip->host);
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include "esp_err.h"
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <driver/spi_common_internal.h>
|
||||
#include "sdkconfig.h"
|
||||
|
||||
#include "esp_flash.h"
|
||||
@ -68,12 +69,21 @@ esp_err_t esp_flash_app_disable_protect(bool disable);
|
||||
*
|
||||
* @param chip The chip to init os functions.
|
||||
* @param host_id Which SPI host to use, 1 for SPI1, 2 for SPI2 (HSPI), 3 for SPI3 (VSPI)
|
||||
* @param out_dev_id Output of occupied device slot
|
||||
*
|
||||
* @return
|
||||
* - ESP_OK if success
|
||||
* - ESP_ERR_INVALID_ARG if host_id is invalid
|
||||
*/
|
||||
esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id);
|
||||
esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, int *out_dev_id);
|
||||
|
||||
/**
|
||||
* @brief Deinitialize OS-level functions
|
||||
*
|
||||
* @param chip The chip to deinit os functions
|
||||
* @return always ESP_OK.
|
||||
*/
|
||||
esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip);
|
||||
|
||||
/**
|
||||
* Initialize OS-level functions for the main flash chip.
|
||||
|
@ -26,6 +26,9 @@
|
||||
#include "esp32s2/rom/ets_sys.h"
|
||||
#endif
|
||||
|
||||
#include "driver/spi_common_internal.h"
|
||||
|
||||
|
||||
/*
|
||||
* OS functions providing delay service and arbitration among chips, and with the cache.
|
||||
*
|
||||
@ -34,52 +37,43 @@
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
int host_id;
|
||||
spi_bus_lock_dev_handle_t dev_lock;
|
||||
} app_func_arg_t;
|
||||
|
||||
typedef struct {
|
||||
int host_id;
|
||||
app_func_arg_t common_arg; //shared args, must be the first item
|
||||
bool no_protect; //to decide whether to check protected region (for the main chip) or not.
|
||||
} spi1_app_func_arg_t;
|
||||
|
||||
|
||||
// in the future we will have arbitration among devices, including flash on the same flash bus
|
||||
static IRAM_ATTR esp_err_t spi_bus_acquire(int host_id)
|
||||
static IRAM_ATTR esp_err_t spi_bus_acquire(spi_bus_lock_dev_handle_t dev_lock)
|
||||
{
|
||||
// was in BG operation (cache). Disable it and schedule
|
||||
esp_err_t ret = spi_bus_lock_acquire_start(dev_lock, portMAX_DELAY);
|
||||
if (ret != ESP_OK) {
|
||||
return ret;
|
||||
}
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static IRAM_ATTR esp_err_t spi_bus_release(int host_id)
|
||||
static IRAM_ATTR esp_err_t spi_bus_release(spi_bus_lock_dev_handle_t dev_lock)
|
||||
{
|
||||
return ESP_OK;
|
||||
return spi_bus_lock_acquire_end(dev_lock);
|
||||
}
|
||||
|
||||
//for SPI1, we have to disable the cache and interrupts before using the SPI bus
|
||||
static IRAM_ATTR esp_err_t spi1_start(void *arg)
|
||||
static IRAM_ATTR esp_err_t spi_start(void *arg)
|
||||
{
|
||||
g_flash_guard_default_ops.start();
|
||||
|
||||
spi_bus_acquire(((spi1_app_func_arg_t *)arg)->host_id);
|
||||
|
||||
return ESP_OK;
|
||||
}
|
||||
static IRAM_ATTR esp_err_t spi1_end(void *arg)
|
||||
{
|
||||
g_flash_guard_default_ops.end();
|
||||
|
||||
spi_bus_release(((spi1_app_func_arg_t *)arg)->host_id);
|
||||
|
||||
spi_bus_lock_dev_handle_t dev_lock = ((app_func_arg_t *)arg)->dev_lock;
|
||||
spi_bus_acquire(dev_lock);
|
||||
spi_bus_lock_touch(dev_lock);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static esp_err_t spi23_start(void *arg)
|
||||
static IRAM_ATTR esp_err_t spi_end(void *arg)
|
||||
{
|
||||
spi_bus_acquire(((app_func_arg_t *)arg)->host_id);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static esp_err_t spi23_end(void *arg)
|
||||
{
|
||||
spi_bus_release(((app_func_arg_t *)arg)->host_id);
|
||||
spi_bus_release(((app_func_arg_t *)arg)->dev_lock);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
@ -99,73 +93,107 @@ static IRAM_ATTR esp_err_t main_flash_region_protected(void* arg, size_t start_a
|
||||
}
|
||||
}
|
||||
|
||||
static DRAM_ATTR spi1_app_func_arg_t spi1_arg = {
|
||||
.host_id = SPI1_HOST, //for SPI1,
|
||||
.no_protect = true,
|
||||
};
|
||||
|
||||
static DRAM_ATTR spi1_app_func_arg_t main_flash_arg = {
|
||||
.host_id = SPI1_HOST, //for SPI1,
|
||||
.no_protect = false,
|
||||
};
|
||||
|
||||
static app_func_arg_t spi2_arg = {
|
||||
.host_id = SPI2_HOST, //for SPI2,
|
||||
};
|
||||
|
||||
static app_func_arg_t spi3_arg = {
|
||||
.host_id = SPI3_HOST, //for SPI3,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IDF_TARGET_ESP32S2
|
||||
static app_func_arg_t spi4_arg = {
|
||||
.host_id = SPI4_HOST, //for SPI4,
|
||||
};
|
||||
#endif
|
||||
static DRAM_ATTR spi1_app_func_arg_t main_flash_arg = {};
|
||||
|
||||
//for SPI1, we have to disable the cache and interrupts before using the SPI bus
|
||||
const DRAM_ATTR esp_flash_os_functions_t esp_flash_spi1_default_os_functions = {
|
||||
.start = spi1_start,
|
||||
.end = spi1_end,
|
||||
.start = spi_start,
|
||||
.end = spi_end,
|
||||
.delay_ms = delay_ms,
|
||||
.region_protected = main_flash_region_protected,
|
||||
};
|
||||
|
||||
const esp_flash_os_functions_t esp_flash_spi23_default_os_functions = {
|
||||
.start = spi23_start,
|
||||
.end = spi23_end,
|
||||
.start = spi_start,
|
||||
.end = spi_end,
|
||||
.delay_ms = delay_ms,
|
||||
};
|
||||
|
||||
esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id)
|
||||
esp_err_t esp_flash_init_os_functions(esp_flash_t *chip, int host_id, int* out_dev_id)
|
||||
{
|
||||
spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
|
||||
spi_bus_lock_dev_handle_t dev_handle;
|
||||
spi_bus_lock_dev_config_t config = {.flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED};
|
||||
esp_err_t err = spi_bus_lock_register_dev(lock, &config, &dev_handle);
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (host_id == SPI1_HOST) {
|
||||
//SPI1
|
||||
chip->os_func = &esp_flash_spi1_default_os_functions;
|
||||
chip->os_func_data = &spi1_arg;
|
||||
} else if (host_id == SPI2_HOST || host_id == SPI3_HOST
|
||||
#ifdef CONFIG_IDF_TARGET_ESP32S2
|
||||
|| host_id == SPI4_HOST
|
||||
#endif
|
||||
) {
|
||||
//SPI2,3,4
|
||||
chip->os_func_data = heap_caps_malloc(sizeof(spi1_app_func_arg_t),
|
||||
MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
if (chip->os_func_data == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
*(spi1_app_func_arg_t*) chip->os_func_data = (spi1_app_func_arg_t) {
|
||||
.common_arg = {
|
||||
.dev_lock = dev_handle,
|
||||
},
|
||||
.no_protect = true,
|
||||
};
|
||||
} else if (host_id == SPI2_HOST || host_id == SPI3_HOST) {
|
||||
//SPI2, SPI3
|
||||
chip->os_func = &esp_flash_spi23_default_os_functions;
|
||||
#if CONFIG_IDF_TARGET_ESP32
|
||||
chip->os_func_data = (host_id == SPI2_HOST) ? &spi2_arg : &spi3_arg;
|
||||
#elif CONFIG_IDF_TARGET_ESP32S2
|
||||
chip->os_func_data = (host_id == SPI2_HOST) ? &spi2_arg : ((host_id == SPI3_HOST) ? &spi3_arg : &spi4_arg);
|
||||
#endif
|
||||
chip->os_func_data = heap_caps_malloc(sizeof(app_func_arg_t),
|
||||
MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
if (chip->os_func_data == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
*(app_func_arg_t*) chip->os_func_data = (app_func_arg_t) {
|
||||
.dev_lock = dev_handle,
|
||||
};
|
||||
} else {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
|
||||
*out_dev_id = spi_bus_lock_get_dev_id(dev_handle);
|
||||
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t esp_flash_deinit_os_functions(esp_flash_t* chip)
|
||||
{
|
||||
if (chip->os_func_data) {
|
||||
spi_bus_lock_unregister_dev(((app_func_arg_t*)chip->os_func_data)->dev_lock);
|
||||
free(chip->os_func_data);
|
||||
}
|
||||
chip->os_func = NULL;
|
||||
chip->os_func_data = NULL;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
IRAM_ATTR static void cache_enable(void* arg)
|
||||
{
|
||||
g_flash_guard_default_ops.end();
|
||||
}
|
||||
|
||||
IRAM_ATTR static void cache_disable(void* arg)
|
||||
{
|
||||
g_flash_guard_default_ops.start();
|
||||
}
|
||||
|
||||
esp_err_t esp_flash_app_init_os_functions(esp_flash_t* chip)
|
||||
{
|
||||
esp_err_t err = spi_bus_lock_init_main_dev();
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
}
|
||||
|
||||
spi_bus_lock_set_bg_control(g_main_spi_bus_lock,
|
||||
cache_enable, cache_disable, NULL);
|
||||
|
||||
chip->os_func = &esp_flash_spi1_default_os_functions;
|
||||
chip->os_func_data = &main_flash_arg;
|
||||
main_flash_arg = (spi1_app_func_arg_t) {
|
||||
.common_arg = {
|
||||
.dev_lock = g_spi_lock_main_flash_dev, //for SPI1,
|
||||
},
|
||||
.no_protect = false,
|
||||
};
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
TEST_COMPONENTS=driver esp32 esp_timer freertos mbedtls spi_flash
|
||||
TEST_COMPONENTS=esp32 esp_timer freertos mbedtls spi_flash
|
||||
CONFIG_ESP32_SPIRAM_SUPPORT=y
|
||||
CONFIG_ESP_INT_WDT_TIMEOUT_MS=800
|
||||
CONFIG_SPIRAM_OCCUPY_NO_HOST=y
|
||||
|
5
tools/unit-test-app/configs/psram_3
Normal file
5
tools/unit-test-app/configs/psram_3
Normal file
@ -0,0 +1,5 @@
|
||||
TEST_COMPONENTS=driver
|
||||
CONFIG_ESP32_SPIRAM_SUPPORT=y
|
||||
CONFIG_ESP_INT_WDT_TIMEOUT_MS=800
|
||||
CONFIG_SPIRAM_OCCUPY_NO_HOST=y
|
||||
CONFIG_ESP32_WIFI_RX_IRAM_OPT=n
|
Loading…
Reference in New Issue
Block a user