feat(gdma): support channel allocator on esp32p4
There are two GDMA groups on the ESP32-P4: one is connected to the AHB bus and the other to the AXI bus. We now provide two separate APIs for allocating DMA channels, depending on the bus type.
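For illustration only (this snippet is not part of the diff; the local variable name dma_chan is a placeholder, and it assumes it runs inside a driver init function that returns esp_err_t and defines TAG, as in the hunks below), a driver whose peripheral can sit on either bus selects the allocator at compile time from the peripheral's bus-attachment macro, the same pattern the diff applies to PARLIO and LCD:

// pick the allocator that matches the bus the peripheral is attached to
gdma_channel_handle_t dma_chan = NULL;
gdma_channel_alloc_config_t dma_chan_config = {
    .direction = GDMA_CHANNEL_DIRECTION_TX,
};
#if SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AHB
ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &dma_chan), TAG, "allocate TX DMA channel failed");
#elif SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AXI
ESP_RETURN_ON_ERROR(gdma_new_axi_channel(&dma_chan_config, &dma_chan), TAG, "allocate TX DMA channel failed");
#endif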
parent 0ac1ee4358
commit 57879e772d
@@ -215,7 +215,11 @@ static esp_err_t parlio_tx_unit_init_dma(parlio_tx_unit_t *tx_unit)
gdma_channel_alloc_config_t dma_chan_config = {
.direction = GDMA_CHANNEL_DIRECTION_TX,
};
ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_chan_config, &tx_unit->dma_chan), TAG, "allocate TX DMA channel failed");
#if SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AHB
ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &tx_unit->dma_chan), TAG, "allocate TX DMA channel failed");
#elif SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS == SOC_GDMA_BUS_AXI
ESP_RETURN_ON_ERROR(gdma_new_axi_channel(&dma_chan_config, &tx_unit->dma_chan), TAG, "allocate TX DMA channel failed");
#endif
gdma_connect(tx_unit->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_PARLIO, 0));
gdma_strategy_config_t gdma_strategy_conf = {
.auto_update_desc = true,
@@ -74,7 +74,9 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
gdma_channel_alloc_config_t dma_chan_config = {
.direction = GDMA_CHANNEL_DIRECTION_RX,
};
ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
#if SOC_GDMA_TRIG_PERIPH_RMT0_BUS == SOC_GDMA_BUS_AHB
ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
#endif
gdma_strategy_config_t gdma_strategy_conf = {
.auto_update_desc = true,
.owner_check = true,
@@ -61,7 +61,9 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
gdma_channel_alloc_config_t dma_chan_config = {
.direction = GDMA_CHANNEL_DIRECTION_TX,
};
ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_chan_config, &tx_channel->base.dma_chan), TAG, "allocate TX DMA channel failed");
#if SOC_GDMA_TRIG_PERIPH_RMT0_BUS == SOC_GDMA_BUS_AHB
ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &tx_channel->base.dma_chan), TAG, "allocate TX DMA channel failed");
#endif
gdma_strategy_config_t gdma_strategy_conf = {
.auto_update_desc = true,
.owner_check = true,
@@ -218,6 +218,13 @@ menu "Hardware Settings"
This will ensure the GDMA interrupt handler is IRAM-Safe, allow to avoid flash
cache misses, and also be able to run whilst the cache is disabled.
(e.g. SPI Flash write).

config GDMA_ENABLE_DEBUG_LOG
bool "Enable debug log"
default n
help
Wether to enable the debug log message for GDMA driver.
Note that, this option only controls the GDMA driver log, won't affect other drivers.
endmenu # GDMA Configuration

menu "Main XTAL Config"
@@ -4,12 +4,37 @@
* SPDX-License-Identifier: Apache-2.0
*/

// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
/**
* AHB-Bus --------+ +-------- AXI-Bus
* | |
* | |
* +-----------------------------------+--+ +--+-----------------------------------+
* | GDMA-Group-X | | | | GDMA-Group-Y |
* | +-------------+ +------------+ | | | | +-------------+ +------------+ |
* | | GDMA-Pair-0 |... |GDMA-Pair-N | | | | | | GDMA-Pair-0 |... |GDMA-Pair-N | |
* | | | | | | | | | | | | | |
* | | TX-Chan |... | TX-Chan | | | | | | TX-Chan |... | TX-Chan | |
* | | RX-Chan | | RX-Chan | | | | | | RX-Chan | | RX-Chan | |
* | +-------------+ +------------+ | | | | +-------------+ +------------+ |
* | | | | | |
* +-----------------------------------+--+ +--+-----------------------------------+
* | |
* | |
*
* - Channel is allocated when user calls `gdma_new_ahb/axi_channel`, its lifecycle is maintained by the user.
* - Pair and Group are all lazy allocated, their life cycles are maintained by this driver.
* - We're not using a global spin lock, instead, we created different spin locks at different level (group, pair).
*/

#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include "sdkconfig.h"
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_caps.h"
@@ -27,20 +52,8 @@ static const char *TAG = "gdma";
#define SEARCH_REQUEST_RX_CHANNEL (1 << 0)
#define SEARCH_REQUEST_TX_CHANNEL (1 << 1)

/**
* GDMA driver consists of there object class, namely: Group, Pair and Channel.
* Channel is allocated when user calls `gdma_new_channel`, its lifecycle is maintained by user.
* Pair and Group are all lazy allocated, their life cycles are maintained by this driver.
* We use reference count to track their life cycles, i.e. the driver will free their memory only when their reference count reached to 0.
*
* We don't use an all-in-one spin lock in this driver, instead, we created different spin locks at different level.
* For platform, it has a spinlock, which is used to protect the group handle slots and reference count of each group.
* For group, it has a spinlock, which is used to protect group level stuffs, e.g. hal object, pair handle slots and reference count of each pair.
* For pair, it has a spinlock, which is used to protect pair level stuffs, e.g. channel handle slots, occupy code.
*/

typedef struct gdma_platform_t {
portMUX_TYPE spinlock; // platform level spinlock
portMUX_TYPE spinlock; // platform level spinlock, protect the group handle slots and reference count of each group.
gdma_group_t *groups[SOC_GDMA_NUM_GROUPS_MAX]; // array of GDMA group instances
int group_ref_counts[SOC_GDMA_NUM_GROUPS_MAX]; // reference count used to protect group install/uninstall
} gdma_platform_t;
@@ -69,6 +82,9 @@ typedef struct {

static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *search_info, const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
esp_err_t ret = ESP_OK;
gdma_tx_channel_t *alloc_tx_channel = NULL;
gdma_rx_channel_t *alloc_rx_channel = NULL;
@@ -118,14 +134,19 @@ static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *sear
search_code = 0; // exit search loop
}
portEXIT_CRITICAL(&pair->spinlock);
if (search_code) {
gdma_release_pair_handle(pair);
pair = NULL;
// found a pair that satisfies the search condition
if (search_code == 0) {
portENTER_CRITICAL(&group->spinlock);
group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
portEXIT_CRITICAL(&group->spinlock);
}
gdma_release_pair_handle(pair);
} // loop used to search pair
gdma_release_group_handle(group);
// restore to initial state if no suitable channel slot is found
if (search_code) {
gdma_release_group_handle(group);
group = NULL;
pair = NULL;
}
} // loop used to search group
ESP_GOTO_ON_FALSE(search_code == 0, ESP_ERR_NOT_FOUND, err, TAG, "no free gdma channel, search code=%d", search_code);
@@ -665,7 +686,7 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
if (dma_channel->intr) {
esp_intr_free(dma_channel->intr);
portENTER_CRITICAL(&pair->spinlock);
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interupt events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "uninstall interrupt service for tx channel (%d,%d)", group_id, pair_id);
@@ -694,7 +715,7 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
if (dma_channel->intr) {
esp_intr_free(dma_channel->intr);
portENTER_CRITICAL(&pair->spinlock);
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interupt events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "uninstall interrupt service for rx channel (%d,%d)", group_id, pair_id);
@@ -792,7 +813,7 @@ static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan)
rx_chan->base.intr = intr;

portENTER_CRITICAL(&pair->spinlock);
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interupt events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair_id);
@@ -821,7 +842,7 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
tx_chan->base.intr = intr;

portENTER_CRITICAL(&pair->spinlock);
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interupt events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair_id);
@@ -45,7 +45,7 @@ typedef struct gdma_group_t {
int group_id; // Group ID, index from 0
int bus_id; // which system does the GDMA instance attached to
gdma_hal_context_t hal; // HAL instance is at group level
portMUX_TYPE spinlock; // group level spinlock
portMUX_TYPE spinlock; // group level spinlock, protect group level stuffs, e.g. hal object, pair handle slots and reference count of each pair
uint32_t tx_periph_in_use_mask; // each bit indicates which peripheral (TX direction) has been occupied
uint32_t rx_periph_in_use_mask; // each bit indicates which peripheral (RX direction) has been occupied
gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP_MAX]; // handles of GDMA pairs
@@ -58,7 +58,7 @@ struct gdma_pair_t {
gdma_tx_channel_t *tx_chan; // pointer of tx channel in the pair
gdma_rx_channel_t *rx_chan; // pointer of rx channel in the pair
int occupy_code; // each bit indicates which channel has been occupied (an occupied channel will be skipped during channel search)
portMUX_TYPE spinlock; // pair level spinlock
portMUX_TYPE spinlock; // pair level spinlock, protect pair level stuffs, e.g. channel handle slots, occupy code
};

struct gdma_channel_t {
@@ -120,6 +120,18 @@ typedef struct {
} gdma_strategy_config_t;

/** @cond */
/**
* @brief Create GDMA channel (only create AHB GDMA channel)
* @note This API is going to be deprecated, please use `gdma_new_ahb_channel` or `gdma_new_axi_channel` instead.
*
* @param[in] config Pointer to a collection of configurations for allocating GDMA channel
* @param[out] ret_chan Returned channel handle
* @return
* - ESP_OK: Create DMA channel successfully
* - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
* - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
* - ESP_FAIL: Create DMA channel failed because of other error
*/
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
/** @endcond */
@@ -3,26 +3,32 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "esp_private/gdma.h"
#include "hal/dma_types.h"
#include "soc/soc_caps.h"
#include "hal/gdma_ll.h"
#include "rom/cache.h"

TEST_CASE("AHB GDMA channel allocation", "[gdma]")
TEST_CASE("GDMA channel allocation", "[GDMA]")
{
gdma_channel_alloc_config_t channel_config = {};
gdma_channel_handle_t tx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
gdma_channel_handle_t rx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
gdma_channel_handle_t tx_channels[SOC_GDMA_PAIRS_PER_GROUP_MAX] = {};
gdma_channel_handle_t rx_channels[SOC_GDMA_PAIRS_PER_GROUP_MAX] = {};
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
gdma_tx_event_callbacks_t tx_cbs = {};
gdma_rx_event_callbacks_t rx_cbs = {};

#if SOC_AHB_GDMA_SUPPORTED
// install TX channels
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[i]));
TEST_ESP_OK(gdma_register_tx_event_callbacks(tx_channels[i], &tx_cbs, NULL));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &tx_channels[i]));
};
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &tx_channels[0]));
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_ahb_channel(&channel_config, &tx_channels[0]));

// Free interrupts before installing RX interrupts to ensure enough free interrupts
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
@@ -32,32 +38,91 @@ TEST_CASE("AHB GDMA channel allocation", "[gdma]")
// install RX channels
channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[i]));
TEST_ESP_OK(gdma_register_rx_event_callbacks(rx_channels[i], &rx_cbs, NULL));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &rx_channels[i]));
}
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &rx_channels[0]));
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_ahb_channel(&channel_config, &rx_channels[0]));

for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
}
#endif // SOC_AHB_GDMA_SUPPORTED

// install single and paired TX/RX channels
#if GDMA_LL_AHB_PAIRS_PER_GROUP >= 2
// single tx channel
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[0]));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &tx_channels[0]));

// create tx channel and reserve sibling
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
channel_config.flags.reserve_sibling = 1;
TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[1]));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &tx_channels[1]));
// create rx channel and specify sibling channel
channel_config.flags.reserve_sibling = 0;
channel_config.sibling_chan = tx_channels[1]; // specify sibling channel
channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[1]));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &rx_channels[1]));
channel_config.sibling_chan = NULL;
TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[0]));
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &rx_channels[0]));

TEST_ESP_OK(gdma_connect(tx_channels[0], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UHCI, 0)));
// can't connect multiple channels to the same peripheral
TEST_ESP_ERR(ESP_ERR_INVALID_STATE, gdma_connect(tx_channels[1], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UHCI, 0)));
TEST_ESP_OK(gdma_connect(tx_channels[1], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));

TEST_ESP_OK(gdma_connect(rx_channels[0], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UHCI, 0)));
// but rx and tx can connect to the same peripheral
TEST_ESP_OK(gdma_connect(rx_channels[1], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));
for (int i = 0; i < 2; i++) {
TEST_ESP_OK(gdma_disconnect(tx_channels[i]));
TEST_ESP_OK(gdma_disconnect(rx_channels[i]));
TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
}
#endif // GDMA_LL_AHB_PAIRS_PER_GROUP >= 2
#if SOC_AXI_GDMA_SUPPORTED
// install TX channels
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
for (int i = 0; i < GDMA_LL_AXI_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &tx_channels[i]));
};
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_axi_channel(&channel_config, &tx_channels[0]));

// Free interrupts before installing RX interrupts to ensure enough free interrupts
for (int i = 0; i < GDMA_LL_AXI_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
}

// install RX channels
channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
for (int i = 0; i < GDMA_LL_AXI_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &rx_channels[i]));
}
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_axi_channel(&channel_config, &rx_channels[0]));

for (int i = 0; i < GDMA_LL_AXI_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
}
#endif // SOC_AXI_GDMA_SUPPORTED

// install single and paired TX/RX channels
#if GDMA_LL_AXI_PAIRS_PER_GROUP >= 2
// single tx channel
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &tx_channels[0]));

// create tx channel and reserve sibling
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
channel_config.flags.reserve_sibling = 1;
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &tx_channels[1]));
// create rx channel and specify sibling channel
channel_config.flags.reserve_sibling = 0;
channel_config.sibling_chan = tx_channels[1]; // specify sibling channel
channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &rx_channels[1]));
channel_config.sibling_chan = NULL;
TEST_ESP_OK(gdma_new_axi_channel(&channel_config, &rx_channels[0]));

TEST_ESP_OK(gdma_connect(tx_channels[0], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2)));
// can't connect multiple channels to the same peripheral
@@ -73,5 +138,141 @@ TEST_CASE("AHB GDMA channel allocation", "[gdma]")
TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
}
#endif
#endif // GDMA_LL_AXI_PAIRS_PER_GROUP >= 2
}

static bool test_gdma_m2m_rx_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
BaseType_t task_woken = pdFALSE;
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_data;
xSemaphoreGiveFromISR(done_sem, &task_woken);
return task_woken == pdTRUE;
}

static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handle_t rx_chan)
{
gdma_rx_event_callbacks_t rx_cbs = {
.on_recv_eof = test_gdma_m2m_rx_eof_callback,
};
SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
TEST_ESP_OK(gdma_register_rx_event_callbacks(rx_chan, &rx_cbs, done_sem));

gdma_strategy_config_t strategy = {
.auto_update_desc = true,
.owner_check = true,
};
TEST_ESP_OK(gdma_apply_strategy(tx_chan, &strategy));
TEST_ESP_OK(gdma_apply_strategy(rx_chan, &strategy));

gdma_trigger_t m2m_trigger = GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0);
// get a free DMA trigger ID for memory copy
uint32_t free_m2m_id_mask = 0;
gdma_get_free_m2m_trig_id_mask(tx_chan, &free_m2m_id_mask);
m2m_trigger.instance_id = __builtin_ctz(free_m2m_id_mask);
TEST_ESP_OK(gdma_connect(tx_chan, m2m_trigger));
TEST_ESP_OK(gdma_connect(rx_chan, m2m_trigger));

uint8_t *src_buf = heap_caps_aligned_alloc(64, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
uint8_t *dst_buf = heap_caps_aligned_alloc(64, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
TEST_ASSERT_NOT_NULL(src_buf);
TEST_ASSERT_NOT_NULL(dst_buf);
memset(src_buf, 0, 256);
memset(dst_buf, 0, 256);
dma_descriptor_t *tx_desc = (dma_descriptor_t *) src_buf;
dma_descriptor_t *rx_desc = (dma_descriptor_t *) dst_buf;
uint8_t *src_data = src_buf + 64;
uint8_t *dst_data = dst_buf + 64;

for (int i = 0; i < 100; i++) {
src_data[i] = i;
}

tx_desc->buffer = src_data;
tx_desc->dw0.size = 100;
tx_desc->dw0.length = 100;
tx_desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
tx_desc->dw0.suc_eof = 1;
tx_desc->next = NULL;

rx_desc->buffer = dst_data;
rx_desc->dw0.size = 100;
rx_desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
rx_desc->next = NULL;

#if CONFIG_IDF_TARGET_ESP32P4
// descriptors are in the cache, DMA engine may not see the changes, so do a write-back
Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)tx_desc, sizeof(tx_desc));
Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)rx_desc, sizeof(rx_desc));
// do write-back for the source data
Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)src_data, 100);
#endif

TEST_ESP_OK(gdma_start(rx_chan, (intptr_t)rx_desc));
TEST_ESP_OK(gdma_start(tx_chan, (intptr_t)tx_desc));

xSemaphoreTake(done_sem, portMAX_DELAY);

#if CONFIG_IDF_TARGET_ESP32P4
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)dst_data, 100);
// the DMA descriptors are updated by the DMA as well, so do an invalidate
Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)tx_desc, sizeof(tx_desc));
Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)rx_desc, sizeof(rx_desc));
#endif

// check the DMA descriptor write-back feature
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, tx_desc->dw0.owner);
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, rx_desc->dw0.owner);

for (int i = 0; i < 100; i++) {
TEST_ASSERT_EQUAL(i, dst_data[i]);
}
free((void *)src_buf);
free((void *)dst_buf);
vSemaphoreDelete(done_sem);
}
TEST_CASE("GDMA M2M Mode", "[GDMA]")
{
gdma_channel_handle_t tx_chan = NULL;
gdma_channel_handle_t rx_chan = NULL;
gdma_channel_alloc_config_t tx_chan_alloc_config = {};
gdma_channel_alloc_config_t rx_chan_alloc_config = {};

#if SOC_AHB_GDMA_SUPPORTED
tx_chan_alloc_config = (gdma_channel_alloc_config_t) {
.direction = GDMA_CHANNEL_DIRECTION_TX,
.flags.reserve_sibling = true,
};
TEST_ESP_OK(gdma_new_ahb_channel(&tx_chan_alloc_config, &tx_chan));
rx_chan_alloc_config = (gdma_channel_alloc_config_t) {
.direction = GDMA_CHANNEL_DIRECTION_RX,
.sibling_chan = tx_chan,
};
TEST_ESP_OK(gdma_new_ahb_channel(&rx_chan_alloc_config, &rx_chan));

test_gdma_m2m_mode(tx_chan, rx_chan);

TEST_ESP_OK(gdma_del_channel(tx_chan));
TEST_ESP_OK(gdma_del_channel(rx_chan));
#endif // SOC_AHB_GDMA_SUPPORTED

#if SOC_AXI_GDMA_SUPPORTED
tx_chan_alloc_config = (gdma_channel_alloc_config_t) {
.direction = GDMA_CHANNEL_DIRECTION_TX,
.flags.reserve_sibling = true,
};
TEST_ESP_OK(gdma_new_axi_channel(&tx_chan_alloc_config, &tx_chan));
rx_chan_alloc_config = (gdma_channel_alloc_config_t) {
.direction = GDMA_CHANNEL_DIRECTION_RX,
.sibling_chan = tx_chan,
};
TEST_ESP_OK(gdma_new_axi_channel(&rx_chan_alloc_config, &rx_chan));

test_gdma_m2m_mode(tx_chan, rx_chan);

TEST_ESP_OK(gdma_del_channel(tx_chan));
TEST_ESP_OK(gdma_del_channel(rx_chan));
#endif // SOC_AXI_GDMA_SUPPORTED
}
@@ -522,7 +522,11 @@ static esp_err_t lcd_i80_init_dma_link(esp_lcd_i80_bus_handle_t bus)
gdma_channel_alloc_config_t dma_chan_config = {
.direction = GDMA_CHANNEL_DIRECTION_TX,
};
ret = gdma_new_channel(&dma_chan_config, &bus->dma_chan);
#if SOC_GDMA_TRIG_PERIPH_LCD0_BUS == SOC_GDMA_BUS_AHB
ret = gdma_new_ahb_channel(&dma_chan_config, &bus->dma_chan);
#elif SOC_GDMA_TRIG_PERIPH_LCD0_BUS == SOC_GDMA_BUS_AXI
ret = gdma_new_axi_channel(&dma_chan_config, &bus->dma_chan);
#endif
ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc DMA channel failed");
gdma_connect(bus->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_LCD, 0));
gdma_strategy_config_t strategy_config = {
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -979,7 +979,11 @@ static esp_err_t lcd_rgb_panel_create_trans_link(esp_rgb_panel_t *panel)
gdma_channel_alloc_config_t dma_chan_config = {
.direction = GDMA_CHANNEL_DIRECTION_TX,
};
ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_chan_config, &panel->dma_chan), TAG, "alloc DMA channel failed");
#if SOC_GDMA_TRIG_PERIPH_LCD0_BUS == SOC_GDMA_BUS_AHB
ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &panel->dma_chan), TAG, "alloc DMA channel failed");
#elif SOC_GDMA_TRIG_PERIPH_LCD0_BUS == SOC_GDMA_BUS_AXI
ESP_RETURN_ON_ERROR(gdma_new_axi_channel(&dma_chan_config, &panel->dma_chan), TAG, "alloc DMA channel failed");
#endif
gdma_connect(panel->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_LCD, 0));
gdma_transfer_ability_t ability = {
.psram_trans_align = panel->psram_trans_align,
@@ -60,6 +60,10 @@ static inline uint32_t periph_ll_get_clk_en_mask(periph_module_t periph)
return HP_SYS_CLKRST_REG_TWAI1_CLK_EN;
case PERIPH_TWAI2_MODULE:
return HP_SYS_CLKRST_REG_TWAI2_CLK_EN;
case PERIPH_AHB_PDMA_MODULE:
return HP_SYS_CLKRST_REG_AHB_PDMA_SYS_CLK_EN;
case PERIPH_AXI_PDMA_MODULE:
return HP_SYS_CLKRST_REG_AXI_PDMA_SYS_CLK_EN;
case PERIPH_GPSPI_MODULE:
return HP_SYS_CLKRST_REG_GPSPI2_HS_CLK_EN;
case PERIPH_GPSPI2_MODULE:
@@ -109,7 +113,7 @@ static inline uint32_t periph_ll_get_clk_en_mask(periph_module_t periph)
case PERIPH_ISP_MODULE:
return HP_SYS_CLKRST_REG_ISP_CLK_EN;
default:
return 0;
return 0;
}
}
@@ -217,13 +221,16 @@ static inline uint32_t periph_ll_get_rst_en_mask(periph_module_t periph, bool en
case PERIPH_EMAC_MODULE:
return LP_CLKRST_RST_EN_EMAC;
default:
return 0;
return 0;
}
}

static uint32_t periph_ll_get_clk_en_reg(periph_module_t periph)
{
switch (periph) {
case PERIPH_AHB_PDMA_MODULE:
case PERIPH_AXI_PDMA_MODULE:
return HP_SYS_CLKRST_SOC_CLK_CTRL1_REG;
case PERIPH_MSPI_FLASH_MODULE:
case PERIPH_MSPI_PSRAM_MODULE:
return HP_SYS_CLKRST_PERI_CLK_CTRL00_REG;
@@ -282,7 +289,7 @@ static uint32_t periph_ll_get_clk_en_reg(periph_module_t periph)
return LP_CLKRST_HP_CLK_CTRL_REG;
default:
abort();
return 0;
return 0;
}
}
@@ -296,10 +303,10 @@ static uint32_t periph_ll_get_rst_en_reg(periph_module_t periph)
case PERIPH_ISP_MODULE:
case PERIPH_JPEG_MODULE:
case PERIPH_DMA2D_MODULE:
return HP_SYS_CLKRST_HP_RST_EN0_REG;
case PERIPH_PPA_MODULE:
case PERIPH_AHB_PDMA_MODULE:
case PERIPH_AXI_PDMA_MODULE:
return HP_SYS_CLKRST_HP_RST_EN0_REG;
case PERIPH_SYSTIMER_MODULE:
case PERIPH_TIMG0_MODULE:
case PERIPH_TIMG1_MODULE:
@@ -343,7 +350,7 @@ static uint32_t periph_ll_get_rst_en_reg(periph_module_t periph)
return LP_CLKRST_HP_SDMMC_EMAC_RST_CTRL_REG;
default:
abort();
return 0;
return 0;
}
}
@@ -19,8 +19,7 @@ extern "C" {
#define GDMA_LL_RX_EVENT_MASK (0x1F)
#define GDMA_LL_TX_EVENT_MASK (0x0F)

//To check this //TODO: IDF-6504
#define GDMA_LL_INVALID_PERIPH_ID (0x3F)
#define GDMA_LL_INVALID_PERIPH_ID (0x3F)

#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<3)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
@@ -6,4 +6,41 @@

#include "soc/gdma_periph.h"

const gdma_signal_conn_t gdma_periph_signals = {};
const gdma_signal_conn_t gdma_periph_signals = {
.groups = {
[0] = {
.module = PERIPH_AHB_PDMA_MODULE,
.pairs = {
[0] = {
.rx_irq_id = ETS_AHB_PDMA_IN_CH0_INTR_SOURCE,
.tx_irq_id = ETS_AHB_PDMA_OUT_CH0_INTR_SOURCE,
},
[1] = {
.rx_irq_id = ETS_AHB_PDMA_IN_CH1_INTR_SOURCE,
.tx_irq_id = ETS_AHB_PDMA_OUT_CH1_INTR_SOURCE,
},
[2] = {
.rx_irq_id = ETS_AHB_PDMA_IN_CH2_INTR_SOURCE,
.tx_irq_id = ETS_AHB_PDMA_OUT_CH2_INTR_SOURCE,
}
}
},
[1] = {
.module = PERIPH_AXI_PDMA_MODULE,
.pairs = {
[0] = {
.rx_irq_id = ETS_AXI_PDMA_IN_CH0_INTR_SOURCE,
.tx_irq_id = ETS_AXI_PDMA_OUT_CH0_INTR_SOURCE,
},
[1] = {
.rx_irq_id = ETS_AXI_PDMA_IN_CH1_INTR_SOURCE,
.tx_irq_id = ETS_AXI_PDMA_OUT_CH1_INTR_SOURCE,
},
[2] = {
.rx_irq_id = ETS_AXI_PDMA_IN_CH2_INTR_SOURCE,
.tx_irq_id = ETS_AXI_PDMA_OUT_CH2_INTR_SOURCE,
}
}
}
}
};
@@ -7,7 +7,15 @@ config SOC_UART_SUPPORTED
bool
default y

config SOC_ASYNC_MEMCPY_SUPPORTED
config SOC_GDMA_SUPPORTED
bool
default y

config SOC_AHB_GDMA_SUPPORTED
bool
default y

config SOC_AXI_GDMA_SUPPORTED
bool
default y
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -29,16 +29,16 @@
// #define SOC_ANA_CMPR_SUPPORTED 1 //TODO: IDF-7479
// #define SOC_DEDICATED_GPIO_SUPPORTED 1 //TODO: IDF-7552
#define SOC_UART_SUPPORTED 1
// #define SOC_GDMA_SUPPORTED 1 //TODO: IDF-6504
// #define SOC_AHB_GDMA_SUPPORTED 1
// #define SOC_AXI_GDMA_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_AXI_GDMA_SUPPORTED 1
// #define SOC_GPTIMER_SUPPORTED 1 //TODO: IDF-6515
// #define SOC_PCNT_SUPPORTED 1 //TODO: IDF-7475
// #define SOC_MCPWM_SUPPORTED 1 //TODO: IDF-7493
// #define SOC_TWAI_SUPPORTED 1 //TODO: IDF-7470
// #define SOC_ETM_SUPPORTED 1 //TODO: IDF-7478
// #define SOC_PARLIO_SUPPORTED 1 //TODO: IDF-7471, TODO: IDF-7472
#define SOC_ASYNC_MEMCPY_SUPPORTED 1
// #define SOC_ASYNC_MEMCPY_SUPPORTED 1
// disable usb serial jtag for esp32p4, current image does not support
// #define SOC_USB_SERIAL_JTAG_SUPPORTED 1 //TODO: IDF-7496
// #define SOC_TEMP_SENSOR_SUPPORTED 1 //TODO: IDF-7482
@@ -160,7 +160,7 @@
#define SOC_AHB_GDMA_VERSION 2
#define SOC_GDMA_NUM_GROUPS_MAX 2
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
// #define SOC_GDMA_SUPPORT_ETM 1 // Both AHB-DMA and AXI-DMA supports ETM //TODO: IDF-6504
// #define SOC_GDMA_SUPPORT_ETM 1 // Both AHB-DMA and AXI-DMA supports ETM //TODO: IDF-7478

/*-------------------------- ETM CAPS --------------------------------------*/
#define SOC_ETM_GROUPS 1U // Number of ETM groups
@@ -39,7 +39,6 @@ PROVIDE ( MCPWM1 = 0x500C1000 );
PROVIDE ( PARL_IO = 0x500CF000 );
PROVIDE ( PVT_MONITOR = 0x5009E000 );

PROVIDE ( GDMA = 0x50081000 );
PROVIDE ( GPSPI2 = 0x500D0000 );
PROVIDE ( GPSPI3 = 0x500D1000 );