feat(dma): advanced dma malloc helper
This commit is contained in:
parent 9c846916fa, commit f0518b3c16
@@ -575,14 +575,19 @@ static esp_err_t i2s_alloc_dma_buffer(i2s_port_t i2s_num, i2s_dma_t *dma_obj)
    size_t desc_size = 0;
    for (int cnt = 0; cnt < buf_cnt; cnt++) {
        /* Allocate DMA buffer */
        esp_dma_calloc(1, sizeof(char) * dma_obj->buf_size, (MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA), (void **)&dma_obj->buf[cnt], NULL);
        esp_dma_mem_info_t dma_mem_info = {
            .heap_caps = MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA,
            .dma_alignment = 4,
        };
        esp_dma_capable_calloc(1, sizeof(char) * dma_obj->buf_size, &dma_mem_info, (void **)&dma_obj->buf[cnt], NULL);
        ESP_GOTO_ON_FALSE(dma_obj->buf[cnt], ESP_ERR_NO_MEM, err, TAG, "Error malloc dma buffer");
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(dma_obj->buf[cnt], dma_obj->buf_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
#endif

        /* Allocate DMA descriptor */
        esp_dma_calloc(1, sizeof(lldesc_t), MALLOC_CAP_DEFAULT, (void **)&dma_obj->desc[cnt], &desc_size);
        esp_dma_capable_calloc(1, sizeof(lldesc_t), &dma_mem_info, (void **)&dma_obj->desc[cnt], &desc_size);
        // esp_dma_calloc(1, sizeof(lldesc_t), MALLOC_CAP_DEFAULT, (void **)&dma_obj->desc[cnt], &desc_size);
        ESP_GOTO_ON_FALSE(dma_obj->desc[cnt], ESP_ERR_NO_MEM, err, TAG, "Error malloc dma description entry");
    }
    /* DMA descriptor must be initialize after all descriptor has been created, otherwise they can't be linked together as a chain */
@@ -69,10 +69,32 @@
static const char *TAG = "i2s_common";

__attribute__((always_inline))
inline void *i2s_dma_calloc(size_t num, size_t size, uint32_t caps, size_t *actual_size)
inline void *i2s_dma_calloc(i2s_chan_handle_t handle, size_t num, size_t size, bool is_desc, size_t *actual_size)
{
    esp_err_t ret = ESP_FAIL;
    void *ptr = NULL;
    esp_dma_calloc(num, size, caps, &ptr, actual_size);

    size_t dma_alignment = 0;
    void *gdma_chan_handle = NULL;
#if SOC_GDMA_SUPPORTED
    gdma_chan_handle = handle->dma.dma_chan;
#endif
    dma_alignment_info_t info = {
        .is_desc = is_desc,
    };
    ret = esp_dma_get_alignment(gdma_chan_handle, &info, &dma_alignment);
    assert(ret == ESP_OK);

    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = MALLOC_CAP_DMA,
        .dma_alignment = 4,
    };
    esp_dma_capable_calloc(num, size, &dma_mem_info, &ptr, actual_size);
#if CONFIG_IDF_TARGET_ESP32P4
    assert((int)ptr % 64 == 0);
#else
    assert((int)ptr % 4 == 0);
#endif
    return ptr;
}

@@ -422,7 +444,7 @@ esp_err_t i2s_alloc_dma_desc(i2s_chan_handle_t handle, uint32_t num, uint32_t bu
    size_t desc_size = 0;
    for (int i = 0; i < num; i++) {
        /* Allocate DMA descriptor */
        handle->dma.desc[i] = (lldesc_t *) i2s_dma_calloc(1, sizeof(lldesc_t), I2S_DMA_ALLOC_CAPS, &desc_size);
        handle->dma.desc[i] = (lldesc_t *) i2s_dma_calloc(handle, 1, sizeof(lldesc_t), true, &desc_size);
        ESP_GOTO_ON_FALSE(handle->dma.desc[i], ESP_ERR_NO_MEM, err, TAG, "allocate DMA description failed");
        handle->dma.desc[i]->owner = 1;
        handle->dma.desc[i]->eof = 1;
@@ -430,7 +452,7 @@ esp_err_t i2s_alloc_dma_desc(i2s_chan_handle_t handle, uint32_t num, uint32_t bu
        handle->dma.desc[i]->length = bufsize;
        handle->dma.desc[i]->size = bufsize;
        handle->dma.desc[i]->offset = 0;
        handle->dma.bufs[i] = (uint8_t *) i2s_dma_calloc(1, bufsize * sizeof(uint8_t), I2S_DMA_ALLOC_CAPS, NULL);
        handle->dma.bufs[i] = (uint8_t *) i2s_dma_calloc(handle, 1, bufsize * sizeof(uint8_t), false, NULL);
        ESP_GOTO_ON_FALSE(handle->dma.bufs[i], ESP_ERR_NO_MEM, err, TAG, "allocate DMA buffer failed");
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_msync(handle->dma.bufs[i], bufsize * sizeof(uint8_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
@@ -14,39 +14,163 @@
#include "esp_memory_utils.h"
#include "esp_dma_utils.h"
#include "esp_private/esp_cache_private.h"
#include "esp_private/gdma.h"
#include "soc/soc_caps.h"
#include "hal/hal_utils.h"

static const char *TAG = "dma_utils";
_Static_assert(ESP_DMA_MALLOC_FLAG_PSRAM == ESP_CACHE_MALLOC_FLAG_PSRAM);

#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))

esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
esp_err_t esp_dma_capable_malloc(size_t size, const esp_dma_mem_info_t *dma_mem_info, void **out_ptr, size_t *actual_size)
{
    ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_FALSE_ISR(dma_mem_info && out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    esp_err_t ret = ESP_OK;
    size_t alignment = 1;

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    ret = esp_cache_aligned_malloc(size, flags | ESP_CACHE_MALLOC_FLAG_DMA, out_ptr, actual_size);
#else
    if (flags & ESP_DMA_MALLOC_FLAG_PSRAM) {
        ret = esp_cache_aligned_malloc(size, flags | ESP_CACHE_MALLOC_FLAG_DMA, out_ptr, actual_size);
    } else {
        size = ALIGN_UP_BY(size, 4);
        void *ptr = heap_caps_aligned_alloc(4, size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
        ESP_RETURN_ON_FALSE_ISR(ptr, ESP_ERR_NO_MEM, TAG, "no enough heap memory");
        *out_ptr = ptr;
        if (actual_size) {
            *actual_size = size;
        }
    //dma align
    size_t dma_alignment = dma_mem_info->dma_alignment;

    //custom align
    size_t custom_alignment = dma_mem_info->custom_alignment;

    //cache align
    int cache_flags = 0;
    size_t cache_alignment = 1;
    if (dma_mem_info->heap_caps & MALLOC_CAP_SPIRAM) {
        cache_flags |= ESP_DMA_MALLOC_FLAG_PSRAM;
    }
    esp_err_t ret = esp_cache_get_alignment(cache_flags, &cache_alignment);
    assert(ret == ESP_OK);

    //lcm3
    alignment = _lcm_3(dma_alignment, cache_alignment, custom_alignment);
    ESP_LOGD(TAG, "alignment: 0x%x", alignment);

    //malloc
    size = ALIGN_UP_BY(size, alignment);
    int heap_caps = dma_mem_info->heap_caps;

    void *ptr = heap_caps_aligned_alloc(alignment, size, heap_caps);
    ESP_RETURN_ON_FALSE_ISR(ptr, ESP_ERR_NO_MEM, TAG, "no enough heap memory");

    *out_ptr = ptr;
    if (actual_size) {
        *actual_size = size;
    }

    return ESP_OK;
}

esp_err_t esp_dma_capable_calloc(size_t n, size_t size, const esp_dma_mem_info_t *dma_mem_info, void **out_ptr, size_t *actual_size)
{
    esp_err_t ret = ESP_FAIL;
    size_t size_bytes = 0;
    bool ovf = false;

    ovf = __builtin_mul_overflow(n, size, &size_bytes);
    ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow");

    void *ptr = NULL;
    ret = esp_dma_capable_malloc(size_bytes, dma_mem_info, &ptr, actual_size);
    if (ret == ESP_OK) {
        memset(ptr, 0, size_bytes);
        *out_ptr = ptr;
    }
#endif

    return ret;
}

static bool s_buf_in_region(const void *ptr, size_t size, esp_dma_buf_location_t location)
{
    bool found = false;
    if (location == ESP_DMA_BUF_LOCATION_INTERNAL) {
        if (esp_ptr_dma_capable(ptr) && esp_ptr_dma_capable(ptr + size - 1)) {
            found = true;
        }
    } else if (location == ESP_DMA_BUF_LOCATION_PSRAM) {
#if SOC_PSRAM_DMA_CAPABLE
        if (esp_ptr_external_ram(ptr) && esp_ptr_external_ram(ptr + size - 1)) {
            found = true;
        }
#endif
    }
    return found;
}

static inline bool s_is_buf_aligned(intptr_t ptr, size_t alignment)
{
    return (ptr % alignment == 0);
}

bool esp_dma_is_buffer_alignment_satisfied(const void *ptr, size_t size, esp_dma_mem_info_t *dma_mem_info)
{
    assert(ptr);

    bool found = false;
    for (int i = ESP_DMA_BUF_LOCATION_INTERNAL; i < ESP_DMA_BUF_LOCATION_AUTO; i++) {
        if (s_buf_in_region(ptr, size, i)) {
            found = true;
            break;
        }
    }
    if (!found) {
        return false;
    }

    size_t alignment = 1;

    //dma align
    size_t dma_alignment = dma_mem_info->dma_alignment;

    //custom align
    size_t custom_alignment = dma_mem_info->custom_alignment;

    //cache align
    int cache_flags = 0;
    size_t cache_alignment = 1;
    if (dma_mem_info->heap_caps & MALLOC_CAP_SPIRAM) {
        cache_flags |= ESP_DMA_MALLOC_FLAG_PSRAM;
    }
    esp_err_t ret = esp_cache_get_alignment(cache_flags, &cache_alignment);
    assert(ret == ESP_OK);

    //lcm3
    alignment = _lcm_3(dma_alignment, cache_alignment, custom_alignment);

    bool is_aligned = s_is_buf_aligned((intptr_t)ptr, alignment) && s_is_buf_aligned((intptr_t)size, alignment);
    return is_aligned;
}

//-----------------------Deprecated APIs-----------------------//
esp_err_t s_legacy_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
{
    ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    int heap_caps = 0;
    if (flags & ESP_DMA_MALLOC_FLAG_PSRAM) {
        heap_caps |= MALLOC_CAP_SPIRAM;
    } else {
        heap_caps |= MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL;
    }

    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = heap_caps,
        .dma_alignment = 4, //legacy API behaviour is only check max dma buffer alignment
    };

    ESP_RETURN_ON_ERROR_ISR(esp_dma_capable_malloc(size, &dma_mem_info, out_ptr, actual_size), TAG, "failed to do malloc");

    return ESP_OK;
}

esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
{
    return s_legacy_malloc(size, flags, out_ptr, actual_size);
}

esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
{
@@ -60,7 +184,7 @@ esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr,
    ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow");

    void *ptr = NULL;
    ret = esp_dma_malloc(size_bytes, flags, &ptr, actual_size);
    ret = s_legacy_malloc(size_bytes, flags, &ptr, actual_size);
    if (ret == ESP_OK) {
        memset(ptr, 0, size_bytes);
        *out_ptr = ptr;
@@ -69,17 +193,18 @@ esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr,
    return ret;
}

static bool s_buf_in_region(const void *ptr, size_t size, esp_dma_buf_location_t location, uint32_t *in_out_flags)
static bool s_buf_in_region_legacy(const void *ptr, size_t size, esp_dma_buf_location_t location, int *heap_caps)
{
    bool found = false;
    if (location == ESP_DMA_BUF_LOCATION_INTERNAL) {
        if (esp_ptr_dma_capable(ptr) && esp_ptr_dma_capable(ptr + size - 1)) {
            *heap_caps = MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL;
            found = true;
        }
    } else if (location == ESP_DMA_BUF_LOCATION_PSRAM) {
#if SOC_PSRAM_DMA_CAPABLE
        if (esp_ptr_external_ram(ptr) && esp_ptr_external_ram(ptr + size - 1)) {
            *in_out_flags |= ESP_DMA_MALLOC_FLAG_PSRAM;
            *heap_caps = MALLOC_CAP_SPIRAM;
            found = true;
        }
#endif
@@ -90,33 +215,48 @@ static bool s_buf_in_region(const void *ptr, size_t size, esp_dma_buf_location_t
bool esp_dma_is_buffer_aligned(const void *ptr, size_t size, esp_dma_buf_location_t location)
{
    assert(ptr);
    uint32_t flags = ESP_CACHE_MALLOC_FLAG_DMA;

    bool found = false;
    int heap_caps = 0;
    if (location == ESP_DMA_BUF_LOCATION_AUTO) {
        for (int i = ESP_DMA_BUF_LOCATION_INTERNAL; i < ESP_DMA_BUF_LOCATION_AUTO; i++) {
            if (s_buf_in_region(ptr, size, i, &flags)) {
            if (s_buf_in_region_legacy(ptr, size, i, &heap_caps)) {
                found = true;
                break;
            }
        }
    } else if (location == ESP_DMA_BUF_LOCATION_INTERNAL) {
        found = s_buf_in_region(ptr, size, ESP_DMA_BUF_LOCATION_INTERNAL, &flags);
        found = s_buf_in_region_legacy(ptr, size, ESP_DMA_BUF_LOCATION_INTERNAL, &heap_caps);
    } else {
        found = s_buf_in_region(ptr, size, ESP_DMA_BUF_LOCATION_PSRAM, &flags);
        found = s_buf_in_region_legacy(ptr, size, ESP_DMA_BUF_LOCATION_PSRAM, &heap_caps);
    }
    if (!found) {
        return false;
    }

    bool is_aligned = false;
    size_t dma_alignment = 0;
    size_t cache_alignment = 0;
    size_t alignment = 0;
    esp_err_t ret = esp_cache_get_alignment(flags, &cache_alignment);
    assert(ret == ESP_OK);
    alignment = MAX(dma_alignment, cache_alignment);
    is_aligned = ((intptr_t)ptr % alignment == 0) && (size % alignment == 0);

    return is_aligned;
    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = heap_caps,
        .dma_alignment = 4, //legacy API behaviour is only check max dma buffer alignment
    };
    return esp_dma_is_buffer_alignment_satisfied(ptr, size, &dma_mem_info);
}

esp_err_t esp_dma_get_alignment(void *gdma_chan_handle, const dma_alignment_info_t *info, size_t *alignment)
{
    ESP_RETURN_ON_FALSE(info && alignment, ESP_ERR_INVALID_ARG, TAG, "null pointer");

#if SOC_GDMA_SUPPORTED
    if (gdma_chan_handle) {
        gdma_channel_handle_t dma_chan = (gdma_channel_handle_t)gdma_chan_handle;
        gdma_alignment_info_t gdma_info = {};
        memcpy(&gdma_info, info, sizeof(gdma_alignment_info_t));
        ESP_RETURN_ON_ERROR(gdma_get_alignment(dma_chan, &gdma_info, alignment), TAG, "failed to get gdma alignment");
    } else
#endif
    {
        //for esp32 and esp32s2
        *alignment = 4;
    }

    return ESP_OK;
}
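To make the new allocation path above concrete, here is a minimal usage sketch built only on the APIs added in this diff (esp_dma_capable_calloc and esp_dma_mem_info_t); the helper name, buffer length, and the 4-byte DMA alignment are illustrative assumptions, not part of the commit.

#include "esp_dma_utils.h"
#include "esp_heap_caps.h"

// Hypothetical helper: zero-initialized, DMA-capable buffer from internal RAM.
// Both the returned start address and the reported actual size are multiples of
// the combined (LCM) DMA/cache/custom alignment computed by esp_dma_capable_malloc.
static esp_err_t example_alloc_dma_buffer(size_t len, void **out_buf, size_t *out_actual)
{
    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL, // where to allocate
        .dma_alignment = 4,                                // example DMA engine requirement
        .custom_alignment = 0,                             // no extra peripheral constraint
    };
    return esp_dma_capable_calloc(1, len, &dma_mem_info, out_buf, out_actual);
}

A buffer obtained this way is released with the usual heap_caps_free()/free() call, as the test cases added later in this commit do.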
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
 * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -944,3 +944,34 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
err:
    return ret;
}

esp_err_t gdma_get_alignment(gdma_channel_handle_t dma_chan, const gdma_alignment_info_t *info, size_t *alignment)
{
    ESP_RETURN_ON_FALSE(dma_chan && info && alignment, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    bool desc_on_psram = info->is_desc && info->on_psram;
    ESP_RETURN_ON_FALSE(!desc_on_psram, ESP_ERR_INVALID_ARG, TAG, "should not place descriptor on psram");

    if (info->is_desc) {
        if (dma_chan->pair->group->bus_id == SOC_GDMA_BUS_AHB) {
            *alignment = GDMA_LL_AHB_DESC_ALIGNMENT;
        }
#if SOC_AXI_GDMA_SUPPORTED
        else if (dma_chan->pair->group->bus_id == SOC_GDMA_BUS_AXI) {
            *alignment = GDMA_LL_AXI_DESC_ALIGNMENT;
        }
#endif
    } else {
        if (dma_chan->psram_alignment == 0 && dma_chan->sram_alignment == 0) {
            ESP_LOGI(TAG, "gdma_set_transfer_ability isn't called before, use fallback alignment");
            *alignment = 4;
        } else {
            if (info->on_psram) {
                *alignment = dma_chan->psram_alignment;
            } else {
                *alignment = dma_chan->sram_alignment;
            }
        }
    }

    return ESP_OK;
}
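The dispatcher esp_dma_get_alignment() earlier in this diff forwards to gdma_get_alignment() when a channel handle is available and otherwise falls back to 4 bytes; a minimal caller sketch (the function name is hypothetical):

#include "esp_dma_utils.h"
#include "esp_err.h"

// Query the descriptor alignment for a DMA channel; passing NULL is the
// documented fallback for targets without GDMA and yields 4.
static size_t example_query_desc_alignment(void *gdma_chan_handle)
{
    size_t alignment = 0;
    dma_alignment_info_t info = {
        .is_desc = true,    // asking about descriptor placement
        .on_psram = false,  // descriptors must not live in PSRAM
    };
    ESP_ERROR_CHECK(esp_dma_get_alignment(gdma_chan_handle, &info, &alignment));
    return alignment;       // e.g. GDMA_LL_AHB_DESC_ALIGNMENT on an AHB channel
}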
@@ -9,11 +9,86 @@
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
#include "esp_heap_caps.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief DMA Mem info
 */
typedef struct {
    int heap_caps;            ///< See heap caps
    size_t dma_alignment;     ///< DMA alignment
    size_t custom_alignment;  ///< Set this if you have custom alignment. E.g. if `psram_trans_align` is set when using GDMA driver, or you're using IP self DMA (e.g. SDMMC)
} esp_dma_mem_info_t;

/**
 * @brief Helper function for malloc a DMA capable memory buffer
 *
 * @param[in]  size         Size in bytes, the amount of memory to allocate
 * @param[in]  dma_mem_info DMA and memory info, see `esp_dma_mem_info_t`
 * @param[out] out_ptr      A pointer to the memory allocated successfully
 * @param[out] actual_size  Actual size for allocation in bytes, when the size you specified doesn't meet the DMA alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value.
 *
 * @return
 *        - ESP_OK:
 *        - ESP_ERR_INVALID_ARG: Invalid argument
 *        - ESP_ERR_NO_MEM:      No enough memory for allocation
 */
esp_err_t esp_dma_capable_malloc(size_t size, const esp_dma_mem_info_t *dma_mem_info, void **out_ptr, size_t *actual_size);

/**
 * @brief Helper function for calloc a DMA capable memory buffer
 *
 * @param[in]  size         Size in bytes, the amount of memory to allocate
 * @param[in]  dma_mem_info DMA and memory info, see `esp_dma_mem_info_t`
 * @param[out] out_ptr      A pointer to the memory allocated successfully
 * @param[out] actual_size  Actual size for allocation in bytes, when the size you specified doesn't meet the DMA alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value.
 *
 * @return
 *        - ESP_OK:
 *        - ESP_ERR_INVALID_ARG: Invalid argument
 *        - ESP_ERR_NO_MEM:      No enough memory for allocation
 */
esp_err_t esp_dma_capable_calloc(size_t n, size_t size, const esp_dma_mem_info_t *dma_mem_info, void **out_ptr, size_t *actual_size);

/**
 * @brief Helper function to check if a DMA buffer meets alignment requirements
 *
 * @param[in] ptr          Pointer to the buffer
 * @param[in] size         Size of the buffer
 * @param[in] dma_mem_info DMA and memory info, see `esp_dma_mem_info_t`
 *
 * @return
 *        - True:  Buffer is aligned
 *        - False: Buffer is not aligned, or buffer is not DMA capable
 */
bool esp_dma_is_buffer_alignment_satisfied(const void *ptr, size_t size, esp_dma_mem_info_t *dma_mem_info);

/**
 * @brief Needed info to get GDMA alignment
 */
typedef struct {
    bool is_desc;
    bool on_psram;
} dma_alignment_info_t;

/**
 * @brief Helper to get DMA alignment
 *
 * @param[in]  gdma_chan_handle GDMA channel handle, if no GDMA supported, set it to NULL
 * @param[in]  info             DMA alignment info
 * @param[out] alignment        Alignment
 *
 * @return
 *        - ESP_OK
 *        - ESP_ERR_INVALID_ARG Invalid argument
 */
esp_err_t esp_dma_get_alignment(void *gdma_chan_handle, const dma_alignment_info_t *info, size_t *alignment);

//-----------------------Deprecated APIs-----------------------//
/**
 * DMA malloc flags
 */
@@ -23,35 +98,16 @@ extern "C" {
#define ESP_DMA_MALLOC_FLAG_PSRAM BIT(0)

/**
 * @brief Helper function for malloc a DMA capable memory buffer
 *
 * @param[in]  size        Size in bytes, the amount of memory to allocate
 * @param[in]  flags       Flags, see `ESP_DMA_MALLOC_FLAG_x`
 * @param[out] out_ptr     A pointer to the memory allocated successfully
 * @param[out] actual_size Actual size for allocation in bytes, when the size you specified doesn't meet the DMA alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value.
 *
 * @return
 *        - ESP_OK:
 *        - ESP_ERR_INVALID_ARG: Invalid argument
 *        - ESP_ERR_NO_MEM:      No enough memory for allocation
 * @note This API will use MAX alignment requirement
 */
esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size);
esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
__attribute__((deprecated("esp_dma_malloc is deprecated, please use esp_dma_capable_malloc")));

/**
 * @brief Helper function for calloc a DMA capable memory buffer
 *
 * @param[in]  n           Number of continuing chunks of memory to allocate
 * @param[in]  size        Size of one chunk, in bytes
 * @param[in]  flags       Flags, see `ESP_DMA_MALLOC_FLAG_x`
 * @param[out] out_ptr     A pointer to the memory allocated successfully
 * @param[out] actual_size Actual size for allocation in bytes, when the size you specified doesn't meet the cache alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value.
 *
 * @return
 *        - ESP_OK:
 *        - ESP_ERR_INVALID_ARG: Invalid argument
 *        - ESP_ERR_NO_MEM:      No enough memory for allocation
 * @note This API will use MAX alignment requirement
 */
esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size);
esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
__attribute__((deprecated("esp_dma_calloc is deprecated, please use esp_dma_capable_calloc")));

/**
 * @brief DMA buffer location
@@ -63,17 +119,10 @@ typedef enum {
} esp_dma_buf_location_t;

/**
 * @brief Helper function to check if a buffer meets DMA alignment requirements
 *
 * @param[in] ptr      Pointer to the buffer
 * @param[in] size     Size of the buffer
 * @param[in] location Location of the DMA buffer, see `esp_dma_buf_location_t`
 *
 * @return
 *        - True:  Buffer is aligned
 *        - False: Buffer is not aligned, or buffer is not DMA capable
 * @note This API will use MAX alignment requirement
 */
bool esp_dma_is_buffer_aligned(const void *ptr, size_t size, esp_dma_buf_location_t location);
bool esp_dma_is_buffer_aligned(const void *ptr, size_t size, esp_dma_buf_location_t location)
__attribute__((deprecated("esp_dma_is_buffer_aligned is deprecated, please use esp_dma_is_buffer_alignment_satisfied")));

#ifdef __cplusplus
}
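Because the legacy prototypes above are now tagged with deprecation attributes, callers migrate by describing the target memory explicitly; this sketch mirrors the mapping that s_legacy_malloc() performs in esp_dma_utils.c (the helper name and the want_psram parameter are illustrative):

#include "esp_dma_utils.h"
#include "esp_heap_caps.h"

// Hypothetical compatibility shim: old esp_dma_malloc(flags) behaviour expressed
// on top of the new esp_dma_capable_malloc API.
static esp_err_t example_dma_malloc_compat(size_t size, bool want_psram, void **out_ptr, size_t *actual_size)
{
    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = want_psram ? MALLOC_CAP_SPIRAM : (MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL),
        .dma_alignment = 4, // the legacy API only checked the max DMA buffer alignment
    };
    return esp_dma_capable_malloc(size, &dma_mem_info, out_ptr, actual_size);
}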
@@ -457,6 +457,26 @@ esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_
esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result);
#endif // SOC_GDMA_SUPPORT_CRC

/**
 * @brief Needed info to get GDMA alignment
 */
typedef struct {
    bool is_desc;
    bool on_psram;
} gdma_alignment_info_t;

/**
 * @brief Get GDMA alignment from the channel handle
 *
 * @param[in]  dma_chan  GDMA channel handle
 * @param[in]  info      GDMA alignment info
 * @param[out] alignment Alignment
 *
 * @return
 *        - ESP_OK
 *        - ESP_ERR_INVALID_ARG Invalid argument
 */
esp_err_t gdma_get_alignment(gdma_channel_handle_t dma_chan, const gdma_alignment_info_t *info, size_t *alignment);
#ifdef __cplusplus
}
#endif
@@ -1,4 +1,4 @@
set(srcs "test_app_main.c")
set(srcs "test_app_main.c" "test_dma_utils.c")

if(CONFIG_SOC_ASYNC_MEMCPY_SUPPORTED)
    list(APPEND srcs "test_async_memcpy.c")
components/esp_hw_support/test_apps/dma/main/test_dma_utils.c (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
#include "sdkconfig.h"
|
||||
#include "unity.h"
|
||||
#include "esp_log.h"
|
||||
#include "esp_dma_utils.h"
|
||||
#include "esp_private/esp_cache_private.h"
|
||||
#include "esp_private/gdma.h"
|
||||
#include "soc/soc_caps.h"
|
||||
|
||||
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
|
||||
|
||||
static const char *TAG = "test_dma_utils";
|
||||
|
||||
|
||||
#if CONFIG_SPIRAM
|
||||
/**
|
||||
* To test the API logic is correct, here we simply use max value under default sdkconfig
|
||||
*/
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
#define TEST_BUFFER_PSRAM_ALIGNMENT 64
|
||||
#else
|
||||
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
|
||||
#define TEST_BUFFER_PSRAM_ALIGNMENT 4
|
||||
#else
|
||||
#define TEST_BUFFER_PSRAM_ALIGNMENT 32
|
||||
#endif
|
||||
#endif
|
||||
|
||||
TEST_CASE("test esp_dma_capable_malloc for PSRAM", "[dma_utils]")
|
||||
{
|
||||
size_t test_size = 0;
|
||||
void *test_ptr = NULL;
|
||||
size_t actual_size = 0;
|
||||
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_SPIRAM,
|
||||
.dma_alignment = 4,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
|
||||
//------ psram ------//
|
||||
//aligned
|
||||
test_size = TEST_BUFFER_PSRAM_ALIGNMENT;
|
||||
ESP_LOGI(TAG, "to alloc 0x%zx", test_size);
|
||||
TEST_ESP_OK(esp_dma_capable_malloc(test_size, &dma_mem_info, &test_ptr, &actual_size));
|
||||
ESP_LOGI(TAG, "get test_ptr: %p, actual_size: 0x%zx", test_ptr, actual_size);
|
||||
TEST_ASSERT((uint32_t)test_ptr % TEST_BUFFER_PSRAM_ALIGNMENT == 0);
|
||||
TEST_ASSERT(test_size == actual_size);
|
||||
free(test_ptr);
|
||||
|
||||
//unaligned
|
||||
test_size = TEST_BUFFER_PSRAM_ALIGNMENT + TEST_BUFFER_PSRAM_ALIGNMENT / 2;
|
||||
ESP_LOGI(TAG, "to alloc 0x%zx", test_size);
|
||||
TEST_ESP_OK(esp_dma_capable_malloc(test_size, &dma_mem_info, &test_ptr, &actual_size));
|
||||
ESP_LOGI(TAG, "get test_ptr: %p, actual_size: 0x%zx", test_ptr, actual_size);
|
||||
TEST_ASSERT((uint32_t)test_ptr % TEST_BUFFER_PSRAM_ALIGNMENT == 0);
|
||||
TEST_ASSERT(ALIGN_UP_BY(test_size, TEST_BUFFER_PSRAM_ALIGNMENT) == actual_size);
|
||||
free(test_ptr);
|
||||
}
|
||||
#endif
|
||||
|
||||
TEST_CASE("test custom alignment", "[dma_utils]")
|
||||
{
|
||||
size_t test_size = 0;
|
||||
void *test_ptr = NULL;
|
||||
size_t actual_size = 0;
|
||||
size_t custom_alignment = 512;
|
||||
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_SPIRAM,
|
||||
.dma_alignment = 4,
|
||||
.custom_alignment = custom_alignment,
|
||||
};
|
||||
test_size = custom_alignment + 3;
|
||||
ESP_LOGI(TAG, "to alloc 0x%zx", test_size);
|
||||
TEST_ESP_OK(esp_dma_capable_malloc(test_size, &dma_mem_info, &test_ptr, &actual_size));
|
||||
ESP_LOGI(TAG, "get test_ptr: %p, actual_size: 0x%zx", test_ptr, actual_size);
|
||||
TEST_ASSERT((uint32_t)test_ptr % custom_alignment == 0);
|
||||
TEST_ASSERT(ALIGN_UP_BY(test_size, custom_alignment) == actual_size);
|
||||
free(test_ptr);
|
||||
}
|
||||
|
||||
TEST_CASE("test esp_dma_is_buffer_alignment_satisfied", "[dma_utils]")
|
||||
{
|
||||
size_t test_size = 64;
|
||||
void *test_ptr = NULL;
|
||||
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.dma_alignment = 4,
|
||||
};
|
||||
TEST_ESP_OK(esp_dma_capable_malloc(test_size, &dma_mem_info, &test_ptr, NULL));
|
||||
ESP_LOGI(TAG, "test_ptr %p", test_ptr);
|
||||
bool is_aligned = esp_dma_is_buffer_alignment_satisfied(test_ptr, test_size, &dma_mem_info);
|
||||
TEST_ASSERT(is_aligned);
|
||||
is_aligned = esp_dma_is_buffer_alignment_satisfied(test_ptr + 3, test_size, &dma_mem_info);
|
||||
TEST_ASSERT(!is_aligned);
|
||||
}
|
||||
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
#define TEST_DMA_ALIGNMENT_INT 8
|
||||
#else
|
||||
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
|
||||
#define TEST_DMA_ALIGNMENT_INT 4
|
||||
#else
|
||||
#define TEST_DMA_ALIGNMENT_INT 8
|
||||
#endif
|
||||
#endif
|
||||
|
||||
TEST_CASE("test esp_dma_get_alignment", "[dma_utils]")
|
||||
{
|
||||
size_t dma_alignment = 0;
|
||||
gdma_channel_handle_t tx_channel = NULL;
|
||||
#if SOC_GDMA_SUPPORTED
|
||||
gdma_channel_alloc_config_t channel_config = {};
|
||||
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
|
||||
|
||||
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &tx_channel));
|
||||
|
||||
gdma_transfer_ability_t ability = {
|
||||
.psram_trans_align = 0,
|
||||
.sram_trans_align = 8,
|
||||
};
|
||||
TEST_ESP_OK(gdma_set_transfer_ability(tx_channel, &ability));
|
||||
#endif
|
||||
|
||||
dma_alignment_info_t internal_info = {};
|
||||
|
||||
TEST_ESP_OK(esp_dma_get_alignment(tx_channel, &internal_info, &dma_alignment));
|
||||
ESP_LOGI(TAG, "dma_alignment: 0x%x", dma_alignment);
|
||||
TEST_ASSERT(dma_alignment == TEST_DMA_ALIGNMENT_INT);
|
||||
}
|
||||
|
||||
#if SOC_GDMA_SUPPORTED
|
||||
TEST_CASE("test esp_dma_get_alignment with no transfer ability set", "[dma_utils]")
|
||||
{
|
||||
size_t dma_alignment = 0;
|
||||
gdma_channel_handle_t tx_channel = NULL;
|
||||
gdma_channel_alloc_config_t channel_config = {};
|
||||
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
|
||||
TEST_ESP_OK(gdma_new_ahb_channel(&channel_config, &tx_channel));
|
||||
|
||||
dma_alignment_info_t internal_info = {};
|
||||
TEST_ESP_OK(esp_dma_get_alignment(tx_channel, &internal_info, &dma_alignment));
|
||||
ESP_LOGI(TAG, "dma_alignment: 0x%x", dma_alignment);
|
||||
TEST_ASSERT(dma_alignment == 4);
|
||||
}
|
||||
#endif
|
@@ -218,10 +218,6 @@ esp_err_t esp_cache_get_alignment(uint32_t flags, size_t *out_alignment)
    }

    data_cache_line_size = cache_hal_get_cache_line_size(cache_level, CACHE_TYPE_DATA);
    if (data_cache_line_size == 0) {
        //default alignment
        data_cache_line_size = 4;
    }

    *out_alignment = data_cache_line_size;
@@ -47,6 +47,8 @@ extern "C" {
#define GDMA_LL_AHB_PAIRS_PER_GROUP 1 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number

#define GDMA_LL_AHB_DESC_ALIGNMENT 4

///////////////////////////////////// Common /////////////////////////////////////////

/**
@@ -47,6 +47,8 @@ extern "C" {
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number

#define GDMA_LL_AHB_DESC_ALIGNMENT 4

///////////////////////////////////// Common /////////////////////////////////////////

/**
@@ -50,6 +50,8 @@ extern "C" {
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group

#define GDMA_LL_AHB_DESC_ALIGNMENT 4

#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
    (uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
        [GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@@ -50,6 +50,8 @@ extern "C" {
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group

#define GDMA_LL_AHB_DESC_ALIGNMENT 4

#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
    (uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
        [GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@@ -45,6 +45,9 @@
#define GDMA_LL_AHB_MAX_CRC_BIT_WIDTH 32 // Max CRC bit width supported by AHB GDMA
#define GDMA_LL_AXI_MAX_CRC_BIT_WIDTH 16 // Max CRC bit width supported by AXI GDMA

#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AXI_DESC_ALIGNMENT 8

#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
    (uint32_t[2][GDMA_ETM_EVENT_MAX]){ \
        { \
@@ -60,6 +60,8 @@ extern "C" {
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 5 // Number of GDMA pairs in each AHB group

#define GDMA_LL_AHB_DESC_ALIGNMENT 4

///////////////////////////////////// Common /////////////////////////////////////////

/**
@@ -7,25 +7,6 @@
#include "hal/hal_utils.h"
#include "hal/assert.h"

/**
 * @brief helper function, calculate the Greatest Common Divisor
 * @note gcd(a, b) = gcd(b, a % b)
 * @param a bigger value
 * @param b smaller value
 * @return result of gcd(a, b)
 */
__attribute__((always_inline))
static inline uint32_t _gcd(uint32_t a, uint32_t b)
{
    uint32_t c = a % b;
    while (c != 0) {
        a = b;
        b = c;
        c = a % b;
    }
    return b;
}

__attribute__((always_inline))
static inline uint32_t _sub_abs(uint32_t a, uint32_t b)
{
@@ -102,6 +102,59 @@ static inline uint8_t hal_utils_bitwise_reverse8(uint8_t n)
    return n;
}

/**
 * @brief helper function, calculate the Greatest Common Divisor
 * @note gcd(a, b) = gcd(b, a % b)
 * @param a bigger value
 * @param b smaller value
 * @return result of gcd(a, b)
 */
__attribute__((always_inline))
static inline uint32_t _gcd(uint32_t a, uint32_t b)
{
    uint32_t c = a % b;
    while (c != 0) {
        a = b;
        b = c;
        c = a % b;
    }
    return b;
}

/**
 * @brief Get the least common multiple of two integer
 *
 * @param[in] Integer A
 * @param[in] Integer B
 *
 * @return LCM of A and B
 */
__attribute__((always_inline))
static inline uint32_t _lcm(uint32_t a, uint32_t b)
{
    a = a == 0 ? 1 : a;
    b = b == 0 ? 1 : b;
    return (a * b / _gcd(a, b));
}

/**
 * @brief Get the least common multiple of three integer
 *
 * @param[in] Integer A
 * @param[in] Integer B
 * @param[in] Integer C
 *
 * @return LCM of A, B and C
 */
__attribute__((always_inline))
static inline uint32_t _lcm_3(uint32_t a, uint32_t b, uint32_t c)
{
    a = a == 0 ? 1 : a;
    b = b == 0 ? 1 : b;
    c = c == 0 ? 1 : c;
    return _lcm(a, _lcm(b, c));
}

#ifdef __cplusplus
}
#endif
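The allocator combines three independent constraints (DMA engine, cache line, caller-supplied) by taking their least common multiple with _lcm_3(); a worked example with illustrative numbers:

#include <assert.h>
#include <stdint.h>
#include "hal/hal_utils.h"  // _lcm_3 is defined here as a static inline helper after this change

static void example_combined_alignment(void)
{
    // DMA needs 4-byte alignment, the data cache line is 32 bytes, and the
    // caller asks for a 512-byte custom alignment (e.g. a peripheral block size).
    uint32_t alignment = _lcm_3(4, 32, 512);
    assert(alignment == 512);   // lcm(4, 32) = 32, lcm(32, 512) = 512
}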
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
|
||||
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
@ -328,7 +328,11 @@ esp_err_t sdmmc_send_cmd_send_scr(sdmmc_card_t* card, sdmmc_scr_t *out_scr)
|
||||
esp_err_t err = ESP_FAIL;
|
||||
uint32_t *buf = NULL;
|
||||
size_t actual_size = 0;
|
||||
err = esp_dma_malloc(datalen, 0, (void *)&buf, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
err = esp_dma_capable_malloc(datalen, &dma_mem_info, (void *)&buf, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
}
|
||||
@ -401,7 +405,11 @@ esp_err_t sdmmc_write_sectors(sdmmc_card_t* card, const void* src,
|
||||
|
||||
esp_err_t err = ESP_OK;
|
||||
size_t block_size = card->csd.sector_size;
|
||||
if (esp_dma_is_buffer_aligned(src, block_size * block_count, ESP_DMA_BUF_LOCATION_INTERNAL)) {
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
if (esp_dma_is_buffer_alignment_satisfied(src, block_size * block_count, &dma_mem_info)) {
|
||||
err = sdmmc_write_sectors_dma(card, src, start_block, block_count, block_size * block_count);
|
||||
} else {
|
||||
// SDMMC peripheral needs DMA-capable buffers. Split the write into
|
||||
@ -409,7 +417,7 @@ esp_err_t sdmmc_write_sectors(sdmmc_card_t* card, const void* src,
|
||||
// DMA-capable buffer.
|
||||
void *tmp_buf = NULL;
|
||||
size_t actual_size = 0;
|
||||
err = esp_dma_malloc(block_size, 0, &tmp_buf, &actual_size);
|
||||
err = esp_dma_capable_malloc(block_size, &dma_mem_info, &tmp_buf, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
}
|
||||
@ -519,7 +527,11 @@ esp_err_t sdmmc_read_sectors(sdmmc_card_t* card, void* dst,
|
||||
|
||||
esp_err_t err = ESP_OK;
|
||||
size_t block_size = card->csd.sector_size;
|
||||
if (esp_dma_is_buffer_aligned(dst, block_size * block_count, ESP_DMA_BUF_LOCATION_INTERNAL)) {
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
if (esp_dma_is_buffer_alignment_satisfied(dst, block_size * block_count, &dma_mem_info)) {
|
||||
err = sdmmc_read_sectors_dma(card, dst, start_block, block_count, block_size * block_count);
|
||||
} else {
|
||||
// SDMMC peripheral needs DMA-capable buffers. Split the read into
|
||||
@ -527,7 +539,7 @@ esp_err_t sdmmc_read_sectors(sdmmc_card_t* card, void* dst,
|
||||
// DMA-capable buffer.
|
||||
void *tmp_buf = NULL;
|
||||
size_t actual_size = 0;
|
||||
err = esp_dma_malloc(block_size, 0, &tmp_buf, &actual_size);
|
||||
err = esp_dma_capable_malloc(block_size, &dma_mem_info, &tmp_buf, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
}
|
||||
|
@ -340,7 +340,13 @@ esp_err_t sdmmc_allocate_aligned_buf(sdmmc_card_t* card)
|
||||
if (card->host.flags & SDMMC_HOST_FLAG_ALLOC_ALIGNED_BUF) {
|
||||
void* buf = NULL;
|
||||
size_t actual_size = 0;
|
||||
esp_err_t ret = esp_dma_malloc(SDMMC_IO_BLOCK_SIZE, 0, &buf, &actual_size);
|
||||
// esp_err_t ret = esp_dma_malloc(SDMMC_IO_BLOCK_SIZE, 0, &buf, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
esp_err_t ret = esp_dma_capable_malloc(SDMMC_IO_BLOCK_SIZE, &dma_mem_info, &buf, &actual_size);
|
||||
|
||||
if (ret != ESP_OK) {
|
||||
return ret;
|
||||
}
|
||||
|
@ -277,7 +277,11 @@ esp_err_t sdmmc_io_rw_extended(sdmmc_card_t* card, int func,
|
||||
.blklen = SDMMC_IO_BLOCK_SIZE /* TODO: read max block size from CIS */
|
||||
};
|
||||
|
||||
if (unlikely(datalen > 0 && !esp_dma_is_buffer_aligned(datap, buflen, ESP_DMA_BUF_LOCATION_AUTO))) {
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
if (unlikely(datalen > 0 && !esp_dma_is_buffer_alignment_satisfied(datap, buflen, &dma_mem_info))) {
|
||||
if (datalen > SDMMC_IO_BLOCK_SIZE || card->host.dma_aligned_buffer == NULL) {
|
||||
// User gives unaligned buffer while `SDMMC_HOST_FLAG_ALLOC_ALIGNED_BUF` not set.
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
@ -386,7 +390,11 @@ esp_err_t sdmmc_io_write_bytes(sdmmc_card_t* card, uint32_t function,
|
||||
esp_err_t sdmmc_io_read_blocks(sdmmc_card_t* card, uint32_t function,
|
||||
uint32_t addr, void* dst, size_t size)
|
||||
{
|
||||
if (unlikely(!esp_dma_is_buffer_aligned(dst, size, ESP_DMA_BUF_LOCATION_INTERNAL))) {
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
if (unlikely(!esp_dma_is_buffer_alignment_satisfied(dst, size, &dma_mem_info))) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return sdmmc_io_rw_extended(card, function, addr,
|
||||
@ -397,7 +405,11 @@ esp_err_t sdmmc_io_read_blocks(sdmmc_card_t* card, uint32_t function,
|
||||
esp_err_t sdmmc_io_write_blocks(sdmmc_card_t* card, uint32_t function,
|
||||
uint32_t addr, const void* src, size_t size)
|
||||
{
|
||||
if (unlikely(!esp_dma_is_buffer_aligned(src, size, ESP_DMA_BUF_LOCATION_INTERNAL))) {
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
if (unlikely(!esp_dma_is_buffer_alignment_satisfied(src, size, &dma_mem_info))) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return sdmmc_io_rw_extended(card, function, addr,
|
||||
|
@ -28,7 +28,11 @@ esp_err_t sdmmc_init_mmc_read_ext_csd(sdmmc_card_t* card)
|
||||
esp_err_t err = ESP_OK;
|
||||
uint8_t* ext_csd = NULL;
|
||||
size_t actual_size = 0;
|
||||
err = esp_dma_malloc(EXT_CSD_MMC_SIZE, 0, (void *)&ext_csd, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
err = esp_dma_capable_malloc(EXT_CSD_MMC_SIZE, &dma_mem_info, (void *)&ext_csd, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s: could not allocate ext_csd", __func__);
|
||||
return err;
|
||||
@ -255,7 +259,11 @@ esp_err_t sdmmc_init_mmc_check_ext_csd(sdmmc_card_t* card)
|
||||
/* ensure EXT_CSD buffer is available before starting any SD-card operation */
|
||||
uint8_t* ext_csd = NULL;
|
||||
size_t actual_size = 0;
|
||||
esp_err_t err = esp_dma_malloc(EXT_CSD_MMC_SIZE, 0, (void *)&ext_csd, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
esp_err_t err = esp_dma_capable_malloc(EXT_CSD_MMC_SIZE, &dma_mem_info, (void *)&ext_csd, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s: could not allocate ext_csd", __func__);
|
||||
return err;
|
||||
|
@ -91,7 +91,11 @@ esp_err_t sdmmc_init_sd_ssr(sdmmc_card_t* card)
|
||||
*/
|
||||
uint32_t* sd_ssr = NULL;
|
||||
size_t actual_size = 0;
|
||||
err = esp_dma_calloc(1, SD_SSR_SIZE, 0, (void *)&sd_ssr, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
err = esp_dma_capable_calloc(1, SD_SSR_SIZE, &dma_mem_info, (void *)&sd_ssr, &actual_size);
|
||||
if (err != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s: could not allocate sd_ssr", __func__);
|
||||
return err;
|
||||
@ -239,7 +243,11 @@ esp_err_t sdmmc_enable_hs_mode(sdmmc_card_t* card)
|
||||
|
||||
size_t actual_size = 0;
|
||||
sdmmc_switch_func_rsp_t *response = NULL;
|
||||
esp_err_t err = esp_dma_malloc(sizeof(*response), 0, (void *)&response, &actual_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
esp_err_t err = esp_dma_capable_malloc(sizeof(*response), &dma_mem_info, (void *)&response, &actual_size);
|
||||
assert(actual_size == sizeof(*response));
|
||||
if (err != ESP_OK) {
|
||||
return err;
|
||||
|
@ -1040,11 +1040,18 @@ static void port_obj_free(port_t *port)
|
||||
|
||||
void *frame_list_alloc(size_t frame_list_len)
|
||||
{
|
||||
void *frame_list = heap_caps_aligned_calloc(USB_DWC_FRAME_LIST_MEM_ALIGN, frame_list_len, sizeof(uint32_t), MALLOC_CAP_DMA);
|
||||
esp_err_t ret = ESP_FAIL;
|
||||
void *frame_list = NULL;
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = USB_DWC_FRAME_LIST_MEM_ALIGN,
|
||||
};
|
||||
ret = esp_dma_capable_calloc(frame_list_len, sizeof(uint32_t), &dma_mem_info, &frame_list, NULL);
|
||||
assert(ret != ESP_ERR_INVALID_ARG);
|
||||
|
||||
// Both Frame List start address and size should be already cache aligned so this is only a sanity check
|
||||
if (frame_list) {
|
||||
if (!esp_dma_is_buffer_aligned(frame_list, frame_list_len * sizeof(uint32_t), ESP_DMA_BUF_LOCATION_AUTO)) {
|
||||
if (!esp_dma_is_buffer_alignment_satisfied(frame_list, frame_list_len * sizeof(uint32_t), &dma_mem_info)) {
|
||||
// This should never happen
|
||||
heap_caps_free(frame_list);
|
||||
frame_list = NULL;
|
||||
@ -1065,10 +1072,17 @@ void *transfer_descriptor_list_alloc(size_t list_len, size_t *list_len_bytes_out
|
||||
*list_len_bytes_out = list_len * sizeof(usb_dwc_ll_dma_qtd_t);
|
||||
#endif // SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
|
||||
|
||||
void *qtd_list = heap_caps_aligned_calloc(USB_DWC_QTD_LIST_MEM_ALIGN, *list_len_bytes_out, 1, MALLOC_CAP_DMA);
|
||||
esp_err_t ret = ESP_FAIL;
|
||||
void *qtd_list = NULL;
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.heap_caps = MALLOC_CAP_DMA,
|
||||
.custom_alignment = USB_DWC_QTD_LIST_MEM_ALIGN,
|
||||
};
|
||||
ret = esp_dma_capable_calloc(*list_len_bytes_out, 1, &dma_mem_info, &qtd_list, NULL);
|
||||
assert(ret != ESP_ERR_INVALID_ARG);
|
||||
|
||||
if (qtd_list) {
|
||||
if (!esp_dma_is_buffer_aligned(qtd_list, *list_len_bytes_out * sizeof(usb_dwc_ll_dma_qtd_t), ESP_DMA_BUF_LOCATION_AUTO)) {
|
||||
if (!esp_dma_is_buffer_alignment_satisfied(qtd_list, *list_len_bytes_out * sizeof(usb_dwc_ll_dma_qtd_t), &dma_mem_info)) {
|
||||
// This should never happen
|
||||
heap_caps_free(qtd_list);
|
||||
qtd_list = NULL;
|
||||
|
@ -266,8 +266,14 @@ urb_t *test_hcd_alloc_urb(int num_isoc_packets, size_t data_buffer_size)
|
||||
urb_t *urb = heap_caps_calloc(1, sizeof(urb_t) + (sizeof(usb_isoc_packet_desc_t) * num_isoc_packets), MALLOC_CAP_DEFAULT);
|
||||
void *data_buffer;
|
||||
size_t real_size;
|
||||
esp_dma_malloc(data_buffer_size, 0, &data_buffer, &real_size);
|
||||
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.dma_type = ESP_DMA_OTHERS,
|
||||
.mem_flags = {
|
||||
.dir = ESP_DMA_MEM_DIR_DONT_CARE,
|
||||
},
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
esp_dma_capable_malloc(data_buffer_size, &dma_mem_info, &data_buffer, &real_size);
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(urb, "Failed to allocate URB");
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(data_buffer, "Failed to allocate transfer buffer");
|
||||
|
||||
|
@ -14,7 +14,14 @@ urb_t *urb_alloc(size_t data_buffer_size, int num_isoc_packets)
|
||||
urb_t *urb = heap_caps_calloc(1, sizeof(urb_t) + (sizeof(usb_isoc_packet_desc_t) * num_isoc_packets), MALLOC_CAP_DEFAULT);
|
||||
void *data_buffer;
|
||||
size_t real_size;
|
||||
esp_dma_malloc(data_buffer_size, 0, &data_buffer, &real_size);
|
||||
esp_dma_mem_info_t dma_mem_info = {
|
||||
.dma_type = ESP_DMA_OTHERS,
|
||||
.mem_flags = {
|
||||
.dir = ESP_DMA_MEM_DIR_DONT_CARE,
|
||||
},
|
||||
.custom_alignment = 4,
|
||||
};
|
||||
esp_dma_capable_malloc(data_buffer_size, &dma_mem_info, &data_buffer, &real_size);
|
||||
if (urb == NULL || data_buffer == NULL) {
|
||||
goto err;
|
||||
}
|
||||
|
@@ -106,10 +106,10 @@ Memory Allocation Helper

cache memory synchronization is usually considered when DMA is involved. ESP-IDF provides an API to do memory allocation that can meet the alignment requirement from both the cache and the DMA.

- :cpp:func:`esp_dma_malloc`, this API allocates a chunk of memory that meets the alignment requirement from both the cache and the DMA.
- :cpp:func:`esp_dma_calloc`, this API allocates a chunk of memory that meets the alignment requirement from both the cache and the DMA. The initialized value in the memory is set to zero.
- :cpp:func:`esp_dma_capable_malloc`, this API allocates a chunk of memory that meets the alignment requirement from both the cache and the DMA.
- :cpp:func:`esp_dma_capable_calloc`, this API allocates a chunk of memory that meets the alignment requirement from both the cache and the DMA. The initialized value in the memory is set to zero.

You can also use :c:macro:`ESP_DMA_MALLOC_FLAG_PSRAM` to allocate from the PSRAM.
You can also use :cpp:member:`esp_dma_mem_info_t::on_psram` to allocate from the PSRAM.

Warning for Address Alignment Requirement
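A short allocation sketch to accompany the updated documentation; the 4096-byte size and the PSRAM heap caps are placeholder assumptions, and the alignment check simply reuses the same esp_dma_mem_info_t descriptor:

#include <assert.h>
#include <stdlib.h>
#include "esp_err.h"
#include "esp_dma_utils.h"
#include "esp_heap_caps.h"

void example_psram_dma_buffer(void)
{
    void *buf = NULL;
    size_t actual_size = 0;
    esp_dma_mem_info_t dma_mem_info = {
        .heap_caps = MALLOC_CAP_SPIRAM,  // allocate the buffer in PSRAM
        .dma_alignment = 4,              // example DMA requirement
    };
    ESP_ERROR_CHECK(esp_dma_capable_malloc(4096, &dma_mem_info, &buf, &actual_size));
    assert(esp_dma_is_buffer_alignment_satisfied(buf, actual_size, &dma_mem_info));
    free(buf);
}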
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
 * SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: CC0-1.0
 */
@@ -447,12 +447,16 @@ void app_main(void)
    // it's recommended to choose the size of the draw buffer(s) to be at least 1/10 screen sized
    lv_color_t *buf1 = NULL;
    lv_color_t *buf2 = NULL;
    uint32_t malloc_flags = 0;
    esp_dma_mem_info_t dma_mem_info = {
        .dma_alignment = 4,
#if CONFIG_EXAMPLE_LCD_I80_COLOR_IN_PSRAM
    malloc_flags |= ESP_DMA_MALLOC_FLAG_PSRAM;
        .heap_caps = MALLOC_CAP_SPIRAM,
#else
        .heap_caps = MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL,
#endif // CONFIG_EXAMPLE_LCD_I80_COLOR_IN_PSRAM
    ESP_ERROR_CHECK(esp_dma_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), malloc_flags, (void *)&buf1, NULL));
    ESP_ERROR_CHECK(esp_dma_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), malloc_flags, (void *)&buf2, NULL));
    };
    ESP_ERROR_CHECK(esp_dma_capable_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), &dma_mem_info, (void *)&buf1, NULL));
    ESP_ERROR_CHECK(esp_dma_capable_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), &dma_mem_info, (void *)&buf2, NULL));
    assert(buf1);
    assert(buf2);
    ESP_LOGI(TAG, "buf1@%p, buf2@%p", buf1, buf2);