mirror of
https://github.com/espressif/esp-idf.git
synced 2024-10-05 20:47:46 -04:00
change(gdma): improve the test cases to be target agnostic
This commit is contained in:
parent
14315bb751
commit
4fb58d56b4
@ -8,12 +8,20 @@ if(CONFIG_SOC_GDMA_SUPPORTED)
|
||||
list(APPEND srcs "test_gdma.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_SOC_ETM_SUPPORTED AND CONFIG_SOC_GDMA_SUPPORT_ETM)
|
||||
list(APPEND srcs "test_gdma_etm.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_SOC_DW_GDMA_SUPPORTED)
|
||||
list(APPEND srcs "test_dw_gdma.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_SOC_GDMA_SUPPORT_CRC)
|
||||
list(APPEND srcs "test_gdma_crc.c")
|
||||
endif()
|
||||
|
||||
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
|
||||
# the component can be registered as WHOLE_ARCHIVE
|
||||
idf_component_register(SRCS ${srcs}
|
||||
PRIV_REQUIRES unity esp_mm
|
||||
PRIV_REQUIRES unity esp_mm esp_driver_gpio
|
||||
WHOLE_ARCHIVE)
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include "esp_async_memcpy.h"
|
||||
#include "soc/soc_caps.h"
|
||||
#include "hal/dma_types.h"
|
||||
#include "esp_dma_utils.h"
|
||||
|
||||
#define IDF_LOG_PERFORMANCE(item, value_fmt, value, ...) \
|
||||
printf("[Performance][%s]: " value_fmt "\n", item, value, ##__VA_ARGS__)
|
||||
@ -26,10 +27,7 @@
|
||||
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
#define TEST_MEMCPY_DST_BASE_ALIGN 64
|
||||
#define TEST_MEMCPY_BUFFER_SIZE_MUST_ALIGN_CACHE 1
|
||||
#else
|
||||
#define TEST_MEMCPY_DST_BASE_ALIGN 4
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
@ -56,23 +54,23 @@ static void async_memcpy_setup_testbench(memcpy_testbench_context_t *test_contex
|
||||
uint8_t *dst_buf = NULL;
|
||||
uint8_t *from_addr = NULL;
|
||||
uint8_t *to_addr = NULL;
|
||||
#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
|
||||
|
||||
esp_dma_mem_info_t mem_info = {
|
||||
.dma_alignment_bytes = test_context->align,
|
||||
};
|
||||
if (test_context->src_in_psram) {
|
||||
src_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_SPIRAM);
|
||||
mem_info.extra_heap_caps = MALLOC_CAP_SPIRAM;
|
||||
} else {
|
||||
src_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
mem_info.extra_heap_caps = 0;
|
||||
}
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, buffer_size, &mem_info, (void **)&src_buf, NULL));
|
||||
if (test_context->dst_in_psram) {
|
||||
dst_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_SPIRAM);
|
||||
mem_info.extra_heap_caps = MALLOC_CAP_SPIRAM;
|
||||
} else {
|
||||
dst_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
mem_info.extra_heap_caps = 0;
|
||||
}
|
||||
#else
|
||||
src_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
dst_buf = heap_caps_aligned_alloc(test_context->align, buffer_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
#endif
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(src_buf, "allocate source buffer failed");
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(dst_buf, "allocate destination buffer failed");
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, buffer_size, &mem_info, (void **)&dst_buf, NULL));
|
||||
|
||||
// adding extra offset
|
||||
from_addr = src_buf + test_context->offset;
|
||||
to_addr = dst_buf;
|
||||
@ -113,8 +111,13 @@ TEST_CASE("memory copy the same buffer with different content", "[async mcp]")
|
||||
async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
|
||||
async_memcpy_handle_t driver = NULL;
|
||||
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
|
||||
uint8_t *sbuf = heap_caps_aligned_alloc(TEST_MEMCPY_DST_BASE_ALIGN, 256, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
uint8_t *dbuf = heap_caps_aligned_alloc(TEST_MEMCPY_DST_BASE_ALIGN, 256, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
uint8_t *sbuf = NULL;
|
||||
uint8_t *dbuf = NULL;
|
||||
esp_dma_mem_info_t mem_info = {
|
||||
.dma_alignment_bytes = 4,
|
||||
};
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, 256, &mem_info, (void **)&sbuf, NULL));
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, 256, &mem_info, (void **)&dbuf, NULL));
|
||||
for (int j = 0; j < 20; j++) {
|
||||
TEST_ESP_OK(esp_async_memcpy(driver, dbuf, sbuf, 256, NULL, NULL));
|
||||
vTaskDelay(pdMS_TO_TICKS(10));
|
||||
@ -136,7 +139,7 @@ static void test_memory_copy_one_by_one(async_memcpy_handle_t driver)
|
||||
{
|
||||
uint32_t aligned_test_buffer_size[] = {256, 512, 1024, 2048, 4096};
|
||||
memcpy_testbench_context_t test_context = {
|
||||
.align = TEST_MEMCPY_DST_BASE_ALIGN,
|
||||
.align = 4,
|
||||
};
|
||||
|
||||
for (int i = 0; i < sizeof(aligned_test_buffer_size) / sizeof(aligned_test_buffer_size[0]); i++) {
|
||||
@ -216,9 +219,13 @@ TEST_CASE("memory copy done callback", "[async mcp]")
|
||||
async_memcpy_handle_t driver = NULL;
|
||||
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
|
||||
|
||||
uint8_t *src_buf = heap_caps_aligned_alloc(TEST_MEMCPY_DST_BASE_ALIGN, 256, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
// destination address should aligned to data cache line
|
||||
uint8_t *dst_buf = heap_caps_aligned_alloc(TEST_MEMCPY_DST_BASE_ALIGN, 256, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
uint8_t *src_buf = NULL;
|
||||
uint8_t *dst_buf = NULL;
|
||||
esp_dma_mem_info_t mem_info = {
|
||||
.dma_alignment_bytes = 4,
|
||||
};
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, 256, &mem_info, (void **)&src_buf, NULL));
|
||||
TEST_ESP_OK(esp_dma_capable_calloc(1, 256, &mem_info, (void **)&dst_buf, NULL));
|
||||
|
||||
SemaphoreHandle_t sem = xSemaphoreCreateBinary();
|
||||
TEST_ESP_OK(esp_async_memcpy(driver, dst_buf, src_buf, 256, test_async_memcpy_cb_v1, sem));
|
||||
@ -235,38 +242,39 @@ TEST_CASE("memory copy by DMA on the fly", "[async mcp]")
|
||||
async_memcpy_handle_t driver = NULL;
|
||||
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
|
||||
|
||||
uint32_t test_buffer_len[] = {512, 1024, 2048, 4096, 5011};
|
||||
uint32_t aligned_test_buffer_size[] = {512, 1024, 2048, 4096, 4608};
|
||||
memcpy_testbench_context_t test_context[5] = {
|
||||
[0 ... 4] = {
|
||||
.align = TEST_MEMCPY_DST_BASE_ALIGN,
|
||||
.align = 4,
|
||||
}
|
||||
};
|
||||
|
||||
// Aligned case
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(aligned_test_buffer_size) / sizeof(aligned_test_buffer_size[0]); i++) {
|
||||
test_context[i].seed = i;
|
||||
test_context[i].buffer_size = test_buffer_len[i];
|
||||
test_context[i].buffer_size = aligned_test_buffer_size[i];
|
||||
async_memcpy_setup_testbench(&test_context[i]);
|
||||
}
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(aligned_test_buffer_size) / sizeof(aligned_test_buffer_size[0]); i++) {
|
||||
TEST_ESP_OK(esp_async_memcpy(driver, test_context[i].to_addr, test_context[i].from_addr, test_context[i].copy_size, NULL, NULL));
|
||||
}
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(aligned_test_buffer_size) / sizeof(aligned_test_buffer_size[0]); i++) {
|
||||
async_memcpy_verify_and_clear_testbench(i, test_context[i].copy_size, test_context[i].src_buf, test_context[i].dst_buf, test_context[i].from_addr, test_context[i].to_addr);
|
||||
}
|
||||
|
||||
#if !TEST_MEMCPY_BUFFER_SIZE_MUST_ALIGN_CACHE
|
||||
uint32_t unaligned_test_buffer_size[] = {511, 1023, 2047, 4095, 5011};
|
||||
// Non-aligned case
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(unaligned_test_buffer_size) / sizeof(unaligned_test_buffer_size[0]); i++) {
|
||||
test_context[i].seed = i;
|
||||
test_context[i].buffer_size = test_buffer_len[i];
|
||||
test_context[i].buffer_size = unaligned_test_buffer_size[i];
|
||||
test_context[i].offset = 3;
|
||||
async_memcpy_setup_testbench(&test_context[i]);
|
||||
}
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(unaligned_test_buffer_size) / sizeof(unaligned_test_buffer_size[0]); i++) {
|
||||
TEST_ESP_OK(esp_async_memcpy(driver, test_context[i].to_addr, test_context[i].from_addr, test_context[i].copy_size, NULL, NULL));
|
||||
}
|
||||
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
|
||||
for (int i = 0; i < sizeof(unaligned_test_buffer_size) / sizeof(unaligned_test_buffer_size[0]); i++) {
|
||||
async_memcpy_verify_and_clear_testbench(i, test_context[i].copy_size, test_context[i].src_buf, test_context[i].dst_buf, test_context[i].from_addr, test_context[i].to_addr);
|
||||
}
|
||||
#endif
|
||||
@ -328,7 +336,7 @@ static void memcpy_performance_test(uint32_t buffer_size)
|
||||
IDF_LOG_PERFORMANCE("CPU_COPY", "%.2f MB/s, dir: SRAM->SRAM, size: %zu Bytes", throughput, test_context.buffer_size);
|
||||
async_memcpy_verify_and_clear_testbench(test_context.seed, test_context.copy_size, test_context.src_buf, test_context.dst_buf, test_context.from_addr, test_context.to_addr);
|
||||
|
||||
#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
|
||||
#if SOC_AHB_GDMA_SUPPORT_PSRAM
|
||||
// 2. PSRAM->PSRAM
|
||||
test_context.src_in_psram = true;
|
||||
test_context.dst_in_psram = true;
|
||||
|
@ -56,17 +56,20 @@ TEST_CASE("DW_GDMA M2M Test: Contiguous Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_NOT_NULL(done_sem);
|
||||
|
||||
printf("prepare the source and destination buffers\r\n");
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
size_t sram_alignment = 0;
|
||||
TEST_ESP_OK(esp_cache_get_alignment(0, &sram_alignment));
|
||||
size_t alignment = MAX(sram_alignment, 8);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
TEST_ASSERT_NOT_NULL(dst_buf);
|
||||
for (int i = 0; i < 256; i++) {
|
||||
src_buf[i] = i;
|
||||
}
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
printf("allocate a channel for memory copy\r\n");
|
||||
dw_gdma_channel_static_config_t static_config = {
|
||||
@ -117,10 +120,10 @@ TEST_CASE("DW_GDMA M2M Test: Contiguous Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_EQUAL(pdFALSE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
|
||||
|
||||
printf("check the memory copy result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
|
||||
}
|
||||
@ -145,17 +148,20 @@ TEST_CASE("DW_GDMA M2M Test: Reload Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_NOT_NULL(done_sem);
|
||||
|
||||
printf("prepare the source and destination buffers\r\n");
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
size_t sram_alignment = 0;
|
||||
TEST_ESP_OK(esp_cache_get_alignment(0, &sram_alignment));
|
||||
size_t alignment = MAX(sram_alignment, 8);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
TEST_ASSERT_NOT_NULL(dst_buf);
|
||||
for (int i = 0; i < 256; i++) {
|
||||
src_buf[i] = i;
|
||||
}
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
printf("allocate a channel for memory copy\r\n");
|
||||
dw_gdma_channel_static_config_t static_config = {
|
||||
@ -212,10 +218,10 @@ TEST_CASE("DW_GDMA M2M Test: Reload Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100)));
|
||||
|
||||
printf("check the memory copy result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
|
||||
}
|
||||
@ -264,17 +270,20 @@ TEST_CASE("DW_GDMA M2M Test: Shadow Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_NOT_NULL(done_sem);
|
||||
|
||||
printf("prepare the source and destination buffers\r\n");
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
size_t sram_alignment = 0;
|
||||
TEST_ESP_OK(esp_cache_get_alignment(0, &sram_alignment));
|
||||
size_t alignment = MAX(sram_alignment, 8);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
TEST_ASSERT_NOT_NULL(dst_buf);
|
||||
for (int i = 0; i < 256; i++) {
|
||||
src_buf[i] = i;
|
||||
}
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
printf("allocate a channel for memory copy\r\n");
|
||||
dw_gdma_channel_static_config_t static_config = {
|
||||
@ -334,10 +343,10 @@ TEST_CASE("DW_GDMA M2M Test: Shadow Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_EQUAL_UINT8(1, user_data.count);
|
||||
|
||||
printf("check the memory copy result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
|
||||
}
|
||||
@ -387,17 +396,20 @@ TEST_CASE("DW_GDMA M2M Test: Link-List Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_NOT_NULL(done_sem);
|
||||
|
||||
printf("prepare the source and destination buffers\r\n");
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
size_t sram_alignment = 0;
|
||||
TEST_ESP_OK(esp_cache_get_alignment(0, &sram_alignment));
|
||||
size_t alignment = MAX(sram_alignment, 8);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
TEST_ASSERT_NOT_NULL(dst_buf);
|
||||
for (int i = 0; i < 256; i++) {
|
||||
src_buf[i] = i;
|
||||
}
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
printf("allocate a channel for memory copy\r\n");
|
||||
dw_gdma_channel_static_config_t static_config = {
|
||||
@ -472,10 +484,10 @@ TEST_CASE("DW_GDMA M2M Test: Link-List Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000)));
|
||||
|
||||
printf("check the memory copy result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
|
||||
}
|
||||
@ -504,10 +516,10 @@ TEST_CASE("DW_GDMA M2M Test: Link-List Mode", "[DW_GDMA]")
|
||||
TEST_ASSERT_EQUAL_UINT8(1, user_data.count);
|
||||
|
||||
printf("check the memory copy result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]);
|
||||
}
|
||||
@ -536,10 +548,10 @@ TEST_CASE("DW_GDMA M2M Test: memory set with fixed address", "[DW_GDMA]")
|
||||
src_buf[i] = 0;
|
||||
}
|
||||
src_buf[0] = 66;
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
if (ext_mem_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
printf("allocate a channel for memory set\r\n");
|
||||
dw_gdma_channel_static_config_t static_config = {
|
||||
@ -581,10 +593,10 @@ TEST_CASE("DW_GDMA M2M Test: memory set with fixed address", "[DW_GDMA]")
|
||||
vTaskDelay(pdMS_TO_TICKS(100));
|
||||
|
||||
printf("check the memory set result\r\n");
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (int_mem_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
for (int i = 0; i < 256; i++) {
|
||||
TEST_ASSERT_EQUAL_UINT8(66, dst_buf[i]);
|
||||
}
|
||||
|
@ -1,10 +1,11 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
|
||||
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
#include <sys/param.h>
|
||||
#include "sdkconfig.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
@ -16,6 +17,7 @@
|
||||
#include "soc/soc_caps.h"
|
||||
#include "hal/gdma_ll.h"
|
||||
#include "hal/cache_ll.h"
|
||||
#include "hal/cache_hal.h"
|
||||
#include "esp_cache.h"
|
||||
|
||||
TEST_CASE("GDMA channel allocation", "[GDMA]")
|
||||
@ -174,80 +176,71 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
|
||||
TEST_ESP_OK(gdma_connect(tx_chan, m2m_trigger));
|
||||
TEST_ESP_OK(gdma_connect(rx_chan, m2m_trigger));
|
||||
|
||||
uint8_t *src_buf = heap_caps_aligned_alloc(64, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_alloc(64, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
// allocate the source and destination buffer from SRAM
|
||||
// |--------------------------------------------------|
|
||||
// | 128 bytes DMA descriptor | 128 bytes data buffer |
|
||||
// |--------------------------------------------------|
|
||||
size_t sram_alignment = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
|
||||
size_t alignment = MAX(sram_alignment, 8);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
TEST_ASSERT_NOT_NULL(dst_buf);
|
||||
memset(src_buf, 0, 256);
|
||||
memset(dst_buf, 0, 256);
|
||||
|
||||
dma_descriptor_align8_t *tx_descs = (dma_descriptor_align8_t *) src_buf;
|
||||
dma_descriptor_align8_t *rx_descs = (dma_descriptor_align8_t *) dst_buf;
|
||||
uint8_t *src_data = src_buf + 64;
|
||||
uint8_t *dst_data = dst_buf + 64;
|
||||
uint8_t *src_data = src_buf + 128;
|
||||
uint8_t *dst_data = dst_buf + 128;
|
||||
|
||||
// prepare the source data
|
||||
for (int i = 0; i < 128; i++) {
|
||||
src_data[i] = i;
|
||||
}
|
||||
if (sram_alignment) {
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// CPU and DMA both can write to the DMA descriptor, so if there is a cache, multiple descriptors may reside in the same cache line
|
||||
// causing data inconsistency. To avoid this, we want to access the descriptor memory without the cache.
|
||||
dma_descriptor_align8_t *tx_descs_noncache = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(tx_descs));
|
||||
dma_descriptor_align8_t *rx_descs_noncache = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(rx_descs));
|
||||
|
||||
tx_descs_noncache[0].buffer = src_data;
|
||||
tx_descs_noncache[0].dw0.size = 64;
|
||||
tx_descs_noncache[0].dw0.length = 64;
|
||||
tx_descs_noncache[0].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs_noncache[0].dw0.suc_eof = 0;
|
||||
tx_descs_noncache[0].next = &tx_descs[1]; // Note, the DMA doesn't recognize a non-cacheable address, here must be the cached address
|
||||
|
||||
tx_descs_noncache[1].buffer = src_data + 64;
|
||||
tx_descs_noncache[1].dw0.size = 64;
|
||||
tx_descs_noncache[1].dw0.length = 64;
|
||||
tx_descs_noncache[1].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs_noncache[1].dw0.suc_eof = 1;
|
||||
tx_descs_noncache[1].next = NULL;
|
||||
|
||||
rx_descs_noncache->buffer = dst_data;
|
||||
rx_descs_noncache->dw0.size = 128;
|
||||
rx_descs_noncache->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
rx_descs_noncache->dw0.suc_eof = 1;
|
||||
rx_descs_noncache->next = NULL;
|
||||
#ifdef CACHE_LL_L2MEM_NON_CACHE_ADDR
|
||||
dma_descriptor_align8_t *tx_descs_nc = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(tx_descs));
|
||||
dma_descriptor_align8_t *rx_descs_nc = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(rx_descs));
|
||||
#else
|
||||
tx_descs->buffer = src_data;
|
||||
tx_descs->dw0.size = 128;
|
||||
tx_descs->dw0.length = 128;
|
||||
tx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs->dw0.suc_eof = 1;
|
||||
tx_descs->next = NULL;
|
||||
|
||||
rx_descs->buffer = dst_data;
|
||||
rx_descs->dw0.size = 128;
|
||||
rx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
rx_descs->next = NULL;
|
||||
dma_descriptor_align8_t *tx_descs_nc = tx_descs;
|
||||
dma_descriptor_align8_t *rx_descs_nc = rx_descs;
|
||||
#endif
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// do write-back for the source data because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
#endif
|
||||
tx_descs_nc[0].buffer = src_data;
|
||||
tx_descs_nc[0].dw0.size = 64;
|
||||
tx_descs_nc[0].dw0.length = 64;
|
||||
tx_descs_nc[0].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs_nc[0].dw0.suc_eof = 0;
|
||||
tx_descs_nc[0].next = &tx_descs[1]; // Note, the DMA doesn't recognize a non-cacheable address, here must be the cached address
|
||||
|
||||
tx_descs_nc[1].buffer = src_data + 64;
|
||||
tx_descs_nc[1].dw0.size = 64;
|
||||
tx_descs_nc[1].dw0.length = 64;
|
||||
tx_descs_nc[1].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs_nc[1].dw0.suc_eof = 1;
|
||||
tx_descs_nc[1].next = NULL;
|
||||
|
||||
rx_descs_nc->buffer = dst_data;
|
||||
rx_descs_nc->dw0.size = 128;
|
||||
rx_descs_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
rx_descs_nc->dw0.suc_eof = 1;
|
||||
rx_descs_nc->next = NULL;
|
||||
|
||||
TEST_ESP_OK(gdma_start(rx_chan, (intptr_t)rx_descs));
|
||||
TEST_ESP_OK(gdma_start(tx_chan, (intptr_t)tx_descs));
|
||||
|
||||
xSemaphoreTake(done_sem, portMAX_DELAY);
|
||||
|
||||
#if CONFIG_IDF_TARGET_ESP32P4
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
#endif
|
||||
if (sram_alignment) {
|
||||
// the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data
|
||||
TEST_ESP_OK(esp_cache_msync((void *)dst_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
|
||||
}
|
||||
|
||||
// check the DMA descriptor write-back feature
|
||||
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, tx_descs[0].dw0.owner);
|
||||
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, rx_descs[0].dw0.owner);
|
||||
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, tx_descs_nc[0].dw0.owner);
|
||||
TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, rx_descs_nc[0].dw0.owner);
|
||||
|
||||
for (int i = 0; i < 128; i++) {
|
||||
TEST_ASSERT_EQUAL(i, dst_data[i]);
|
||||
@ -257,7 +250,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
|
||||
vSemaphoreDelete(done_sem);
|
||||
}
|
||||
|
||||
TEST_CASE("GDMA M2M Mode", "[GDMA]")
|
||||
TEST_CASE("GDMA M2M Mode", "[GDMA][M2M]")
|
||||
{
|
||||
gdma_channel_handle_t tx_chan = NULL;
|
||||
gdma_channel_handle_t rx_chan = NULL;
|
||||
@ -300,117 +293,3 @@ TEST_CASE("GDMA M2M Mode", "[GDMA]")
|
||||
TEST_ESP_OK(gdma_del_channel(rx_chan));
|
||||
#endif // SOC_AXI_GDMA_SUPPORTED
|
||||
}
|
||||
|
||||
#if SOC_GDMA_SUPPORT_CRC
|
||||
typedef struct {
|
||||
uint32_t init_value;
|
||||
uint32_t crc_bit_width;
|
||||
uint32_t poly_hex;
|
||||
bool reverse_data_mask;
|
||||
uint32_t expected_result;
|
||||
} test_crc_case_t;
|
||||
static test_crc_case_t crc_test_cases[] = {
|
||||
// CRC8, x^8+x^2+x+1
|
||||
[0] = {
|
||||
.crc_bit_width = 8,
|
||||
.init_value = 0x00,
|
||||
.poly_hex = 0x07,
|
||||
.expected_result = 0xC6,
|
||||
},
|
||||
[1] = {
|
||||
.crc_bit_width = 8,
|
||||
.init_value = 0x00,
|
||||
.poly_hex = 0x07,
|
||||
.reverse_data_mask = true, // refin = true
|
||||
.expected_result = 0xDE,
|
||||
},
|
||||
// CRC16, x^16+x^12+x^5+1
|
||||
[2] = {
|
||||
.crc_bit_width = 16,
|
||||
.init_value = 0xFFFF,
|
||||
.poly_hex = 0x1021,
|
||||
.expected_result = 0x5289,
|
||||
},
|
||||
// CRC32, x32+x26+x23+x22+x16+x12+x11+x10+x8+x7+x5+x4+x2+x+1
|
||||
[3] = {
|
||||
.crc_bit_width = 32,
|
||||
.init_value = 0xFFFFFFFF,
|
||||
.poly_hex = 0x04C11DB7,
|
||||
.expected_result = 0x63B3E283,
|
||||
}
|
||||
};
|
||||
|
||||
// CRC online: https://www.lddgo.net/en/encrypt/crc
|
||||
// Run every CRC algorithm in crc_test_cases[0..test_num_crc_algorithm) over a fixed
// test string, using the given TX channel in memory-to-memory mode, and compare each
// hardware result against the pre-computed expected value.
// Note: tx_chan must be a TX-direction channel; it is connected (but not freed) here.
static void test_gdma_crc_calculation(gdma_channel_handle_t tx_chan, int test_num_crc_algorithm)
{
    uint32_t crc_result = 0;
    const char *test_input_string = "Share::Connect::Innovate";
    size_t input_data_size = strlen(test_input_string);
    printf("Calculate CRC value for string: \"%s\"\r\n", test_input_string);

    gdma_trigger_t m2m_trigger = GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0);
    // get a free DMA trigger ID
    uint32_t free_m2m_id_mask = 0;
    gdma_get_free_m2m_trig_id_mask(tx_chan, &free_m2m_id_mask);
    m2m_trigger.instance_id = __builtin_ctz(free_m2m_id_mask);
    TEST_ESP_OK(gdma_connect(tx_chan, m2m_trigger));

    // one 256-byte DMA-capable internal buffer holds both the descriptor (first
    // 64 bytes) and the payload (remaining 192 bytes); 64-byte alignment satisfies
    // the cache line requirement on ESP32-P4
    uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    TEST_ASSERT_NOT_NULL(src_buf);
    dma_descriptor_align8_t *tx_descs = (dma_descriptor_align8_t *) src_buf;
    uint8_t *src_data = src_buf + 64;
    memcpy(src_data, test_input_string, input_data_size);

    // build a single EOF-terminated descriptor that covers the payload area
    tx_descs->buffer = src_data;
    tx_descs->dw0.size = 256 - 64;
    tx_descs->dw0.length = input_data_size;
    tx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
    tx_descs->dw0.suc_eof = 1;
    tx_descs->next = NULL;

#if CONFIG_IDF_TARGET_ESP32P4
    // do write-back for the buffer because it's in the cache
    Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)src_buf, 256);
#endif

    for (int i = 0; i < test_num_crc_algorithm; i++) {
        gdma_crc_calculator_config_t crc_config = {
            .crc_bit_width = crc_test_cases[i].crc_bit_width,
            .init_value = crc_test_cases[i].init_value,
            .poly_hex = crc_test_cases[i].poly_hex,
            .reverse_data_mask = crc_test_cases[i].reverse_data_mask,
        };
        TEST_ESP_OK(gdma_config_crc_calculator(tx_chan, &crc_config));
        // reset the channel so stale state can't leak into the next algorithm run
        TEST_ESP_OK(gdma_reset(tx_chan));
        TEST_ESP_OK(gdma_start(tx_chan, (intptr_t)tx_descs));
        // simply wait for the transfer done
        vTaskDelay(pdMS_TO_TICKS(100));
        TEST_ESP_OK(gdma_crc_get_result(tx_chan, &crc_result));
        printf("CRC Result: 0x%"PRIx32"\r\n", crc_result);
        TEST_ASSERT_EQUAL(crc_test_cases[i].expected_result, crc_result);
    }

    free(src_buf);
}
|
||||
|
||||
// Exercise the hardware CRC calculator on every GDMA flavor the target provides.
TEST_CASE("GDMA CRC Calculation", "[GDMA]")
{
    gdma_channel_alloc_config_t alloc_cfg = {
        .direction = GDMA_CHANNEL_DIRECTION_TX,
    };
    gdma_channel_handle_t dma_chan = NULL;

#if SOC_AHB_GDMA_SUPPORTED
    printf("Test CRC calculation for AHB GDMA\r\n");
    TEST_ESP_OK(gdma_new_ahb_channel(&alloc_cfg, &dma_chan));
    // AHB GDMA supports all four configured CRC algorithms
    test_gdma_crc_calculation(dma_chan, 4);
    TEST_ESP_OK(gdma_del_channel(dma_chan));
#endif // SOC_AHB_GDMA_SUPPORTED

#if SOC_AXI_GDMA_SUPPORTED
    printf("Test CRC calculation for AXI GDMA\r\n");
    TEST_ESP_OK(gdma_new_axi_channel(&alloc_cfg, &dma_chan));
    // AXI GDMA only runs the first three algorithms
    test_gdma_crc_calculation(dma_chan, 3);
    TEST_ESP_OK(gdma_del_channel(dma_chan));
#endif // SOC_AXI_GDMA_SUPPORTED
}
|
||||
#endif // SOC_GDMA_SUPPORT_CRC
|
||||
|
136
components/esp_hw_support/test_apps/dma/main/test_gdma_crc.c
Normal file
136
components/esp_hw_support/test_apps/dma/main/test_gdma_crc.c
Normal file
@ -0,0 +1,136 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
#include "sdkconfig.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "freertos/semphr.h"
|
||||
#include "unity.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "esp_private/gdma.h"
|
||||
#include "hal/dma_types.h"
|
||||
#include "soc/soc_caps.h"
|
||||
#include "hal/cache_hal.h"
|
||||
#include "hal/cache_ll.h"
|
||||
#include "esp_cache.h"
|
||||
|
||||
// One hardware-CRC test vector: algorithm parameters plus the expected result
// for the fixed input string used by test_gdma_crc_calculation().
typedef struct {
    uint32_t init_value;      // initial value loaded into the CRC register
    uint32_t crc_bit_width;   // CRC width in bits (8, 16 or 32)
    uint32_t poly_hex;        // generator polynomial, hex encoding
    bool reverse_data_mask;   // reverse input data bits (aka "refin")
    uint32_t expected_result; // expected CRC of the test input string
} test_crc_case_t;
|
||||
// Test vectors; expected values were computed with an online CRC tool
// (see link below) for the input "Share::Connect::Innovate".
static test_crc_case_t crc_test_cases[] = {
    // CRC8, x^8+x^2+x+1
    [0] = {
        .crc_bit_width = 8,
        .init_value = 0x00,
        .poly_hex = 0x07,
        .expected_result = 0xC6,
    },
    // same CRC8 polynomial, but with input bit reversal enabled
    [1] = {
        .crc_bit_width = 8,
        .init_value = 0x00,
        .poly_hex = 0x07,
        .reverse_data_mask = true, // refin = true
        .expected_result = 0xDE,
    },
    // CRC16, x^16+x^12+x^5+1
    [2] = {
        .crc_bit_width = 16,
        .init_value = 0xFFFF,
        .poly_hex = 0x1021,
        .expected_result = 0x5289,
    },
    // CRC32, x32+x26+x23+x22+x16+x12+x11+x10+x8+x7+x5+x4+x2+x+1
    [3] = {
        .crc_bit_width = 32,
        .init_value = 0xFFFFFFFF,
        .poly_hex = 0x04C11DB7,
        .expected_result = 0x63B3E283,
    }
};
|
||||
|
||||
// CRC online: https://www.lddgo.net/en/encrypt/crc
|
||||
static void test_gdma_crc_calculation(gdma_channel_handle_t tx_chan, int test_num_crc_algorithm)
|
||||
{
|
||||
uint32_t crc_result = 0;
|
||||
const char *test_input_string = "Share::Connect::Innovate";
|
||||
size_t input_data_size = strlen(test_input_string);
|
||||
printf("Calculate CRC value for string: \"%s\"\r\n", test_input_string);
|
||||
|
||||
gdma_trigger_t m2m_trigger = GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0);
|
||||
// get a free DMA trigger ID
|
||||
uint32_t free_m2m_id_mask = 0;
|
||||
gdma_get_free_m2m_trig_id_mask(tx_chan, &free_m2m_id_mask);
|
||||
m2m_trigger.instance_id = __builtin_ctz(free_m2m_id_mask);
|
||||
TEST_ESP_OK(gdma_connect(tx_chan, m2m_trigger));
|
||||
|
||||
// allocate the source and destination buffer from SRAM
|
||||
// |--------------------------------------------------|
|
||||
// | 128 bytes DMA descriptor | 128 bytes data buffer |
|
||||
// |--------------------------------------------------|
|
||||
size_t sram_alignment = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
|
||||
uint8_t *src_buf = heap_caps_aligned_calloc(sram_alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(src_buf);
|
||||
dma_descriptor_align8_t *tx_descs = (dma_descriptor_align8_t *) src_buf;
|
||||
uint8_t *src_data = src_buf + 64;
|
||||
memcpy(src_data, test_input_string, input_data_size);
|
||||
|
||||
tx_descs->buffer = src_data;
|
||||
tx_descs->dw0.size = 256 - 64;
|
||||
tx_descs->dw0.length = input_data_size;
|
||||
tx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
|
||||
tx_descs->dw0.suc_eof = 1;
|
||||
tx_descs->next = NULL;
|
||||
|
||||
if (sram_alignment) {
|
||||
// do write-back for the buffer because it's in the cache
|
||||
TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
|
||||
}
|
||||
|
||||
for (int i = 0; i < test_num_crc_algorithm; i++) {
|
||||
gdma_crc_calculator_config_t crc_config = {
|
||||
.crc_bit_width = crc_test_cases[i].crc_bit_width,
|
||||
.init_value = crc_test_cases[i].init_value,
|
||||
.poly_hex = crc_test_cases[i].poly_hex,
|
||||
.reverse_data_mask = crc_test_cases[i].reverse_data_mask,
|
||||
};
|
||||
TEST_ESP_OK(gdma_config_crc_calculator(tx_chan, &crc_config));
|
||||
TEST_ESP_OK(gdma_reset(tx_chan));
|
||||
TEST_ESP_OK(gdma_start(tx_chan, (intptr_t)tx_descs));
|
||||
// simply wait for the transfer done
|
||||
vTaskDelay(pdMS_TO_TICKS(100));
|
||||
TEST_ESP_OK(gdma_crc_get_result(tx_chan, &crc_result));
|
||||
printf("CRC Result: 0x%"PRIx32"\r\n", crc_result);
|
||||
TEST_ASSERT_EQUAL(crc_test_cases[i].expected_result, crc_result);
|
||||
}
|
||||
|
||||
free(src_buf);
|
||||
}
|
||||
|
||||
// Exercise the hardware CRC calculator on every GDMA flavor the target provides.
TEST_CASE("GDMA CRC Calculation", "[GDMA][CRC]")
{
    gdma_channel_alloc_config_t alloc_cfg = {
        .direction = GDMA_CHANNEL_DIRECTION_TX,
    };
    gdma_channel_handle_t dma_chan = NULL;

#if SOC_AHB_GDMA_SUPPORTED
    printf("Test CRC calculation for AHB GDMA\r\n");
    TEST_ESP_OK(gdma_new_ahb_channel(&alloc_cfg, &dma_chan));
    // AHB GDMA runs all four configured CRC algorithms
    test_gdma_crc_calculation(dma_chan, 4);
    TEST_ESP_OK(gdma_del_channel(dma_chan));
#endif // SOC_AHB_GDMA_SUPPORTED

#if SOC_AXI_GDMA_SUPPORTED
    printf("Test CRC calculation for AXI GDMA\r\n");
    TEST_ESP_OK(gdma_new_axi_channel(&alloc_cfg, &dma_chan));
    // AXI GDMA only runs the first three algorithms
    test_gdma_crc_calculation(dma_chan, 3);
    TEST_ESP_OK(gdma_del_channel(dma_chan));
#endif // SOC_AXI_GDMA_SUPPORTED
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
|
||||
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
@ -15,7 +15,7 @@
|
||||
#include "driver/gpio.h"
|
||||
#include "esp_async_memcpy.h"
|
||||
|
||||
TEST_CASE("async_memcpy_eof_event", "[etm]")
|
||||
TEST_CASE("async_memcpy_eof_event", "[GDMA][ETM]")
|
||||
{
|
||||
const uint32_t output_gpio = 1;
|
||||
// async_memcpy done ---> ETM channel A ---> GPIO toggle
|
@ -13,10 +13,6 @@ if(CONFIG_SOC_SYSTIMER_SUPPORT_ETM)
|
||||
list(APPEND srcs "test_systimer_etm.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_SOC_GDMA_SUPPORT_ETM)
|
||||
list(APPEND srcs "test_gdma_etm.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_SOC_MCPWM_SUPPORT_ETM)
|
||||
list(APPEND srcs "test_mcpwm_etm.c")
|
||||
endif()
|
||||
|
Loading…
x
Reference in New Issue
Block a user