diff --git a/components/esp_mm/Kconfig b/components/esp_mm/Kconfig new file mode 100644 index 0000000000..b9cb3c5d3b --- /dev/null +++ b/components/esp_mm/Kconfig @@ -0,0 +1,31 @@ +menu "ESP-MM: Memory Management Configurations" + + config ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS + depends on SOC_CACHE_WRITEBACK_SUPPORTED + bool "Enable esp_cache_msync C2M chunked operation" + help + `esp_cache_msync` C2M direction takes critical sections, which means during + the operation, interrupts are disabled. Cache writebacks for + large buffers can be especially time intensive, and might cause interrupts + to be disabled for a significant amount of time. + + Sometimes you may want other ISRs to be serviced during this C2M process. + This option slices one C2M operation into multiple chunks, + each at most CONFIG_ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS_MAX_LEN bytes long. Interrupts + are re-enabled between chunks, so pending ISRs can run during a long C2M process. + + Note that if the buffer processed by `esp_cache_msync` (C2M sliced) is interrupted by an ISR, + and this ISR also accesses this buffer, this may lead to a data coherence issue. + + config ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS_MAX_LEN + hex "Max len in bytes per C2M chunk" + depends on ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS + range 0x1 0x80000 + default 0x20000 if IDF_TARGET_ESP32P4 + default 0x2000 if IDF_TARGET_ESP32S2 + default 0x8000 if IDF_TARGET_ESP32S3 + help + Max len in bytes per C2M chunk; operations with size over the max len will be + sliced into multiple chunks. 
+ +endmenu diff --git a/components/esp_mm/esp_cache.c b/components/esp_mm/esp_cache.c index b87925bb7d..c21dc16a22 100644 --- a/components/esp_mm/esp_cache.c +++ b/components/esp_mm/esp_cache.c @@ -10,6 +10,7 @@ #include "sdkconfig.h" #include "esp_check.h" #include "esp_log.h" +#include "freertos/FreeRTOS.h" #include "esp_heap_caps.h" #include "esp_rom_caps.h" #include "soc/soc_caps.h" @@ -26,6 +27,38 @@ static const char *TAG = "cache"; #define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) DEFINE_CRIT_SECTION_LOCK_STATIC(s_spinlock); +#if CONFIG_ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS +static _lock_t s_mutex; +#endif + +#if SOC_CACHE_WRITEBACK_SUPPORTED +static void s_c2m_ops(uint32_t vaddr, size_t size) +{ +#if CONFIG_ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS + if (!xPortInIsrContext()) { + bool valid = true; + size_t offset = 0; + _lock_acquire(&s_mutex); + while (offset < size) { + size_t chunk_len = ((size - offset) > CONFIG_ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS_MAX_LEN) ? CONFIG_ESP_MM_CACHE_MSYNC_C2M_CHUNKED_OPS_MAX_LEN : (size - offset); + esp_os_enter_critical_safe(&s_spinlock); + valid &= cache_hal_writeback_addr(vaddr + offset, chunk_len); + esp_os_exit_critical_safe(&s_spinlock); + offset += chunk_len; + } + _lock_release(&s_mutex); + assert(valid); + } else +#endif + { + bool valid = false; + esp_os_enter_critical_safe(&s_spinlock); + valid = cache_hal_writeback_addr(vaddr, size); + esp_os_exit_critical_safe(&s_spinlock); + assert(valid); + } +} +#endif esp_err_t esp_cache_msync(void *addr, size_t size, int flags) { @@ -76,11 +109,7 @@ } #if SOC_CACHE_WRITEBACK_SUPPORTED - - esp_os_enter_critical_safe(&s_spinlock); - valid = cache_hal_writeback_addr(vaddr, size); - esp_os_exit_critical_safe(&s_spinlock); - assert(valid); + s_c2m_ops(vaddr, size); if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) { esp_os_enter_critical_safe(&s_spinlock); @@ -88,7 +117,7 @@ 
esp_os_exit_critical_safe(&s_spinlock); } assert(valid); -#endif +#endif //#if SOC_CACHE_WRITEBACK_SUPPORTED } return ESP_OK;