feat(esp32c61): add G0 component support

wanlei 2024-03-11 20:28:03 +08:00
parent ba4b493df8
commit 37dfd8fb52
35 changed files with 9262 additions and 26 deletions

View File

@@ -111,22 +111,22 @@ if(NOT BOOTLOADER_BUILD)
if(CONFIG_SOC_GDMA_SUPPORTED)
list(APPEND srcs "gdma_hal_top.c")
endif()
if(CONFIG_SOC_GDMA_SUPPORT_CRC)
list(APPEND srcs "gdma_hal_crc_gen.c")
endif()
if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 1)
list(APPEND srcs "gdma_hal_ahb_v1.c")
endif()
if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 2)
list(APPEND srcs "gdma_hal_ahb_v2.c")
endif()
if(CONFIG_SOC_AXI_GDMA_SUPPORTED)
list(APPEND srcs "gdma_hal_axi.c")
endif()
endif()
if(CONFIG_SOC_DW_GDMA_SUPPORTED)
@@ -163,10 +163,10 @@ if(NOT BOOTLOADER_BUILD)
if(CONFIG_SOC_ADC_SUPPORTED)
list(APPEND srcs "adc_hal_common.c" "adc_oneshot_hal.c")
endif()
if(CONFIG_SOC_ADC_DMA_SUPPORTED)
list(APPEND srcs "adc_hal.c")
endif()
endif()
if(CONFIG_SOC_LCDCAM_SUPPORTED)
@@ -219,14 +219,15 @@ if(NOT BOOTLOADER_BUILD)
if(CONFIG_SOC_GPSPI_SUPPORTED)
list(APPEND srcs
"spi_hal.c"
"spi_hal_iram.c"
"spi_slave_hal.c"
"spi_slave_hal_iram.c")
endif()
"spi_hal.c"
"spi_hal_iram.c"
"spi_slave_hal.c"
"spi_slave_hal_iram.c"
)
if(CONFIG_SOC_SPI_SUPPORT_SLAVE_HD_VER2)
list(APPEND srcs "spi_slave_hd_hal.c")
endif()
endif()
if(CONFIG_SOC_GPSPI_SUPPORTED AND NOT CONFIG_IDF_TARGET_ESP32)

View File

@@ -0,0 +1,75 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "hal/clk_tree_hal.h"
#include "hal/clk_tree_ll.h"
#include "hal/assert.h"
#include "hal/log.h"
static const char *CLK_HAL_TAG = "clk_hal";
uint32_t clk_hal_soc_root_get_freq_mhz(soc_cpu_clk_src_t cpu_clk_src)
{
switch (cpu_clk_src) {
case SOC_CPU_CLK_SRC_XTAL:
return clk_hal_xtal_get_freq_mhz();
case SOC_CPU_CLK_SRC_PLL:
return clk_ll_bbpll_get_freq_mhz();
case SOC_CPU_CLK_SRC_RC_FAST:
return SOC_CLK_RC_FAST_FREQ_APPROX / MHZ;
default:
// Unknown CPU_CLK mux input
HAL_ASSERT(false);
return 0;
}
}
uint32_t clk_hal_cpu_get_freq_hz(void)
{
soc_cpu_clk_src_t source = clk_ll_cpu_get_src();
uint32_t divider = (source == SOC_CPU_CLK_SRC_PLL) ? clk_ll_cpu_get_hs_divider() : clk_ll_cpu_get_ls_divider();
return clk_hal_soc_root_get_freq_mhz(source) * MHZ / divider;
}
static uint32_t clk_hal_ahb_get_freq_hz(void)
{
soc_cpu_clk_src_t source = clk_ll_cpu_get_src();
uint32_t divider = (source == SOC_CPU_CLK_SRC_PLL) ? clk_ll_ahb_get_hs_divider() : clk_ll_ahb_get_ls_divider();
return clk_hal_soc_root_get_freq_mhz(source) * MHZ / divider;
}
uint32_t clk_hal_apb_get_freq_hz(void)
{
return clk_hal_ahb_get_freq_hz() / clk_ll_apb_get_divider();
}
uint32_t clk_hal_lp_slow_get_freq_hz(void)
{
switch (clk_ll_rtc_slow_get_src()) {
case SOC_RTC_SLOW_CLK_SRC_RC_SLOW:
return SOC_CLK_RC_SLOW_FREQ_APPROX;
case SOC_RTC_SLOW_CLK_SRC_XTAL32K:
return SOC_CLK_XTAL32K_FREQ_APPROX;
case SOC_RTC_SLOW_CLK_SRC_OSC_SLOW:
return SOC_CLK_OSC_SLOW_FREQ_APPROX;
case SOC_RTC_SLOW_CLK_SRC_RC32K:
return SOC_CLK_RC32K_FREQ_APPROX;
default:
// Unknown RTC_SLOW_CLK mux input
HAL_ASSERT(false);
return 0;
}
}
uint32_t clk_hal_xtal_get_freq_mhz(void)
{
uint32_t freq = clk_ll_xtal_load_freq_mhz();
if (freq == 0) {
HAL_LOGW(CLK_HAL_TAG, "invalid RTC_XTAL_FREQ_REG value, assume 40MHz");
return (uint32_t)SOC_XTAL_FREQ_40M;
}
return freq;
}
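The HAL getters above compose directly: clk_hal_cpu_get_freq_hz() converts the root frequency to Hz and divides by the active divider, and clk_hal_apb_get_freq_hz() further divides the AHB clock. A minimal usage sketch, not part of the diff (the "clk" log tag is just an example):
#include "hal/clk_tree_hal.h"
#include "esp_log.h"
static void log_clock_tree(void)
{
    // Each value is derived from the LL register reads shown above
    ESP_LOGI("clk", "CPU:  %lu Hz", (unsigned long)clk_hal_cpu_get_freq_hz());
    ESP_LOGI("clk", "APB:  %lu Hz", (unsigned long)clk_hal_apb_get_freq_hz());
    ESP_LOGI("clk", "SLOW: %lu Hz", (unsigned long)clk_hal_lp_slow_get_freq_hz());
    ESP_LOGI("clk", "XTAL: %lu MHz", (unsigned long)clk_hal_xtal_get_freq_mhz());
}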

View File

@@ -0,0 +1,100 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <sys/param.h>
#include "sdkconfig.h"
#include "soc/soc_caps.h"
#include "hal/assert.h"
#include "hal/efuse_hal.h"
#include "hal/efuse_ll.h"
#include "esp_attr.h"
#define ESP_EFUSE_BLOCK_ERROR_BITS(error_reg, block) ((error_reg) & (0x08 << (4 * (block))))
#define ESP_EFUSE_BLOCK_ERROR_NUM_BITS(error_reg, block) ((error_reg) & (0x07 << (4 * (block))))
IRAM_ATTR uint32_t efuse_hal_get_major_chip_version(void)
{
#ifdef CONFIG_ESP_REV_NEW_CHIP_TEST
return CONFIG_ESP_REV_MIN_FULL / 100;
#else
return efuse_ll_get_chip_wafer_version_major();
#endif
}
IRAM_ATTR uint32_t efuse_hal_get_minor_chip_version(void)
{
#ifdef CONFIG_ESP_REV_NEW_CHIP_TEST
return CONFIG_ESP_REV_MIN_FULL % 100;
#else
return efuse_ll_get_chip_wafer_version_minor();
#endif
}
/******************* eFuse control functions *************************/
void efuse_hal_set_timing(uint32_t apb_freq_hz)
{
(void) apb_freq_hz;
efuse_ll_set_dac_num(0xFF);
efuse_ll_set_dac_clk_div(0x28);
efuse_ll_set_pwr_on_num(0x3000);
efuse_ll_set_pwr_off_num(0x190);
}
void efuse_hal_read(void)
{
efuse_hal_set_timing(0);
efuse_ll_set_conf_read_op_code();
efuse_ll_set_read_cmd();
while (efuse_ll_get_read_cmd() != 0) { }
/* Due to a hardware error, we have to read READ_CMD again to make sure the eFuse clock is normal */
while (efuse_ll_get_read_cmd() != 0) { }
}
void efuse_hal_clear_program_registers(void)
{
ets_efuse_clear_program_registers();
}
void efuse_hal_program(uint32_t block)
{
efuse_hal_set_timing(0);
efuse_ll_set_conf_write_op_code();
efuse_ll_set_pgm_cmd(block);
while (efuse_ll_get_pgm_cmd() != 0) { }
efuse_hal_clear_program_registers();
efuse_hal_read();
}
void efuse_hal_rs_calculate(const void *data, void *rs_values)
{
ets_efuse_rs_calculate(data, rs_values);
}
/******************* eFuse control functions *************************/
bool efuse_hal_is_coding_error_in_block(unsigned block)
{
if (block == 0) {
for (unsigned i = 0; i < 5; i++) {
if (REG_READ(EFUSE_RD_REPEAT_DATA_ERR0_REG + i * 4)) {
return true;
}
}
} else if (block <= 10) {
// EFUSE_RD_RS_ERR0_REG: (hi) BLOCK8, BLOCK7, BLOCK6, BLOCK5, BLOCK4, BLOCK3, BLOCK2, BLOCK1 (low)
// EFUSE_RD_RS_ERR1_REG: BLOCK10, BLOCK9
block--;
uint32_t error_reg = REG_READ(EFUSE_RD_RS_DATA_ERR0_REG + (block / 8) * 4);
return ESP_EFUSE_BLOCK_ERROR_BITS(error_reg, block % 8) != 0;
}
return false;
}
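A hedged usage sketch for the coding-error check above, scanning BLOCK0 through BLOCK10 (the printf reporting is illustrative only):
#include <stdio.h>
#include "hal/efuse_hal.h"
static void efuse_scan_coding_errors(void)
{
    // Blocks 0..10 are the only indices efuse_hal_is_coding_error_in_block() handles
    for (unsigned blk = 0; blk <= 10; blk++) {
        if (efuse_hal_is_coding_error_in_block(blk)) {
            printf("eFuse block %u has a coding error\n", blk);
        }
    }
}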

View File

@@ -0,0 +1,319 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for Cache register operations
#pragma once
#include <stdbool.h>
#include "soc/cache_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
#include "hal/assert.h"
#include "esp32c61/rom/cache.h"
//TODO: [ESP32C61] IDF-9253, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
#define CACHE_LL_ENABLE_DISABLE_STATE_SW 1 //There's no register indicating the cache enable/disable state, so this state has to be tracked in software.
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0
#define CACHE_LL_L1_ACCESS_EVENT_MASK (1<<4)
#define CACHE_LL_L1_ACCESS_EVENT_CACHE_FAIL (1<<4)
#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID
#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem
#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem
#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level
#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0)
/**
* @brief Check if Cache auto preload is enabled or not.
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*
* @return true: enabled; false: disabled
*/
__attribute__((always_inline))
static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL);
bool enabled = false;
if (REG_GET_BIT(CACHE_L1_CACHE_AUTOLOAD_CTRL_REG, CACHE_L1_CACHE_AUTOLOAD_ENA)) {
enabled = true;
}
return enabled;
}
/**
* @brief Disable Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*/
__attribute__((always_inline))
static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
(void) type;
Cache_Disable_Cache();
}
/**
* @brief Enable Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
* @param data_autoload_en data autoload enabled or not
* @param inst_autoload_en inst autoload enabled or not
*/
__attribute__((always_inline))
static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en)
{
Cache_Enable_Cache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0);
}
/**
* @brief Suspend Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*/
__attribute__((always_inline))
static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
Cache_Suspend_Cache();
}
/**
* @brief Resume Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
* @param data_autoload_en data autoload enabled or not
* @param inst_autoload_en inst autoload enabled or not
*/
__attribute__((always_inline))
static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en)
{
Cache_Resume_Cache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0);
}
/**
* @brief Invalidate cache supported addr
*
* Invalidate a cache item
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
* @param vaddr start address of the region to be invalidated
* @param size size of the region to be invalidated
*/
__attribute__((always_inline))
static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size)
{
Cache_Invalidate_Addr(vaddr, size);
}
/**
* @brief Freeze Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*/
__attribute__((always_inline))
static inline void cache_ll_freeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
Cache_Freeze_Enable(CACHE_FREEZE_ACK_BUSY);
}
/**
* @brief Unfreeze Cache
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*/
__attribute__((always_inline))
static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
Cache_Freeze_Disable();
}
/**
* @brief Get Cache line size, in bytes
*
* @param cache_level level of the cache
* @param type see `cache_type_t`
* @param cache_id id of the cache in this type and level
*
* @return Cache line size, in bytes
*/
__attribute__((always_inline))
static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id)
{
uint32_t size = 0;
size = Cache_Get_Cache_Line_Size();
return size;
}
/**
* @brief Get the buses of a particular cache that are mapped to a virtual address range
*
* External virtual address can only be accessed when the involved cache buses are enabled.
* This API gets the cache buses where the memory region (from `vaddr_start` to `vaddr_start + len`) resides.
*
* @param cache_id cache ID (when l1 cache is per core)
* @param vaddr_start virtual address start
* @param len vaddr length
*/
#if !BOOTLOADER_BUILD
__attribute__((always_inline))
#endif
static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len)
{
HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL);
cache_bus_mask_t mask = (cache_bus_mask_t)0;
uint32_t vaddr_end = vaddr_start + len - 1;
if (vaddr_start >= SOC_IRAM0_CACHE_ADDRESS_LOW && vaddr_end < SOC_IRAM0_CACHE_ADDRESS_HIGH) {
//Like the C6, the I/D bus memory is shared, so we always return `CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0`
mask = (cache_bus_mask_t)(mask | (CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0));
} else {
HAL_ASSERT(0); //Out of region
}
return mask;
}
/**
* Enable the Cache Buses
*
* @param cache_id cache ID (when l1 cache is per core)
* @param mask To know which buses should be enabled
*/
#if !BOOTLOADER_BUILD
__attribute__((always_inline))
#endif
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
//TODO: [ESP32C61] IDF-9253, inherit from c6
HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL);
//On esp32c61, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first
HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0);
uint32_t ibus_mask = 0;
ibus_mask = ibus_mask | ((mask & CACHE_BUS_IBUS0) ? CACHE_L1_ICACHE_SHUT_IBUS0 : 0);
REG_CLR_BIT(CACHE_L1_CACHE_CTRL_REG, ibus_mask);
uint32_t dbus_mask = 0;
dbus_mask = dbus_mask | ((mask & CACHE_BUS_DBUS0) ? CACHE_L1_CACHE_SHUT_BUS0 : 0);
REG_CLR_BIT(CACHE_L1_CACHE_CTRL_REG, dbus_mask);
}
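As the assertion comment above says, callers should derive the bus mask from cache_ll_l1_get_bus() before enabling it. An illustrative sketch of that pairing (assumes this header is included; the region values are placeholders):
static void cache_enable_region_sketch(uint32_t vaddr, uint32_t len)
{
    // Map the virtual range onto the cache buses, then clear the corresponding SHUT bits
    cache_bus_mask_t mask = cache_ll_l1_get_bus(0, vaddr, len);
    cache_ll_l1_enable_bus(0, mask);
}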
/**
* Disable the Cache Buses
*
* @param cache_id cache ID (when l1 cache is per core)
* @param mask To know which buses should be disabled
*/
__attribute__((always_inline))
static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
//TODO: [ESP32C61] IDF-9253, inherit from c6
HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL);
//On esp32c61, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first
HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0);
uint32_t ibus_mask = 0;
ibus_mask = ibus_mask | ((mask & CACHE_BUS_IBUS0) ? CACHE_L1_ICACHE_SHUT_IBUS0 : 0);
REG_SET_BIT(CACHE_L1_CACHE_CTRL_REG, ibus_mask);
uint32_t dbus_mask = 0;
dbus_mask = dbus_mask | ((mask & CACHE_BUS_DBUS0) ? CACHE_L1_CACHE_SHUT_BUS0 : 0);
REG_SET_BIT(CACHE_L1_CACHE_CTRL_REG, dbus_mask);
}
/**
* @brief Get Cache level and the ID of the vaddr
*
* @param vaddr_start virtual address start
* @param len vaddr length
* @param out_level cache level
* @param out_id cache id
*
* @return true for valid
*/
__attribute__((always_inline))
static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id)
{
bool valid = false;
uint32_t vaddr_end = vaddr_start + len - 1;
valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end));
valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end));
if (valid) {
*out_level = 1;
*out_id = 0;
}
return valid;
}
/*------------------------------------------------------------------------------
* Interrupt
*----------------------------------------------------------------------------*/
/**
* @brief Enable Cache access error interrupt
*
* @param cache_id Cache ID, not used on ESP32-C61. For compatibility
* @param mask Interrupt mask
*/
static inline void cache_ll_l1_enable_access_error_intr(uint32_t cache_id, uint32_t mask)
{
SET_PERI_REG_MASK(CACHE_L1_CACHE_ACS_FAIL_INT_ENA_REG, mask);
}
/**
* @brief Clear Cache access error interrupt status
*
* @param cache_id Cache ID, not used on ESP32-C61. For compatibility
* @param mask Interrupt mask
*/
static inline void cache_ll_l1_clear_access_error_intr(uint32_t cache_id, uint32_t mask)
{
SET_PERI_REG_MASK(CACHE_L1_CACHE_ACS_FAIL_INT_CLR_REG, mask);
}
/**
* @brief Get Cache access error interrupt status
*
* @param cache_id Cache ID, not used on ESP32-C61. For compatibility
* @param mask Interrupt mask
*
* @return Status mask
*/
static inline uint32_t cache_ll_l1_get_access_error_intr_status(uint32_t cache_id, uint32_t mask)
{
return GET_PERI_REG_MASK(CACHE_L1_CACHE_ACS_FAIL_INT_ST_REG, mask);
}
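A sketch of how the three access-error helpers above are typically combined; the interrupt allocation and ISR registration happen elsewhere and are not part of this header:
static void cache_acs_fail_isr_sketch(void *arg)
{
    (void)arg;
    // At init time: cache_ll_l1_enable_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
    uint32_t status = cache_ll_l1_get_access_error_intr_status(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
    if (status) {
        cache_ll_l1_clear_access_error_intr(0, status);   // acknowledge the failure event
    }
}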
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,225 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "hal/assert.h"
#include "soc/periph_defs.h"
#include "soc/pcr_reg.h"
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "esp_attr.h"
#ifdef __cplusplus
extern "C" {
#endif
// TODO: [ESP32C61] IDF-9249, inherit from c6
static inline uint32_t periph_ll_get_clk_en_mask(periph_module_t periph)
{
switch (periph) {
case PERIPH_SARADC_MODULE:
return PCR_SARADC_CLK_EN;
case PERIPH_LEDC_MODULE:
return PCR_LEDC_CLK_EN;
case PERIPH_UART0_MODULE:
return PCR_UART0_CLK_EN;
case PERIPH_UART1_MODULE:
return PCR_UART1_CLK_EN;
case PERIPH_I2C0_MODULE:
return PCR_I2C_CLK_EN;
case PERIPH_I2S1_MODULE:
return PCR_I2S_CLK_EN;
case PERIPH_TIMG0_MODULE:
return PCR_TG0_CLK_EN;
case PERIPH_TIMG1_MODULE:
return PCR_TG1_CLK_EN;
case PERIPH_SYSTIMER_MODULE:
return PCR_SYSTIMER_CLK_EN;
case PERIPH_SPI_MODULE:
return PCR_MSPI_CLK_EN;
case PERIPH_SPI2_MODULE:
return PCR_SPI2_CLK_EN;
case PERIPH_GDMA_MODULE:
return PCR_GDMA_CLK_EN;
case PERIPH_SHA_MODULE:
return PCR_SHA_CLK_EN;
case PERIPH_ECC_MODULE:
return PCR_ECC_CLK_EN;
case PERIPH_TEMPSENSOR_MODULE:
return PCR_TSENS_CLK_EN;
case PERIPH_REGDMA_MODULE:
return PCR_REGDMA_CLK_EN;
case PERIPH_ASSIST_DEBUG_MODULE:
return PCR_ASSIST_CLK_EN;
default:
return 0;
}
}
static inline uint32_t periph_ll_get_rst_en_mask(periph_module_t periph, bool enable)
{
(void)enable; // unused
switch (periph) {
case PERIPH_SARADC_MODULE:
return PCR_SARADC_RST_EN;
case PERIPH_LEDC_MODULE:
return PCR_LEDC_RST_EN;
case PERIPH_UART0_MODULE:
return PCR_UART0_RST_EN;
case PERIPH_UART1_MODULE:
return PCR_UART1_RST_EN;
case PERIPH_I2C0_MODULE:
return PCR_I2C_RST_EN;
case PERIPH_I2S1_MODULE:
return PCR_I2S_RST_EN;
case PERIPH_TIMG0_MODULE:
return PCR_TG0_RST_EN;
case PERIPH_TIMG1_MODULE:
return PCR_TG1_RST_EN;
case PERIPH_SYSTIMER_MODULE:
return PCR_SYSTIMER_RST_EN;
case PERIPH_SPI_MODULE:
return PCR_MSPI_RST_EN;
case PERIPH_SPI2_MODULE:
return PCR_SPI2_RST_EN;
case PERIPH_GDMA_MODULE:
return PCR_GDMA_RST_EN;
case PERIPH_ECC_MODULE:
return PCR_ECC_RST_EN;
case PERIPH_TEMPSENSOR_MODULE:
return PCR_TSENS_RST_EN;
case PERIPH_SHA_MODULE:
if (enable == true) {
// Clear reset on digital signature and HMAC, otherwise SHA is held in reset
CLEAR_PERI_REG_MASK(PCR_DS_CONF_REG, PCR_DS_RST_EN);
CLEAR_PERI_REG_MASK(PCR_HMAC_CONF_REG, PCR_HMAC_RST_EN);
}
return PCR_SHA_RST_EN;
case PERIPH_REGDMA_MODULE:
return PCR_REGDMA_RST_EN;
case PERIPH_ASSIST_DEBUG_MODULE:
return PCR_ASSIST_RST_EN;
default:
return 0;
}
}
static inline uint32_t periph_ll_get_clk_en_reg(periph_module_t periph)
{
switch (periph) {
case PERIPH_SARADC_MODULE:
return PCR_SARADC_CONF_REG;
case PERIPH_LEDC_MODULE:
return PCR_LEDC_CONF_REG;
case PERIPH_UART0_MODULE:
return PCR_UART0_CONF_REG;
case PERIPH_UART1_MODULE:
return PCR_UART1_CONF_REG;
case PERIPH_I2C0_MODULE:
return PCR_I2C_CONF_REG;
case PERIPH_I2S1_MODULE:
return PCR_I2S_CONF_REG;
case PERIPH_TIMG0_MODULE:
return PCR_TIMERGROUP0_CONF_REG;
case PERIPH_TIMG1_MODULE:
return PCR_TIMERGROUP1_CONF_REG;
case PERIPH_SYSTIMER_MODULE:
return PCR_SYSTIMER_CONF_REG;
case PERIPH_SPI_MODULE:
return PCR_MSPI_CONF_REG;
case PERIPH_SPI2_MODULE:
return PCR_SPI2_CONF_REG;
case PERIPH_GDMA_MODULE:
return PCR_GDMA_CONF_REG;
case PERIPH_SHA_MODULE:
return PCR_SHA_CONF_REG;
case PERIPH_ECC_MODULE:
return PCR_ECC_CONF_REG;
case PERIPH_TEMPSENSOR_MODULE:
return PCR_TSENS_CLK_CONF_REG;
case PERIPH_REGDMA_MODULE:
return PCR_REGDMA_CONF_REG;
case PERIPH_ASSIST_DEBUG_MODULE:
return PCR_ASSIST_CONF_REG;
default:
return 0;
}
}
static inline uint32_t periph_ll_get_rst_en_reg(periph_module_t periph)
{
switch (periph) {
case PERIPH_SARADC_MODULE:
return PCR_SARADC_CONF_REG;
case PERIPH_LEDC_MODULE:
return PCR_LEDC_CONF_REG;
case PERIPH_UART0_MODULE:
return PCR_UART0_CONF_REG;
case PERIPH_UART1_MODULE:
return PCR_UART1_CONF_REG;
case PERIPH_I2C0_MODULE:
return PCR_I2C_CONF_REG;
case PERIPH_I2S1_MODULE:
return PCR_I2S_CONF_REG;
case PERIPH_TIMG0_MODULE:
return PCR_TIMERGROUP0_CONF_REG;
case PERIPH_TIMG1_MODULE:
return PCR_TIMERGROUP1_CONF_REG;
case PERIPH_SYSTIMER_MODULE:
return PCR_SYSTIMER_CONF_REG;
case PERIPH_SPI_MODULE:
return PCR_MSPI_CONF_REG;
case PERIPH_SPI2_MODULE:
return PCR_SPI2_CONF_REG;
case PERIPH_GDMA_MODULE:
return PCR_GDMA_CONF_REG;
case PERIPH_SHA_MODULE:
return PCR_SHA_CONF_REG;
case PERIPH_ECC_MODULE:
return PCR_ECC_CONF_REG;
case PERIPH_TEMPSENSOR_MODULE:
return PCR_TSENS_CLK_CONF_REG;
case PERIPH_REGDMA_MODULE:
return PCR_REGDMA_CONF_REG;
case PERIPH_ASSIST_DEBUG_MODULE:
return PCR_ASSIST_CONF_REG;
default:
return 0;
}
}
static inline void periph_ll_enable_clk_clear_rst(periph_module_t periph)
{
SET_PERI_REG_MASK(periph_ll_get_clk_en_reg(periph), periph_ll_get_clk_en_mask(periph));
CLEAR_PERI_REG_MASK(periph_ll_get_rst_en_reg(periph), periph_ll_get_rst_en_mask(periph, true));
}
static inline void periph_ll_disable_clk_set_rst(periph_module_t periph)
{
CLEAR_PERI_REG_MASK(periph_ll_get_clk_en_reg(periph), periph_ll_get_clk_en_mask(periph));
SET_PERI_REG_MASK(periph_ll_get_rst_en_reg(periph), periph_ll_get_rst_en_mask(periph, false));
}
static inline void periph_ll_reset(periph_module_t periph)
{
SET_PERI_REG_MASK(periph_ll_get_rst_en_reg(periph), periph_ll_get_rst_en_mask(periph, false));
CLEAR_PERI_REG_MASK(periph_ll_get_rst_en_reg(periph), periph_ll_get_rst_en_mask(periph, false));
}
static inline bool IRAM_ATTR periph_ll_periph_enabled(periph_module_t periph)
{
return REG_GET_BIT(periph_ll_get_rst_en_reg(periph), periph_ll_get_rst_en_mask(periph, false)) == 0 &&
REG_GET_BIT(periph_ll_get_clk_en_reg(periph), periph_ll_get_clk_en_mask(periph)) != 0;
}
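A short sketch of the intended call pattern for the gating helpers above (PERIPH_UART1_MODULE is just one of the modules covered by the switch statements; the check is illustrative):
static void uart1_clock_bringup_sketch(void)
{
    // Ungate the clock, release the reset, then verify the module is live
    periph_ll_enable_clk_clear_rst(PERIPH_UART1_MODULE);
    HAL_ASSERT(periph_ll_periph_enabled(PERIPH_UART1_MODULE));
}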
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,817 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
// TODO: [ESP32C61] IDF-9249, inherit from ESP32C6
#include <stdint.h>
#include "soc/soc.h"
#include "soc/clk_tree_defs.h"
#include "soc/pcr_struct.h"
#include "soc/lp_clkrst_struct.h"
#include "soc/pmu_reg.h"
#include "hal/regi2c_ctrl.h"
#include "soc/regi2c_bbpll.h"
#include "hal/assert.h"
#include "hal/log.h"
#include "esp32c61/rom/rtc.h"
#include "hal/misc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MHZ (1000000)
#define CLK_LL_PLL_80M_FREQ_MHZ (80)
#define CLK_LL_PLL_120M_FREQ_MHZ (120)
#define CLK_LL_PLL_160M_FREQ_MHZ (160)
#define CLK_LL_PLL_240M_FREQ_MHZ (240)
#define CLK_LL_PLL_480M_FREQ_MHZ (480)
#define CLK_LL_XTAL32K_CONFIG_DEFAULT() { \
.dac = 3, \
.dres = 3, \
.dgm = 3, \
.dbuf = 1, \
}
/*
Set the frequency division factor of ref_tick.
On ECO1, the RTC calibration of FOSC uses the divide-by-32 clock,
so the frequency division factor of ref_tick must be greater than or equal to 32.
*/
#define REG_FOSC_TICK_NUM 255
/**
* @brief XTAL32K_CLK enable modes
*/
typedef enum {
CLK_LL_XTAL32K_ENABLE_MODE_CRYSTAL, //!< Enable the external 32kHz crystal for XTAL32K_CLK
CLK_LL_XTAL32K_ENABLE_MODE_EXTERNAL, //!< Enable the external clock signal for OSC_SLOW_CLK
CLK_LL_XTAL32K_ENABLE_MODE_BOOTSTRAP, //!< Bootstrap the crystal oscillator for faster XTAL32K_CLK start up
} clk_ll_xtal32k_enable_mode_t;
/**
* @brief XTAL32K_CLK configuration structure
*/
typedef struct {
uint32_t dac : 6;
uint32_t dres : 3;
uint32_t dgm : 3;
uint32_t dbuf: 1;
} clk_ll_xtal32k_config_t;
/**
* @brief Power up BBPLL circuit
*/
static inline __attribute__((always_inline)) void clk_ll_bbpll_enable(void)
{
SET_PERI_REG_MASK(PMU_IMM_HP_CK_POWER_REG, PMU_TIE_HIGH_XPD_BB_I2C |
PMU_TIE_HIGH_XPD_BBPLL | PMU_TIE_HIGH_XPD_BBPLL_I2C);
SET_PERI_REG_MASK(PMU_IMM_HP_CK_POWER_REG, PMU_TIE_HIGH_GLOBAL_BBPLL_ICG);
}
/**
* @brief Power down BBPLL circuit
*/
static inline __attribute__((always_inline)) void clk_ll_bbpll_disable(void)
{
SET_PERI_REG_MASK(PMU_IMM_HP_CK_POWER_REG, PMU_TIE_LOW_GLOBAL_BBPLL_ICG);
SET_PERI_REG_MASK(PMU_IMM_HP_CK_POWER_REG, PMU_TIE_LOW_XPD_BBPLL | PMU_TIE_LOW_XPD_BBPLL_I2C);
}
/**
* @brief Release the root clock source locked by PMU
*/
static inline __attribute__((always_inline)) void clk_ll_cpu_clk_src_lock_release(void)
{
SET_PERI_REG_MASK(PMU_IMM_SLEEP_SYSCLK_REG, PMU_UPDATE_DIG_SYS_CLK_SEL);
}
/**
* @brief Enable the 32kHz crystal oscillator
*
* @param mode Used to determine the xtal32k configuration parameters
*/
static inline __attribute__((always_inline)) void clk_ll_xtal32k_enable(clk_ll_xtal32k_enable_mode_t mode)
{
if (mode == CLK_LL_XTAL32K_ENABLE_MODE_EXTERNAL) {
// No need to configure anything for OSC_SLOW_CLK
return;
}
// Configure xtal32k
clk_ll_xtal32k_config_t cfg = CLK_LL_XTAL32K_CONFIG_DEFAULT();
LP_CLKRST.xtal32k.dac_xtal32k = cfg.dac;
LP_CLKRST.xtal32k.dres_xtal32k = cfg.dres;
LP_CLKRST.xtal32k.dgm_xtal32k = cfg.dgm;
LP_CLKRST.xtal32k.dbuf_xtal32k = cfg.dbuf;
// Enable xtal32k xpd
SET_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_XTAL32K);
}
/**
* @brief Disable the 32kHz crystal oscillator
*/
static inline __attribute__((always_inline)) void clk_ll_xtal32k_disable(void)
{
// Disable xtal32k xpd
CLEAR_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_XTAL32K);
}
/**
* @brief Get the state of the 32kHz crystal clock
*
* @return True if the 32kHz XTAL is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_xtal32k_is_enabled(void)
{
return REG_GET_FIELD(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_XTAL32K) == 1;
}
/**
* @brief Enable the internal oscillator output for RC32K_CLK
*/
static inline __attribute__((always_inline)) void clk_ll_rc32k_enable(void)
{
// Enable rc32k xpd status
SET_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_RC32K);
}
/**
* @brief Disable the internal oscillator output for RC32K_CLK
*/
static inline __attribute__((always_inline)) void clk_ll_rc32k_disable(void)
{
// Disable rc32k xpd status
CLEAR_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_RC32K);
}
/**
* @brief Get the state of the internal oscillator for RC32K_CLK
*
* @return True if the oscillator is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_rc32k_is_enabled(void)
{
return REG_GET_FIELD(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_RC32K) == 1;
}
/**
* @brief Enable the internal oscillator output for RC_FAST_CLK
*/
static inline __attribute__((always_inline)) void clk_ll_rc_fast_enable(void)
{
SET_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_FOSC_CLK);
}
/**
* @brief Disable the internal oscillator output for RC_FAST_CLK
*/
static inline __attribute__((always_inline)) void clk_ll_rc_fast_disable(void)
{
CLEAR_PERI_REG_MASK(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_FOSC_CLK);
}
/**
* @brief Get the state of the internal oscillator for RC_FAST_CLK
*
* @return True if the oscillator is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_rc_fast_is_enabled(void)
{
return REG_GET_FIELD(PMU_HP_SLEEP_LP_CK_POWER_REG, PMU_HP_SLEEP_XPD_FOSC_CLK) == 1;
}
/**
* @brief Enable the digital RC_FAST_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_rc_fast_digi_enable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_fosc = 1;
}
/**
* @brief Disable the digital RC_FAST_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_rc_fast_digi_disable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_fosc = 0;
}
/**
* @brief Get the state of the digital RC_FAST_CLK
*
* @return True if the digital RC_FAST_CLK is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_rc_fast_digi_is_enabled(void)
{
return LP_CLKRST.clk_to_hp.icg_hp_fosc;
}
/**
* @brief Enable the digital XTAL32K_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_xtal32k_digi_enable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_xtal32k = 1;
}
/**
* @brief Disable the digital XTAL32K_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_xtal32k_digi_disable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_xtal32k = 0;
}
/**
* @brief Get the state of the digital XTAL32K_CLK
*
* @return True if the digital XTAL32K_CLK is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_xtal32k_digi_is_enabled(void)
{
return LP_CLKRST.clk_to_hp.icg_hp_xtal32k;
}
/**
* @brief Enable the digital RC32K_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_rc32k_digi_enable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_osc32k = 1;
}
/**
* @brief Disable the digital RC32K_CLK, which is used to support peripherals.
*/
static inline __attribute__((always_inline)) void clk_ll_rc32k_digi_disable(void)
{
LP_CLKRST.clk_to_hp.icg_hp_osc32k = 0;
}
/**
* @brief Get the state of the digital RC32K_CLK
*
* @return True if the digital RC32K_CLK is enabled
*/
static inline __attribute__((always_inline)) bool clk_ll_rc32k_digi_is_enabled(void)
{
return LP_CLKRST.clk_to_hp.icg_hp_osc32k;
}
/**
* @brief Get PLL_CLK frequency
*
* @return PLL clock frequency, in MHz. Returns 0 if register field value is invalid.
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_bbpll_get_freq_mhz(void)
{
// The target has a fixed 480MHz SPLL
return CLK_LL_PLL_480M_FREQ_MHZ;
}
/**
* @brief Set BBPLL frequency from XTAL source (Digital part)
*
* @param pll_freq_mhz PLL frequency, in MHz
*/
static inline __attribute__((always_inline)) void clk_ll_bbpll_set_freq_mhz(uint32_t pll_freq_mhz)
{
// The target SPLL is fixed to 480MHz
// Do nothing
HAL_ASSERT(pll_freq_mhz == CLK_LL_PLL_480M_FREQ_MHZ);
}
/**
* @brief Set BBPLL frequency from XTAL source (Analog part)
*
* @param pll_freq_mhz PLL frequency, in MHz
* @param xtal_freq_mhz XTAL frequency, in MHz
*/
static inline __attribute__((always_inline)) void clk_ll_bbpll_set_config(uint32_t pll_freq_mhz, uint32_t xtal_freq_mhz)
{
HAL_ASSERT(pll_freq_mhz == CLK_LL_PLL_480M_FREQ_MHZ);
uint8_t div_ref;
uint8_t div7_0;
uint8_t dr1;
uint8_t dr3;
uint8_t dchgp;
uint8_t dcur;
uint8_t dbias;
/* Configure 480M PLL */
switch (xtal_freq_mhz) {
case SOC_XTAL_FREQ_40M:
default:
div_ref = 0;
div7_0 = 8;
dr1 = 0;
dr3 = 0;
dchgp = 5;
dcur = 3;
dbias = 2;
break;
}
uint8_t i2c_bbpll_lref = (dchgp << I2C_BBPLL_OC_DCHGP_LSB) | (div_ref);
uint8_t i2c_bbpll_div_7_0 = div7_0;
uint8_t i2c_bbpll_dcur = (1 << I2C_BBPLL_OC_DLREF_SEL_LSB ) | (3 << I2C_BBPLL_OC_DHREF_SEL_LSB) | dcur;
REGI2C_WRITE(I2C_BBPLL, I2C_BBPLL_OC_REF_DIV, i2c_bbpll_lref);
REGI2C_WRITE(I2C_BBPLL, I2C_BBPLL_OC_DIV_7_0, i2c_bbpll_div_7_0);
REGI2C_WRITE_MASK(I2C_BBPLL, I2C_BBPLL_OC_DR1, dr1);
REGI2C_WRITE_MASK(I2C_BBPLL, I2C_BBPLL_OC_DR3, dr3);
REGI2C_WRITE(I2C_BBPLL, I2C_BBPLL_OC_DCUR, i2c_bbpll_dcur);
REGI2C_WRITE_MASK(I2C_BBPLL, I2C_BBPLL_OC_VCO_DBIAS, dbias);
}
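A hedged sketch of how the BBPLL helpers above fit together when bringing the fixed 480 MHz SPLL up from the 40 MHz crystal; the exact ordering used by the clock driver may differ:
static void bbpll_bringup_sketch(void)
{
    clk_ll_bbpll_enable();                                                // power up the analog blocks
    clk_ll_bbpll_set_config(CLK_LL_PLL_480M_FREQ_MHZ, SOC_XTAL_FREQ_40M); // program the regi2c analog settings
    clk_ll_bbpll_set_freq_mhz(CLK_LL_PLL_480M_FREQ_MHZ);                  // digital part is fixed, asserts only
}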
/**
* @brief Select the clock source for CPU_CLK (SOC Clock Root)
*
* @param in_sel One of the clock sources in soc_cpu_clk_src_t
*/
static inline __attribute__((always_inline)) void clk_ll_cpu_set_src(soc_cpu_clk_src_t in_sel)
{
switch (in_sel) {
case SOC_CPU_CLK_SRC_XTAL:
PCR.sysclk_conf.soc_clk_sel = 0;
break;
case SOC_CPU_CLK_SRC_PLL:
PCR.sysclk_conf.soc_clk_sel = 1;
break;
case SOC_CPU_CLK_SRC_RC_FAST:
PCR.sysclk_conf.soc_clk_sel = 2;
break;
default:
// Unsupported SOC_CLK mux input sel
abort();
}
}
/**
* @brief Get the clock source for CPU_CLK (SOC Clock Root)
*
* @return Currently selected clock source (one of soc_cpu_clk_src_t values)
*/
static inline __attribute__((always_inline)) soc_cpu_clk_src_t clk_ll_cpu_get_src(void)
{
uint32_t clk_sel = PCR.sysclk_conf.soc_clk_sel;
switch (clk_sel) {
case 0:
return SOC_CPU_CLK_SRC_XTAL;
case 1:
return SOC_CPU_CLK_SRC_PLL;
case 2:
return SOC_CPU_CLK_SRC_RC_FAST;
default:
// Invalid SOC_CLK_SEL value
return SOC_CPU_CLK_SRC_INVALID;
}
}
/**
* @brief Set CPU_CLK's high-speed divider (valid when SOC_ROOT clock source is PLL)
*
* @param divider Divider. (PCR_HS_DIV_NUM + 1) * (PCR_CPU_HS_DIV_NUM + 1) = divider.
*/
static inline __attribute__((always_inline)) void clk_ll_cpu_set_hs_divider(uint32_t divider)
{
// SOC_ROOT_CLK ---(1)---> HP_ROOT_CLK ---(2)---> CPU_CLK
// (1) not configurable for the target (HRO register field: PCR_HS_DIV_NUM)
// Fixed at 3 for HS clock source
// Corresponding register field value is PCR_HS_DIV_NUM=2
// (2) configurable
// HS divider option: 1, 2, 4 (PCR_CPU_HS_DIV_NUM=0, 1, 3)
HAL_ASSERT(divider == 3 || divider == 4 || divider == 6 || divider == 12);
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.cpu_freq_conf, cpu_div_num, (divider / 3) - 1);
// 120MHz CPU freq cannot be achieved through divider, need to set force_120m
// This field is only valid if PCR_CPU_HS_DIV_NUM=0 and PCR_SOC_CLK_SEL=SOC_CPU_CLK_SRC_PLL
// bool force_120m = (divider == 4) ? 1 : 0;
// PCR.cpu_freq_conf.cpu_hs_120m_force = force_120m;
}
/**
* @brief Set CPU_CLK's low-speed divider (valid when SOC_ROOT clock source is XTAL/RC_FAST)
*
* @param divider Divider. (PCR_LS_DIV_NUM + 1) * (PCR_CPU_LS_DIV_NUM + 1) = divider.
*/
static inline __attribute__((always_inline)) void clk_ll_cpu_set_ls_divider(uint32_t divider)
{
// SOC_ROOT_CLK ---(1)---> HP_ROOT_CLK ---(2)---> CPU_CLK
// (1) not configurable for the target (HRO register field: PCR_LS_DIV_NUM)
// Fixed at 1 for LS clock source
// Corresponding register field value is PCR_LS_DIV_NUM=0
// (2) configurable
// LS divider option: 1, 2, 4, 8, 16, 32 (PCR_CPU_LS_DIV_NUM=0, 1, 3, 7, 15, 31)
HAL_ASSERT((divider > 0) && ((divider & (divider - 1)) == 0));
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.cpu_freq_conf, cpu_div_num, divider - 1);
}
/**
* @brief Get CPU_CLK's high-speed divider
*
* @return Divider. Divider = (PCR_HS_DIV_NUM + 1) * (PCR_CPU_HS_DIV_NUM + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_cpu_get_hs_divider(void)
{
uint32_t cpu_hs_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.cpu_freq_conf, cpu_div_num);
uint32_t hp_root_hs_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.sysclk_conf, hs_div_num);
return (hp_root_hs_div + 1) * (cpu_hs_div + 1);
}
/**
* @brief Get CPU_CLK's low-speed divider
*
* @return Divider. Divider = (PCR_LS_DIV_NUM + 1) * (PCR_CPU_LS_DIV_NUM + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_cpu_get_ls_divider(void)
{
uint32_t cpu_ls_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.cpu_freq_conf, cpu_div_num);
uint32_t hp_root_ls_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.sysclk_conf, ls_div_num);
return (hp_root_ls_div + 1) * (cpu_ls_div + 1);
}
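To make the divider arithmetic concrete: with the SPLL selected, divider 3 means (PCR_HS_DIV_NUM + 1) * (PCR_CPU_HS_DIV_NUM + 1) = 3 * 1, so the CPU runs at 480 MHz / 3 = 160 MHz. An illustrative sequence (not taken from the clock driver):
static void cpu_160mhz_sketch(void)
{
    clk_ll_cpu_set_src(SOC_CPU_CLK_SRC_PLL);
    clk_ll_cpu_set_hs_divider(3);
    // clk_hal_cpu_get_freq_hz() in clk_tree_hal.c now evaluates to 480 * MHZ / 3 = 160000000
}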
/**
* @brief Set AHB_CLK's high-speed divider (valid when SOC_ROOT clock source is PLL)
*
* @param divider Divider. (PCR_HS_DIV_NUM + 1) * (PCR_AHB_HS_DIV_NUM + 1) = divider.
*/
static inline __attribute__((always_inline)) void clk_ll_ahb_set_hs_divider(uint32_t divider)
{
// SOC_ROOT_CLK ---(1)---> HP_ROOT_CLK ---(2)---> AHB_CLK
// (1) not configurable for the target (HRO register field: PCR_HS_DIV_NUM)
// Fixed at 3 for HS clock source
// Corresponding register field value is PCR_HS_DIV_NUM=2
// (2) configurable
// HS divider option: 4, 8, 16 (PCR_AHB_HS_DIV_NUM=3, 7, 15)
HAL_ASSERT(divider == 12 || divider == 24 || divider == 48);
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.ahb_freq_conf, ahb_div_num, (divider / 3) - 1);
}
/**
* @brief Set AHB_CLK's low-speed divider (valid when SOC_ROOT clock source is XTAL/RC_FAST)
*
* @param divider Divider. (PCR_LS_DIV_NUM + 1) * (PCR_AHB_LS_DIV_NUM + 1) = divider.
*/
static inline __attribute__((always_inline)) void clk_ll_ahb_set_ls_divider(uint32_t divider)
{
// SOC_ROOT_CLK ---(1)---> HP_ROOT_CLK ---(2)---> AHB_CLK
// (1) not configurable for the target (HRO register field: PCR_LS_DIV_NUM)
// Fixed at 1 for LS clock source
// Corresponding register field value is PCR_LS_DIV_NUM=0
// (2) configurable
// LS divider option: 1, 2, 4, 8, 16, 32 (PCR_AHB_LS_DIV_NUM=0, 1, 3, 7, 15, 31)
HAL_ASSERT((divider > 0) && ((divider & (divider - 1)) == 0));
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.ahb_freq_conf, ahb_div_num, divider - 1);
}
/**
* @brief Get AHB_CLK's high-speed divider
*
* @return Divider. Divider = (PCR_HS_DIV_NUM + 1) * (PCR_AHB_HS_DIV_NUM + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_ahb_get_hs_divider(void)
{
uint32_t ahb_hs_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.ahb_freq_conf, ahb_div_num);
uint32_t hp_root_hs_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.sysclk_conf, hs_div_num);
return (hp_root_hs_div + 1) * (ahb_hs_div + 1);
}
/**
* @brief Get AHB_CLK's low-speed divider
*
* @return Divider. Divider = (PCR_LS_DIV_NUM + 1) * (PCR_AHB_LS_DIV_NUM + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_ahb_get_ls_divider(void)
{
uint32_t ahb_ls_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.ahb_freq_conf, ahb_div_num);
uint32_t hp_root_ls_div = HAL_FORCE_READ_U32_REG_FIELD(PCR.sysclk_conf, ls_div_num);
return (hp_root_ls_div + 1) * (ahb_ls_div + 1);
}
/**
* @brief Set APB_CLK divider. freq of APB_CLK = freq of AHB_CLK / divider
*
* @param divider Divider. PCR_APB_DIV_NUM = divider - 1.
*/
static inline __attribute__((always_inline)) void clk_ll_apb_set_divider(uint32_t divider)
{
// AHB ------> APB
// Divider option: 1, 2, 4 (PCR_APB_DIV_NUM=0, 1, 3)
HAL_ASSERT(divider == 1 || divider == 2 || divider == 4);
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.apb_freq_conf, apb_div_num, divider - 1);
}
/**
* @brief Get APB_CLK divider
*
* @return Divider. Divider = (PCR_APB_DIV_NUM + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_apb_get_divider(void)
{
return HAL_FORCE_READ_U32_REG_FIELD(PCR.apb_freq_conf, apb_div_num) + 1;
}
/**
* @brief Set MSPI_FAST_CLK's high-speed divider (valid when SOC_ROOT clock source is PLL)
*
* @param divider Divider.
*/
static inline __attribute__((always_inline)) void clk_ll_mspi_fast_set_hs_divider(uint32_t divider)
{
// SOC_ROOT_CLK ------> MSPI_FAST_CLK
// HS divider option: 4, 5, 6 (PCR_MSPI_FAST_HS_DIV_NUM=3, 4, 5)
uint32_t div_num = 0;
switch (divider) {
case 4:
div_num = 3;
break;
case 5:
div_num = 4;
break;
case 6:
div_num = 5;
break;
default:
// Unsupported HS MSPI_FAST divider
abort();
}
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.mspi_clk_conf, mspi_fast_div_num, div_num);
}
/**
* @brief Set MSPI_FAST_CLK's low-speed divider (valid when SOC_ROOT clock source is XTAL/RC_FAST)
*
* @param divider Divider.
*/
static inline __attribute__((always_inline)) void clk_ll_mspi_fast_set_ls_divider(uint32_t divider)
{
// SOC_ROOT_CLK ------> MSPI_FAST_CLK
// LS divider option: 1, 2, 4 (PCR_MSPI_FAST_LS_DIV_NUM=0, 1, 2)
uint32_t div_num = 0;
switch (divider) {
case 1:
div_num = 0;
break;
case 2:
div_num = 1;
break;
case 4:
div_num = 2;
break;
default:
// Unsupported LS MSPI_FAST divider
abort();
}
HAL_FORCE_MODIFY_U32_REG_FIELD(PCR.mspi_clk_conf, mspi_fast_div_num, div_num);
}
/**
* @brief Select the calibration 32kHz clock source for timergroup0
*
* @param in_sel One of the 32kHz clock sources (RC32K_CLK, XTAL32K_CLK, OSC_SLOW_CLK)
*/
static inline __attribute__((always_inline)) void clk_ll_32k_calibration_set_target(soc_rtc_slow_clk_src_t in_sel)
{
switch (in_sel) {
case SOC_RTC_SLOW_CLK_SRC_RC32K:
PCR.ctrl_32k_conf.clk_32k_sel = 0;
break;
case SOC_RTC_SLOW_CLK_SRC_XTAL32K:
PCR.ctrl_32k_conf.clk_32k_sel = 1;
break;
case SOC_RTC_SLOW_CLK_SRC_OSC_SLOW:
PCR.ctrl_32k_conf.clk_32k_sel = 2;
break;
default:
// Unsupported 32K_SEL mux input
abort();
}
}
/**
* @brief Get the calibration 32kHz clock source for timergroup0
*
* @return soc_rtc_slow_clk_src_t Currently selected calibration 32kHz clock (one of the 32kHz clocks)
*/
static inline __attribute__((always_inline)) soc_rtc_slow_clk_src_t clk_ll_32k_calibration_get_target(void)
{
uint32_t clk_sel = PCR.ctrl_32k_conf.clk_32k_sel;
switch (clk_sel) {
case 0:
return SOC_RTC_SLOW_CLK_SRC_RC32K;
case 1:
return SOC_RTC_SLOW_CLK_SRC_XTAL32K;
case 2:
return SOC_RTC_SLOW_CLK_SRC_OSC_SLOW;
default:
return SOC_RTC_SLOW_CLK_SRC_INVALID;
}
}
/**
* @brief Select the clock source for RTC_SLOW_CLK
*
* @param in_sel One of the clock sources in soc_rtc_slow_clk_src_t
*/
static inline __attribute__((always_inline)) void clk_ll_rtc_slow_set_src(soc_rtc_slow_clk_src_t in_sel)
{
switch (in_sel) {
case SOC_RTC_SLOW_CLK_SRC_RC_SLOW:
LP_CLKRST.lp_clk_conf.slow_clk_sel = 0;
break;
case SOC_RTC_SLOW_CLK_SRC_XTAL32K:
LP_CLKRST.lp_clk_conf.slow_clk_sel = 1;
break;
case SOC_RTC_SLOW_CLK_SRC_RC32K:
LP_CLKRST.lp_clk_conf.slow_clk_sel = 2;
break;
case SOC_RTC_SLOW_CLK_SRC_OSC_SLOW:
LP_CLKRST.lp_clk_conf.slow_clk_sel = 3;
break;
default:
// Unsupported RTC_SLOW_CLK mux input sel
abort();
}
}
/**
* @brief Get the clock source for RTC_SLOW_CLK
*
* @return Currently selected clock source (one of soc_rtc_slow_clk_src_t values)
*/
static inline __attribute__((always_inline)) soc_rtc_slow_clk_src_t clk_ll_rtc_slow_get_src(void)
{
uint32_t clk_sel = LP_CLKRST.lp_clk_conf.slow_clk_sel;
switch (clk_sel) {
case 0:
return SOC_RTC_SLOW_CLK_SRC_RC_SLOW;
case 1:
return SOC_RTC_SLOW_CLK_SRC_XTAL32K;
case 2:
return SOC_RTC_SLOW_CLK_SRC_RC32K;
case 3:
return SOC_RTC_SLOW_CLK_SRC_OSC_SLOW;
default:
return SOC_RTC_SLOW_CLK_SRC_INVALID;
}
}
/**
* @brief Select the clock source for RTC_FAST_CLK
*
* @param in_sel One of the clock sources in soc_rtc_fast_clk_src_t
*/
static inline __attribute__((always_inline)) void clk_ll_rtc_fast_set_src(soc_rtc_fast_clk_src_t in_sel)
{
switch (in_sel) {
case SOC_RTC_FAST_CLK_SRC_RC_FAST:
LP_CLKRST.lp_clk_conf.fast_clk_sel = 0;
break;
case SOC_RTC_FAST_CLK_SRC_XTAL_D2:
LP_CLKRST.lp_clk_conf.fast_clk_sel = 1;
break;
default:
// Unsupported RTC_FAST_CLK mux input sel
abort();
}
}
/**
* @brief Get the clock source for RTC_FAST_CLK
*
* @return Currently selected clock source (one of soc_rtc_fast_clk_src_t values)
*/
static inline __attribute__((always_inline)) soc_rtc_fast_clk_src_t clk_ll_rtc_fast_get_src(void)
{
uint32_t clk_sel = LP_CLKRST.lp_clk_conf.fast_clk_sel;
switch (clk_sel) {
case 0:
return SOC_RTC_FAST_CLK_SRC_RC_FAST;
case 1:
return SOC_RTC_FAST_CLK_SRC_XTAL_D2;
default:
return SOC_RTC_FAST_CLK_SRC_INVALID;
}
}
/**
* @brief Set RC_FAST_CLK divider. The output from the divider is passed into rtc_fast_clk MUX.
*
* @param divider Divider of RC_FAST_CLK. Usually this divider is set to 1 (reg. value is 0) in bootloader stage.
*/
static inline __attribute__((always_inline)) void clk_ll_rc_fast_set_divider(uint32_t divider)
{
// No divider on the target
HAL_ASSERT(divider == 1);
}
/**
* @brief Get RC_FAST_CLK divider
*
* @return Divider. Divider = (CK8M_DIV_SEL + 1).
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_rc_fast_get_divider(void)
{
// No divider on the target, always return divider = 1
return 1;
}
/**
* @brief Set RC_SLOW_CLK divider
*
* @param divider Divider of RC_SLOW_CLK. Usually this divider is set to 1 (reg. value is 0) in bootloader stage.
*/
static inline __attribute__((always_inline)) void clk_ll_rc_slow_set_divider(uint32_t divider)
{
// No divider on the target
HAL_ASSERT(divider == 1);
}
/************************** LP STORAGE REGISTER STORE/LOAD **************************/
/**
* @brief Store XTAL_CLK frequency in RTC storage register
*
* Value of RTC_XTAL_FREQ_REG is stored as two copies in lower and upper 16-bit
* halves. These are the routines to work with that representation.
*
* @param xtal_freq_mhz XTAL frequency, in MHz. The frequency must be even,
* otherwise there will be a conflict with the low bit, which is used to disable logs
* in the ROM code.
*/
static inline __attribute__((always_inline)) void clk_ll_xtal_store_freq_mhz(uint32_t xtal_freq_mhz)
{
// Read whether ROM logging has been disabled (RTC_DISABLE_ROM_LOG bit)
uint32_t reg = READ_PERI_REG(RTC_XTAL_FREQ_REG) & RTC_DISABLE_ROM_LOG;
// If so, preserve that setting when writing the new value back
if (reg == RTC_DISABLE_ROM_LOG) {
xtal_freq_mhz |= 1;
}
WRITE_PERI_REG(RTC_XTAL_FREQ_REG, (xtal_freq_mhz & UINT16_MAX) | ((xtal_freq_mhz & UINT16_MAX) << 16));
}
/**
* @brief Load XTAL_CLK frequency from RTC storage register
*
* Value of RTC_XTAL_FREQ_REG is stored as two copies in lower and upper 16-bit
* halves. These are the routines to work with that representation.
*
* @return XTAL frequency, in MHz. Returns 0 if value in reg is invalid.
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_xtal_load_freq_mhz(void)
{
// Read from RTC storage register
uint32_t xtal_freq_reg = READ_PERI_REG(RTC_XTAL_FREQ_REG);
if ((xtal_freq_reg & 0xFFFF) == ((xtal_freq_reg >> 16) & 0xFFFF) &&
xtal_freq_reg != 0 && xtal_freq_reg != UINT32_MAX) {
return xtal_freq_reg & ~RTC_DISABLE_ROM_LOG & UINT16_MAX;
}
// If the format in reg is invalid
return 0;
}
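A small sketch of the store/load round trip for the retained XTAL frequency (40 is an example value; the RTC_DISABLE_ROM_LOG low-bit handling happens inside the store routine):
static void xtal_freq_retention_sketch(void)
{
    clk_ll_xtal_store_freq_mhz(40);
    uint32_t mhz = clk_ll_xtal_load_freq_mhz();   // 40, or 0 if the register content is invalid
    (void)mhz;
}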
/**
* @brief Store RTC_SLOW_CLK calibration value in RTC storage register
*
* Value of RTC_SLOW_CLK_CAL_REG has to be in the same format as returned by rtc_clk_cal (microseconds,
* in Q13.19 fixed-point format).
*
* @param cal_value The calibration value of slow clock period in microseconds, in Q13.19 fixed point format
*/
static inline __attribute__((always_inline)) void clk_ll_rtc_slow_store_cal(uint32_t cal_value)
{
REG_WRITE(RTC_SLOW_CLK_CAL_REG, cal_value);
}
/**
* @brief Load the calibration value of RTC_SLOW_CLK frequency from RTC storage register
*
* This value gets updated (i.e. rtc slow clock gets calibrated) every time RTC_SLOW_CLK source switches
*
* @return The calibration value of slow clock period in microseconds, in Q13.19 fixed point format
*/
static inline __attribute__((always_inline)) uint32_t clk_ll_rtc_slow_load_cal(void)
{
return REG_READ(RTC_SLOW_CLK_CAL_REG);
}
/*
Set the frequency division factor of ref_tick
*/
static inline void clk_ll_rc_fast_tick_conf(void)
{
PCR.ctrl_32k_conf.fosc_tick_num = REG_FOSC_TICK_NUM;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,53 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "esp_attr.h"
#include "soc/intpri_reg.h"
#ifdef __cplusplus
extern "C" {
#endif
// TODO: [ESP32C61] IDF-9262, inherit from ESP32C6
/**
* @brief Clear the crosscore interrupt that just occurred on the current core
*/
FORCE_INLINE_ATTR void crosscore_int_ll_clear_interrupt(int core_id)
{
WRITE_PERI_REG(INTPRI_CPU_INTR_FROM_CPU_0_REG, 0);
}
/**
* @brief Trigger a crosscore interrupt on the given core
*
* @param core_id Core to trigger an interrupt on. Ignored on single core targets.
*/
FORCE_INLINE_ATTR void crosscore_int_ll_trigger_interrupt(int core_id)
{
WRITE_PERI_REG(INTPRI_CPU_INTR_FROM_CPU_0_REG, INTPRI_CPU_INTR_FROM_CPU_0);
}
/**
* @brief Get the state of the crosscore interrupt register for the given core
*
* @param core_id Core to get the crosscore interrupt state of. Ignored on single core targets.
*
* @return Non zero value if a software interrupt is pending on the given core,
* 0 if no software interrupt is pending.
*/
FORCE_INLINE_ATTR uint32_t crosscore_int_ll_get_state(int core_id)
{
return REG_READ(INTPRI_CPU_INTR_FROM_CPU_0_REG);
}
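An illustrative sequence for the three helpers above on this single-core target (the core_id argument is accepted for API compatibility and ignored):
static void crosscore_int_sketch(void)
{
    crosscore_int_ll_trigger_interrupt(0);        // raise INTPRI_CPU_INTR_FROM_CPU_0
    if (crosscore_int_ll_get_state(0) != 0) {     // a software interrupt is pending
        crosscore_int_ll_clear_interrupt(0);      // acknowledge it
    }
}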
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,70 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "hal/efuse_ll.h"
#include_next "hal/efuse_hal.h"
#ifdef __cplusplus
extern "C" {
#endif
// TODO: [ESP32C61] IDF-9282, inherit from c6
/**
* @brief get chip version
*/
uint32_t efuse_hal_get_chip_revision(void);
/**
* @brief set eFuse timings
*
* @param apb_freq_hz APB frequency in Hz
*/
void efuse_hal_set_timing(uint32_t apb_freq_hz);
/**
* @brief trigger eFuse read operation
*/
void efuse_hal_read(void);
/**
* @brief clear registers for programming eFuses
*/
void efuse_hal_clear_program_registers(void);
/**
* @brief burn eFuses written in programming registers (one block at once)
*
* @param block block number
*/
void efuse_hal_program(uint32_t block);
/**
* @brief Calculate Reed-Solomon Encoding values for a block of efuse data.
*
* @param data Pointer to data buffer (length 32 bytes)
* @param rs_values Pointer to write encoded data to (length 12 bytes)
*/
void efuse_hal_rs_calculate(const void *data, void *rs_values);
/**
* @brief Checks coding error in a block
*
* @param block Index of efuse block
*
* @return True - block has an error.
* False - no error.
*/
bool efuse_hal_is_coding_error_in_block(unsigned block);
#ifdef __cplusplus
}
#endif
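A hedged sketch of the order in which these control functions are typically invoked when burning one block; filling the eFuse program-data registers is not exposed by this header and is elided:
static void efuse_burn_block_sketch(uint32_t block, const uint8_t data[32])
{
    uint8_t rs[12];
    efuse_hal_clear_program_registers();
    efuse_hal_rs_calculate(data, rs);   // Reed-Solomon check words for the 32-byte payload
    // ...write data and rs into the program-data registers (not shown here)...
    efuse_hal_program(block);           // burns the block, then re-reads the eFuse array
}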

View File

@@ -0,0 +1,200 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "soc/efuse_defs.h"
#include "soc/efuse_reg.h"
#include "soc/efuse_struct.h"
#include "hal/assert.h"
#include "rom/efuse.h"
// TODO: [ESP32C61] IDF-9282, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
// Always inline these functions even when no gcc optimization is applied.
/******************* eFuse fields *************************/
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_flash_crypt_cnt(void)
{
return EFUSE0.rd_repeat_data0.spi_boot_crypt_cnt;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_wdt_delay_sel(void)
{
return EFUSE0.rd_repeat_data0.wdt_delay_sel;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_mac0(void)
{
return EFUSE0.rd_mac_sys0.mac_0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_mac1(void)
{
return EFUSE0.rd_mac_sys1.mac_1;
}
__attribute__((always_inline)) static inline bool efuse_ll_get_secure_boot_v2_en(void)
{
return EFUSE0.rd_repeat_data1.secure_boot_en;
}
// use efuse_hal_get_major_chip_version() to get major chip version
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_chip_wafer_version_major(void)
{
return EFUSE0.rd_mac_sys3.mac_reserved_2;
}
// use efuse_hal_get_minor_chip_version() to get minor chip version
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_chip_wafer_version_minor(void)
{
return EFUSE0.rd_mac_sys3.sys_data_part0_0;
}
__attribute__((always_inline)) static inline bool efuse_ll_get_disable_wafer_version_major(void)
{
return EFUSE0.rd_repeat_data4.rd_repeat_data4;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_active_hp_dbias(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.active_hp_dbias;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_active_lp_dbias(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.active_lp_dbias;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_lslp_dbg(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.lslp_hp_dbg;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_lslp_hp_dbias(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.lslp_hp_dbias;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_dslp_dbg(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.dslp_lp_dbg;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_dslp_lp_dbias(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.dslp_lp_dbias;
return 0;
}
__attribute__((always_inline)) static inline int32_t efuse_ll_get_dbias_vol_gap(void)
{
HAL_ASSERT(0);
// return EFUSE0.rd_mac_spi_sys_2.dbias_vol_gap;
return 0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_blk_version_major(void)
{
return EFUSE0.rd_mac_sys3.sys_data_part0_0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_blk_version_minor(void)
{
return EFUSE0.rd_mac_sys3.sys_data_part0_0;
}
__attribute__((always_inline)) static inline bool efuse_ll_get_disable_blk_version_major(void)
{
return EFUSE0.rd_repeat_data4.rd_repeat_data4;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_chip_ver_pkg(void)
{
return EFUSE0.rd_mac_sys3.sys_data_part0_0;
}
__attribute__((always_inline)) static inline uint32_t efuse_ll_get_ocode(void)
{
return EFUSE0.rd_sys_part1_datan[4].sys_data_part1_n;
}
/******************* eFuse control functions *************************/
__attribute__((always_inline)) static inline bool efuse_ll_get_read_cmd(void)
{
return EFUSE0.cmd.read_cmd;
}
__attribute__((always_inline)) static inline bool efuse_ll_get_pgm_cmd(void)
{
return EFUSE0.cmd.pgm_cmd;
}
__attribute__((always_inline)) static inline void efuse_ll_set_read_cmd(void)
{
EFUSE0.cmd.read_cmd = 1;
}
__attribute__((always_inline)) static inline void efuse_ll_set_pgm_cmd(uint32_t block)
{
HAL_ASSERT(block < ETS_EFUSE_BLOCK_MAX);
EFUSE0.cmd.val = ((block << EFUSE_BLK_NUM_S) & EFUSE_BLK_NUM_M) | EFUSE_PGM_CMD;
}
__attribute__((always_inline)) static inline void efuse_ll_set_conf_read_op_code(void)
{
EFUSE0.conf.op_code = EFUSE_READ_OP_CODE;
}
__attribute__((always_inline)) static inline void efuse_ll_set_conf_write_op_code(void)
{
EFUSE0.conf.op_code = EFUSE_WRITE_OP_CODE;
}
__attribute__((always_inline)) static inline void efuse_ll_set_dac_num(uint8_t val)
{
EFUSE0.dac_conf.dac_num = val;
}
__attribute__((always_inline)) static inline void efuse_ll_set_dac_clk_div(uint8_t val)
{
EFUSE0.dac_conf.dac_clk_div = val;
}
__attribute__((always_inline)) static inline void efuse_ll_set_pwr_on_num(uint16_t val)
{
EFUSE0.wr_tim_conf1.pwr_on_num = val;
}
__attribute__((always_inline)) static inline void efuse_ll_set_pwr_off_num(uint16_t value)
{
EFUSE0.wr_tim_conf2.pwr_off_num = value;
}
/******************* eFuse control functions *************************/
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,680 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
* The HAL is not a public API, don't use it in application code.
* See readme.md in hal/include/hal/readme.md
******************************************************************************/
// The LL layer for ESP32-C61 GPIO register operations
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include "soc/soc.h"
#include "soc/gpio_periph.h"
#include "soc/gpio_struct.h"
#include "soc/lp_aon_struct.h"
#include "soc/lp_gpio_struct.h"
#include "soc/pmu_struct.h"
#include "soc/usb_serial_jtag_reg.h"
#include "soc/pcr_struct.h"
#include "soc/clk_tree_defs.h"
#include "hal/gpio_types.h"
#include "hal/misc.h"
#include "hal/assert.h"
// TODO: [ESP32C61] IDF-9316, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
// Get the GPIO hardware instance for the given gpio num
#define GPIO_LL_GET_HW(num) (((num) == 0) ? (&GPIO) : NULL)
#define GPIO_LL_PRO_CPU_INTR_ENA (BIT(0))
#define GPIO_LL_PRO_CPU_NMI_INTR_ENA (BIT(1))
/**
* @brief Get the configuration for an IO
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
* @param pu Pull-up enabled or not
* @param pd Pull-down enabled or not
* @param ie Input enabled or not
* @param oe Output enabled or not
* @param od Open-drain enabled or not
* @param drv Drive strength value
* @param fun_sel IOMUX function selection value
* @param sig_out Outputting peripheral signal index
* @param slp_sel Pin sleep mode enabled or not
*/
static inline void gpio_ll_get_io_config(gpio_dev_t *hw, uint32_t gpio_num,
bool *pu, bool *pd, bool *ie, bool *oe, bool *od, uint32_t *drv,
uint32_t *fun_sel, uint32_t *sig_out, bool *slp_sel)
{
uint32_t bit_mask = 1 << gpio_num;
uint32_t iomux_reg_val = REG_READ(GPIO_PIN_MUX_REG[gpio_num]);
*pu = (iomux_reg_val & FUN_PU_M) >> FUN_PU_S;
*pd = (iomux_reg_val & FUN_PD_M) >> FUN_PD_S;
*ie = (iomux_reg_val & FUN_IE_M) >> FUN_IE_S;
*oe = (hw->enable.val & bit_mask) >> gpio_num;
*od = hw->pinn[gpio_num].pinn_pad_driver;
*drv = (iomux_reg_val & FUN_DRV_M) >> FUN_DRV_S;
*fun_sel = (iomux_reg_val & MCU_SEL_M) >> MCU_SEL_S;
*sig_out = hw->funcn_out_sel_cfg[gpio_num].funcn_out_sel;
*slp_sel = (iomux_reg_val & SLP_SEL_M) >> SLP_SEL_S;
}
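A sketch of dumping one pin's state with the helper above (printf and the formatting are illustrative and require <stdio.h>):
static void gpio_dump_io_config_sketch(uint32_t gpio_num)
{
    bool pu, pd, ie, oe, od, slp;
    uint32_t drv, fun_sel, sig_out;
    gpio_ll_get_io_config(&GPIO, gpio_num, &pu, &pd, &ie, &oe, &od, &drv, &fun_sel, &sig_out, &slp);
    printf("IO%lu: pu=%d pd=%d ie=%d oe=%d od=%d drv=%lu func=%lu sig=%lu slp=%d\n",
           (unsigned long)gpio_num, pu, pd, ie, oe, od,
           (unsigned long)drv, (unsigned long)fun_sel, (unsigned long)sig_out, slp);
}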
/**
* @brief Enable pull-up on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_pullup_en(gpio_dev_t *hw, uint32_t gpio_num)
{
REG_SET_BIT(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_PU);
}
/**
* @brief Disable pull-up on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
__attribute__((always_inline))
static inline void gpio_ll_pullup_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
REG_CLR_BIT(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_PU);
}
/**
* @brief Enable pull-down on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_pulldown_en(gpio_dev_t *hw, uint32_t gpio_num)
{
REG_SET_BIT(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_PD);
}
/**
* @brief Disable pull-down on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
__attribute__((always_inline))
static inline void gpio_ll_pulldown_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
// The pull-up state of the USB pins is controlled by the pin's own pull-up value together with the USB pull-up value
// USB DP pin is default to PU enabled
// Note that esp32c61 has supported USB_EXCHG_PINS feature. If this efuse is burnt, the gpio pin
// which should be checked is USB_INT_PHY0_DM_GPIO_NUM instead.
// TODO: read the specific efuse with efuse_ll.h
// if (gpio_num == USB_DP_GPIO_NUM) {
// SET_PERI_REG_MASK(USB_SERIAL_JTAG_CONF0_REG, USB_SERIAL_JTAG_PAD_PULL_OVERRIDE);
// CLEAR_PERI_REG_MASK(USB_SERIAL_JTAG_CONF0_REG, USB_SERIAL_JTAG_DP_PULLUP);
// }
REG_CLR_BIT(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_PD);
}
/**
* @brief GPIO set interrupt trigger type
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number. If you want to set the trigger type of e.g. GPIO16, gpio_num should be GPIO_NUM_16 (16);
* @param intr_type Interrupt type, select from gpio_int_type_t
*/
static inline void gpio_ll_set_intr_type(gpio_dev_t *hw, uint32_t gpio_num, gpio_int_type_t intr_type)
{
hw->pinn[gpio_num].pinn_int_type = intr_type;
}
/**
* @brief Get GPIO interrupt status
*
* @param hw Peripheral GPIO hardware instance address.
* @param core_id interrupt core id
* @param status interrupt status
*/
__attribute__((always_inline))
static inline void gpio_ll_get_intr_status(gpio_dev_t *hw, uint32_t core_id, uint32_t *status)
{
(void)core_id;
*status = hw->procpu_int.procpu_int;
}
/**
* @brief Get GPIO interrupt status high
*
* @param hw Peripheral GPIO hardware instance address.
* @param core_id interrupt core id
* @param status interrupt status high
*/
__attribute__((always_inline))
static inline void gpio_ll_get_intr_status_high(gpio_dev_t *hw, uint32_t core_id, uint32_t *status)
{
*status = 0; // Less than 32 GPIOs in ESP32-C61
}
/**
* @brief Clear GPIO interrupt status
*
* @param hw Peripheral GPIO hardware instance address.
* @param mask interrupt status clear mask
*/
__attribute__((always_inline))
static inline void gpio_ll_clear_intr_status(gpio_dev_t *hw, uint32_t mask)
{
hw->status_w1tc.status_w1tc = mask;
}
/**
* @brief Clear GPIO interrupt status high
*
* @param hw Peripheral GPIO hardware instance address.
* @param mask interrupt status high clear mask
*/
__attribute__((always_inline))
static inline void gpio_ll_clear_intr_status_high(gpio_dev_t *hw, uint32_t mask)
{
// Less than 32 GPIOs on ESP32-C61. Do nothing.
}
/**
* @brief Enable GPIO module interrupt signal
*
* @param hw Peripheral GPIO hardware instance address.
 * @param core_id Core ID of the CPU on which the interrupt is enabled
* @param gpio_num GPIO number. If you want to enable the interrupt of e.g. GPIO16, gpio_num should be GPIO_NUM_16 (16);
*/
__attribute__((always_inline))
static inline void gpio_ll_intr_enable_on_core(gpio_dev_t *hw, uint32_t core_id, uint32_t gpio_num)
{
HAL_ASSERT(core_id == 0 && "target SoC only has a single core");
GPIO.pinn[gpio_num].pinn_int_ena = GPIO_LL_PRO_CPU_INTR_ENA; //enable pro cpu intr
}
/**
* @brief Disable GPIO module interrupt signal
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number. If you want to disable the interrupt of e.g. GPIO16, gpio_num should be GPIO_NUM_16 (16);
*/
__attribute__((always_inline))
static inline void gpio_ll_intr_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->pinn[gpio_num].pinn_int_ena = 0; //disable GPIO intr
}
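/*
 * Usage sketch (illustrative only): arming an edge interrupt on a pad and
 * clearing it from the ISR. The GPIO number is a hypothetical example; the
 * core_id is 0 since this target is single-core.
 *
 *   gpio_dev_t *hw = GPIO_LL_GET_HW(0);
 *   gpio_ll_set_intr_type(hw, 5, GPIO_INTR_POSEDGE);
 *   gpio_ll_intr_enable_on_core(hw, 0, 5);
 *
 *   // In the ISR:
 *   uint32_t status;
 *   gpio_ll_get_intr_status(hw, 0, &status);
 *   gpio_ll_clear_intr_status(hw, status);
 */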
/**
* @brief Disable input mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
__attribute__((always_inline))
static inline void gpio_ll_input_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_INPUT_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable input mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_input_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_INPUT_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable GPIO pin filter
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number of the pad.
*/
static inline void gpio_ll_pin_filter_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_FILTER_EN(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO pin filter
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number of the pad.
*/
static inline void gpio_ll_pin_filter_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_FILTER_DIS(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable output mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
__attribute__((always_inline))
static inline void gpio_ll_output_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->enable_w1tc.enable_w1tc = (0x1 << gpio_num);
// Ensure no other output signal is routed via GPIO matrix to this pin
REG_WRITE(GPIO_FUNC0_OUT_SEL_CFG_REG + (gpio_num * 4), SIG_GPIO_OUT_IDX);
}
/**
* @brief Enable output mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
__attribute__((always_inline))
static inline void gpio_ll_output_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->enable_w1ts.enable_w1ts = (0x1 << gpio_num);
}
/**
* @brief Disable open-drain mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_od_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->pinn[gpio_num].pinn_pad_driver = 0;
}
/**
* @brief Enable open-drain mode on GPIO.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_od_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->pinn[gpio_num].pinn_pad_driver = 1;
}
/**
* @brief GPIO set output level
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number. If you want to set the output level of e.g. GPIO16, gpio_num should be GPIO_NUM_16 (16);
* @param level Output level. 0: low ; 1: high
*/
__attribute__((always_inline))
static inline void gpio_ll_set_level(gpio_dev_t *hw, uint32_t gpio_num, uint32_t level)
{
if (level) {
hw->out_w1ts.out_w1ts = (1 << gpio_num);
} else {
hw->out_w1tc.out_w1tc = (1 << gpio_num);
}
}
/**
* @brief GPIO get input level
*
* @warning If the pad is not configured for input (or input and output) the returned value is always 0.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number. If you want to get the logic level of e.g. pin GPIO16, gpio_num should be GPIO_NUM_16 (16);
*
* @return
* - 0 the GPIO input level is 0
* - 1 the GPIO input level is 1
*/
static inline int gpio_ll_get_level(gpio_dev_t *hw, uint32_t gpio_num)
{
return (hw->in.in_data_next >> gpio_num) & 0x1;
}
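/*
 * Usage sketch (illustrative only): driving one pad and sampling another.
 * GPIO numbers are hypothetical examples.
 *
 *   gpio_dev_t *hw = GPIO_LL_GET_HW(0);
 *   gpio_ll_output_enable(hw, 8);
 *   gpio_ll_set_level(hw, 8, 1);
 *
 *   gpio_ll_input_enable(hw, 9);
 *   int level = gpio_ll_get_level(hw, 9);   // 0 or 1
 */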
/**
* @brief Enable GPIO wake-up function.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number.
*/
static inline void gpio_ll_wakeup_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->pinn[gpio_num].pinn_wakeup_enable = 0x1;
}
/**
* @brief Disable GPIO wake-up function.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_wakeup_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
hw->pinn[gpio_num].pinn_wakeup_enable = 0;
}
/**
* @brief Set GPIO pad drive capability
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number, only output GPIOs are supported
* @param strength Drive capability of the pad
*/
static inline void gpio_ll_set_drive_capability(gpio_dev_t *hw, uint32_t gpio_num, gpio_drive_cap_t strength)
{
SET_PERI_REG_BITS(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_DRV_V, strength, FUN_DRV_S);
}
/**
* @brief Get GPIO pad drive capability
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number, only output GPIOs are supported
* @param strength Pointer to accept drive capability of the pad
*/
static inline void gpio_ll_get_drive_capability(gpio_dev_t *hw, uint32_t gpio_num, gpio_drive_cap_t *strength)
{
*strength = (gpio_drive_cap_t)GET_PERI_REG_BITS2(IO_MUX_GPIO0_REG + (gpio_num * 4), FUN_DRV_V, FUN_DRV_S);
}
/**
* @brief Enable gpio pad hold function.
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number, only output GPIOs are supported
*/
static inline void gpio_ll_hold_en(gpio_dev_t *hw, uint32_t gpio_num)
{
LP_AON.gpio_hold0.gpio_hold0 |= GPIO_HOLD_MASK[gpio_num];
}
/**
* @brief Disable gpio pad hold function.
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number, only output GPIOs are supported
*/
static inline void gpio_ll_hold_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
LP_AON.gpio_hold0.gpio_hold0 &= ~GPIO_HOLD_MASK[gpio_num];
}
/**
* @brief Get digital gpio pad hold status.
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number, only output GPIOs are supported
*
* @note caller must ensure that gpio_num is a digital io pad
*
* @return
* - true digital gpio pad is held
* - false digital gpio pad is unheld
*/
__attribute__((always_inline))
static inline bool gpio_ll_is_digital_io_hold(gpio_dev_t *hw, uint32_t gpio_num)
{
return !!(LP_AON.gpio_hold0.gpio_hold0 & BIT(gpio_num));
}
/**
* @brief Set pad input to a peripheral signal through the IOMUX.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number of the pad.
* @param signal_idx Peripheral signal id to input. One of the ``*_IN_IDX`` signals in ``soc/gpio_sig_map.h``.
*/
__attribute__((always_inline))
static inline void gpio_ll_iomux_in(gpio_dev_t *hw, uint32_t gpio, uint32_t signal_idx)
{
REG_CLR_BIT(GPIO_FUNC0_IN_SEL_CFG_REG + signal_idx * 4, GPIO_SIG0_IN_SEL);
PIN_INPUT_ENABLE(IO_MUX_GPIO0_REG + (gpio * 4));
}
/**
* @brief Select a function for the pin in the IOMUX
*
* @param pin_name Pin name to configure
* @param func Function to assign to the pin
*/
static inline void gpio_ll_iomux_func_sel(uint32_t pin_name, uint32_t func)
{
// Disable USB Serial JTAG if pin 12 or pin 13 needs to select an IOMUX function
if (pin_name == IO_MUX_GPIO12_REG || pin_name == IO_MUX_GPIO13_REG) {
CLEAR_PERI_REG_MASK(USB_SERIAL_JTAG_CONF0_REG, USB_SERIAL_JTAG_USB_PAD_ENABLE);
}
PIN_FUNC_SELECT(pin_name, func);
}
/**
* @brief Control the pin in the IOMUX
*
 * @param val Control value to write
 * @param bmap Write mask for the control value
 * @param shift Bit offset at which the masked value is written
*/
static inline __attribute__((always_inline)) void gpio_ll_set_pin_ctrl(uint32_t val, uint32_t bmap, uint32_t shift)
{
SET_PERI_REG_BITS(PIN_CTRL, bmap, val, shift);
}
/**
* @brief Select a function for the pin in the IOMUX
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
* @param func Function to assign to the pin
*/
__attribute__((always_inline))
static inline void gpio_ll_func_sel(gpio_dev_t *hw, uint8_t gpio_num, uint32_t func)
{
// Disable USB Serial JTAG if pin 12 or pin 13 needs to select an IOMUX function
// if (gpio_num == USB_DM_GPIO_NUM || gpio_num == USB_DP_GPIO_NUM) {
// CLEAR_PERI_REG_MASK(USB_SERIAL_JTAG_CONF0_REG, USB_SERIAL_JTAG_USB_PAD_ENABLE);
// }
PIN_FUNC_SELECT(IO_MUX_GPIO0_REG + (gpio_num * 4), func);
}
/**
* @brief Set peripheral output to an GPIO pad through the IOMUX.
*
* @param hw Peripheral GPIO hardware instance address.
 * @param gpio_num GPIO number of the pad.
 * @param func The IOMUX function number to route to the output pin.
 * One of the ``FUNC_X_*`` values of the specified pin (X) in ``soc/io_mux_reg.h``.
* @param oen_inv True if the output enable needs to be inverted, otherwise False.
*/
static inline void gpio_ll_iomux_out(gpio_dev_t *hw, uint8_t gpio_num, int func, uint32_t oen_inv)
{
hw->funcn_out_sel_cfg[gpio_num].funcn_oe_sel = 0;
hw->funcn_out_sel_cfg[gpio_num].funcn_oe_inv_sel = oen_inv;
gpio_ll_func_sel(hw, gpio_num, func);
}
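/*
 * Usage sketch (illustrative only): routing signals through the IOMUX with
 * the two helpers above. `func` and `sig_idx` are placeholders for a
 * FUNC_* value from `soc/io_mux_reg.h` and a *_IN_IDX value from
 * `soc/gpio_sig_map.h`; the GPIO numbers are hypothetical.
 *
 *   gpio_dev_t *hw = GPIO_LL_GET_HW(0);
 *   gpio_ll_iomux_out(hw, 6, func, 0);    // pad 6 driven by the IOMUX function
 *   gpio_ll_iomux_in(hw, 7, sig_idx);     // pad 7 fed to the peripheral input
 */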
/**
* @brief Set clock source of IO MUX module
*
* @param src IO MUX clock source (only a subset of soc_module_clk_t values are valid)
*/
static inline void gpio_ll_iomux_set_clk_src(soc_module_clk_t src)
{
switch (src) {
case SOC_MOD_CLK_XTAL:
PCR.iomux_clk_conf.iomux_func_clk_sel = 3;
break;
case SOC_MOD_CLK_PLL_F80M:
PCR.iomux_clk_conf.iomux_func_clk_sel = 1;
break;
default:
// Unsupported IO_MUX clock source
HAL_ASSERT(false);
}
}
/**
* @brief Get the GPIO number that is routed to the input peripheral signal through GPIO matrix.
*
* @param hw Peripheral GPIO hardware instance address.
* @param in_sig_idx Peripheral signal index (tagged as input attribute).
*
* @return
* - -1 Signal bypassed GPIO matrix
* - Others GPIO number
*/
static inline int gpio_ll_get_in_signal_connected_io(gpio_dev_t *hw, uint32_t in_sig_idx)
{
uint32_t reg_val = REG_READ(GPIO_FUNC0_IN_SEL_CFG_REG + in_sig_idx * 4);
if (reg_val & GPIO_SIG0_IN_SEL) {
return (reg_val & GPIO_FUNC0_IN_SEL_M);
} else {
return -1;
}
}
/**
* @brief Force hold digital io pad.
 * @note Force hold the GPIOs, whether the chip is in sleep mode or wakeup mode.
*/
static inline void gpio_ll_force_hold_all(void)
{
// WT flag, it gets self-cleared after the configuration is done
PMU.imm.pad_hold_all.tie_high_hp_pad_hold_all = 1;
}
/**
* @brief Force unhold digital io pad.
 * @note Force unhold the GPIOs, whether the chip is in sleep mode or wakeup mode.
*/
static inline void gpio_ll_force_unhold_all(void)
{
// WT flag, it gets self-cleared after the configuration is done
PMU.imm.pad_hold_all.tie_low_hp_pad_hold_all = 1;
}
/**
* @brief Enable GPIO pin to use sleep mode pin functions during light sleep.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_sel_en(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_SEL_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO pin to use sleep mode pin functions during light sleep.
 * Pin functions remain the same in both normal execution and light-sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_sel_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_SEL_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO pull-up in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_pullup_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_PULLUP_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable GPIO pull-up in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_pullup_en(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_PULLUP_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable GPIO pull-down in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_pulldown_en(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_PULLDOWN_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO pull-down in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_pulldown_dis(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_PULLDOWN_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO input in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_input_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_INPUT_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable GPIO input in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_input_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_INPUT_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Disable GPIO output in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_output_disable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_OUTPUT_DISABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
/**
* @brief Enable GPIO output in sleep mode.
*
* @param hw Peripheral GPIO hardware instance address.
* @param gpio_num GPIO number
*/
static inline void gpio_ll_sleep_output_enable(gpio_dev_t *hw, uint32_t gpio_num)
{
PIN_SLP_OUTPUT_ENABLE(IO_MUX_GPIO0_REG + (gpio_num * 4));
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,410 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
 * The LL is not a public API, do not use it in application code.
* See readme.md in soc/include/hal/readme.md
******************************************************************************/
// The Low-level layer for SPI Flash
#pragma once
#include <stdlib.h>
#include "soc/spi_periph.h"
#include "soc/spi_struct.h"
#include "hal/spi_types.h"
#include "hal/spi_flash_types.h"
#include <sys/param.h> // For MIN/MAX
#include <stdbool.h>
#include <string.h>
#include "hal/misc.h"
// TODO: [ESP32C61] IDF-9314, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
//NOTE: These macros are changed on c3 for build. MODIFY these when bringing up flash.
#define gpspi_flash_ll_get_hw(host_id) ( ((host_id)==SPI2_HOST) ? &GPSPI2 : ({abort();(spi_dev_t*)0;}) )
#define gpspi_flash_ll_hw_get_id(dev) ( ((dev) == (void*)&GPSPI2) ? SPI2_HOST : -1 )
typedef typeof(GPSPI2.clock.val) gpspi_flash_ll_clock_reg_t;
#define GPSPI_FLASH_LL_PERIPHERAL_FREQUENCY_MHZ (80)
/*------------------------------------------------------------------------------
* Control
*----------------------------------------------------------------------------*/
/**
* Reset peripheral registers before configuration and starting control
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void gpspi_flash_ll_reset(spi_dev_t *dev)
{
dev->user.val = 0;
dev->ctrl.val = 0;
dev->clk_gate.clk_en = 1;
dev->clk_gate.mst_clk_active = 1;
dev->clk_gate.mst_clk_sel = 1;
dev->dma_conf.val = 0;
dev->dma_conf.slv_tx_seg_trans_clr_en = 1;
dev->dma_conf.slv_rx_seg_trans_clr_en = 1;
dev->dma_conf.dma_slv_seg_trans_en = 0;
}
/**
* Check whether the previous operation is done.
*
* @param dev Beginning address of the peripheral registers.
*
* @return true if last command is done, otherwise false.
*/
static inline bool gpspi_flash_ll_cmd_is_done(const spi_dev_t *dev)
{
return (dev->cmd.usr == 0);
}
/**
* Get the read data from the buffer after ``gpspi_flash_ll_read`` is done.
*
* @param dev Beginning address of the peripheral registers.
* @param buffer Buffer to hold the output data
* @param read_len Length to get out of the buffer
*/
static inline void gpspi_flash_ll_get_buffer_data(spi_dev_t *dev, void *buffer, uint32_t read_len)
{
if (((intptr_t)buffer % 4 == 0) && (read_len % 4 == 0)) {
// If everything is word-aligned, do a faster memcpy
memcpy(buffer, (void *)dev->data_buf, read_len);
} else {
// Otherwise, slow(er) path copies word by word
int copy_len = read_len;
for (int i = 0; i < (read_len + 3) / 4; i++) {
int word_len = MIN(sizeof(uint32_t), copy_len);
uint32_t word = dev->data_buf[i].buf;
memcpy(buffer, &word, word_len);
buffer = (void *)((intptr_t)buffer + word_len);
copy_len -= word_len;
}
}
}
/**
* Write a word to the data buffer.
*
* @param dev Beginning address of the peripheral registers.
* @param word Data to write at address 0.
*/
static inline void gpspi_flash_ll_write_word(spi_dev_t *dev, uint32_t word)
{
dev->data_buf[0].buf = word;
}
/**
* Set the data to be written in the data buffer.
*
* @param dev Beginning address of the peripheral registers.
* @param buffer Buffer holding the data
* @param length Length of data in bytes.
*/
static inline void gpspi_flash_ll_set_buffer_data(spi_dev_t *dev, const void *buffer, uint32_t length)
{
// Load data registers, word at a time
int num_words = (length + 3) / 4;
for (int i = 0; i < num_words; i++) {
uint32_t word = 0;
uint32_t word_len = MIN(length, sizeof(word));
memcpy(&word, buffer, word_len);
dev->data_buf[i].buf = word;
length -= word_len;
buffer = (void *)((intptr_t)buffer + word_len);
}
}
/**
* Trigger a user defined transaction. All phases, including command, address, dummy, and the data phases,
* should be configured before this is called.
*
* @param dev Beginning address of the peripheral registers.
 * @param pe_ops Whether this is a page program/erase operation (not used in gpspi)
*/
static inline void gpspi_flash_ll_user_start(spi_dev_t *dev, bool pe_ops)
{
dev->cmd.update = 1;
while (dev->cmd.update);
dev->cmd.usr = 1;
}
/**
 * In user mode, it is set to indicate that a program/erase operation will be triggered.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void gpspi_flash_ll_set_pe_bit(spi_dev_t *dev)
{
// Not supported on GPSPI
}
/**
 * Set HD pin high when the flash works in SPI mode.
 *
 * @param dev Beginning address of the peripheral registers.
 * @param pol_val Hold signal polarity value.
 */
static inline void gpspi_flash_ll_set_hold_pol(spi_dev_t *dev, uint32_t pol_val)
{
dev->ctrl.hold_pol = pol_val;
}
/**
* Check whether the host is idle to perform new commands.
*
* @param dev Beginning address of the peripheral registers.
*
* @return true if the host is idle, otherwise false
*/
static inline bool gpspi_flash_ll_host_idle(const spi_dev_t *dev)
{
return dev->cmd.usr == 0;
}
/**
* Set phases for user-defined transaction to read
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void gpspi_flash_ll_read_phase(spi_dev_t *dev)
{
typeof (dev->user) user = {
.usr_mosi = 0,
.usr_miso = 1,
.usr_addr = 1,
.usr_command = 1,
};
dev->user.val = user.val;
}
/*------------------------------------------------------------------------------
* Configs
*----------------------------------------------------------------------------*/
/**
* Select which pin to use for the flash
*
* @param dev Beginning address of the peripheral registers.
* @param pin Pin ID to use, 0-2. Set to other values to disable all the CS pins.
*/
static inline void gpspi_flash_ll_set_cs_pin(spi_dev_t *dev, int pin)
{
dev->misc.cs0_dis = (pin == 0) ? 0 : 1;
dev->misc.cs1_dis = (pin == 1) ? 0 : 1;
}
/**
* Set the read io mode.
*
* @param dev Beginning address of the peripheral registers.
* @param read_mode I/O mode to use in the following transactions.
*/
static inline void gpspi_flash_ll_set_read_mode(spi_dev_t *dev, esp_flash_io_mode_t read_mode)
{
typeof (dev->ctrl) ctrl;
ctrl.val = dev->ctrl.val;
typeof (dev->user) user;
user.val = dev->user.val;
ctrl.val &= ~(SPI_FCMD_QUAD_M | SPI_FADDR_QUAD_M | SPI_FREAD_QUAD_M | SPI_FCMD_DUAL_M | SPI_FADDR_DUAL_M | SPI_FREAD_DUAL_M);
user.val &= ~(SPI_FWRITE_QUAD_M | SPI_FWRITE_DUAL_M);
switch (read_mode) {
case SPI_FLASH_FASTRD:
//the default option
case SPI_FLASH_SLOWRD:
break;
case SPI_FLASH_QIO:
ctrl.fread_quad = 1;
ctrl.faddr_quad = 1;
user.fwrite_quad = 1;
break;
case SPI_FLASH_QOUT:
ctrl.fread_quad = 1;
user.fwrite_quad = 1;
break;
case SPI_FLASH_DIO:
ctrl.fread_dual = 1;
ctrl.faddr_dual = 1;
user.fwrite_dual = 1;
break;
case SPI_FLASH_DOUT:
ctrl.fread_dual = 1;
user.fwrite_dual = 1;
break;
default:
abort();
}
dev->ctrl.val = ctrl.val;
dev->user.val = user.val;
}
/**
* Set clock frequency to work at.
*
* @param dev Beginning address of the peripheral registers.
* @param clock_val pointer to the clock value to set
*/
static inline void gpspi_flash_ll_set_clock(spi_dev_t *dev, gpspi_flash_ll_clock_reg_t *clock_val)
{
dev->clock.val = *clock_val;
}
/**
* Set the input length, in bits.
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of input, in bits.
*/
static inline void gpspi_flash_ll_set_miso_bitlen(spi_dev_t *dev, uint32_t bitlen)
{
dev->user.usr_miso = bitlen > 0;
if (bitlen) {
dev->ms_dlen.ms_data_bitlen = bitlen - 1;
}
}
/**
* Set the output length, in bits (not including command, address and dummy
* phases)
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of output, in bits.
*/
static inline void gpspi_flash_ll_set_mosi_bitlen(spi_dev_t *dev, uint32_t bitlen)
{
dev->user.usr_mosi = bitlen > 0;
if (bitlen) {
dev->ms_dlen.ms_data_bitlen = bitlen - 1;
}
}
/**
* Set the command.
*
* @param dev Beginning address of the peripheral registers.
* @param command Command to send
* @param bitlen Length of the command
*/
static inline void gpspi_flash_ll_set_command(spi_dev_t *dev, uint8_t command, uint32_t bitlen)
{
dev->user.usr_command = 1;
typeof(dev->user2) user2 = {
.usr_command_value = command,
.usr_command_bitlen = (bitlen - 1),
};
dev->user2.val = user2.val;
}
/**
 * Get the address length that is set in the register, in bits.
 *
 * @param dev Beginning address of the peripheral registers.
 *
 * @return Address length in bits, or 0 if the address phase is disabled.
 */
static inline int gpspi_flash_ll_get_addr_bitlen(spi_dev_t *dev)
{
return dev->user.usr_addr ? dev->user1.usr_addr_bitlen + 1 : 0;
}
/**
 * Set the address length to send, in bits. Should be called before commands that require the address, e.g. erase sector, read, write...
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of the address, in bits
*/
static inline void gpspi_flash_ll_set_addr_bitlen(spi_dev_t *dev, uint32_t bitlen)
{
dev->user1.usr_addr_bitlen = (bitlen - 1);
dev->user.usr_addr = bitlen ? 1 : 0;
}
/**
 * Set the address to send in user mode. Should be called before commands that require the address, e.g. erase sector, read, write...
 *
 * @param dev Beginning address of the peripheral registers.
 * @param addr Address to send
 * @param bitlen Length of the address, in bits
*/
static inline void gpspi_flash_ll_set_usr_address(spi_dev_t *dev, uint32_t addr, uint32_t bitlen)
{
// The blank region should be all ones
uint32_t padding_ones = (bitlen == 32? 0 : UINT32_MAX >> bitlen);
dev->addr.val = (addr << (32 - bitlen)) | padding_ones;
}
/**
 * Set the address to send. Should be called before commands that require the address, e.g. erase sector, read, write...
*
* @param dev Beginning address of the peripheral registers.
* @param addr Address to send
*/
static inline void gpspi_flash_ll_set_address(spi_dev_t *dev, uint32_t addr)
{
dev->addr.val = addr;
}
/**
* Set the length of dummy cycles.
*
* @param dev Beginning address of the peripheral registers.
* @param dummy_n Cycles of dummy phases
*/
static inline void gpspi_flash_ll_set_dummy(spi_dev_t *dev, uint32_t dummy_n)
{
dev->user.usr_dummy = dummy_n ? 1 : 0;
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->user1, usr_dummy_cyclelen, dummy_n - 1);
}
/**
* Set extra hold time of CS after the clocks.
*
* @param dev Beginning address of the peripheral registers.
* @param hold_n Cycles of clocks before CS is inactive
*/
static inline void gpspi_flash_ll_set_hold(spi_dev_t *dev, uint32_t hold_n)
{
dev->user1.cs_hold_time = hold_n - 1;
dev->user.cs_hold = (hold_n > 0? 1: 0);
}
/**
 * Set extra setup time of CS before the clocks start.
 *
 * @param dev Beginning address of the peripheral registers.
 * @param cs_setup_time Cycles of clocks between CS being activated and the transaction starting
 */
static inline void gpspi_flash_ll_set_cs_setup(spi_dev_t *dev, uint32_t cs_setup_time)
{
dev->user.cs_setup = (cs_setup_time > 0 ? 1 : 0);
dev->user1.cs_setup_time = cs_setup_time - 1;
}
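/*
 * Usage sketch (illustrative only): a generic user-mode read assembled from
 * the setters above. The opcode, address and length are placeholder values;
 * real callers take them from the flash chip driver.
 *
 *   uint8_t buf[16];
 *   spi_dev_t *dev = gpspi_flash_ll_get_hw(SPI2_HOST);
 *   gpspi_flash_ll_read_phase(dev);                // command + address + miso
 *   gpspi_flash_ll_set_command(dev, 0x03, 8);      // e.g. classic READ opcode
 *   gpspi_flash_ll_set_addr_bitlen(dev, 24);
 *   gpspi_flash_ll_set_usr_address(dev, 0x1000, 24);
 *   gpspi_flash_ll_set_miso_bitlen(dev, sizeof(buf) * 8);
 *   gpspi_flash_ll_user_start(dev, false);
 *   while (!gpspi_flash_ll_cmd_is_done(dev)) { }
 *   gpspi_flash_ll_get_buffer_data(dev, buf, sizeof(buf));
 */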
/**
* Calculate spi_flash clock frequency division parameters for register.
*
* @param clkdiv frequency division factor
*
* @return Register setting for the given clock division factor.
*/
static inline uint32_t gpspi_flash_ll_calculate_clock_reg(uint8_t clkdiv)
{
uint32_t div_parameter;
// See comments of `clock` in `spi_struct.h`
if (clkdiv == 1) {
div_parameter = (1 << 31);
} else {
div_parameter = ((clkdiv - 1) | (((clkdiv/2 - 1) & 0xff) << 6 ) | (((clkdiv - 1) & 0xff) << 12));
}
return div_parameter;
}
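/*
 * Worked example (illustrative only): with the 80 MHz GPSPI clock, clkdiv = 4
 * gives a 20 MHz flash clock. The returned value packs the clkcnt_l/h/n
 * fields described in `spi_struct.h`:
 *
 *   gpspi_flash_ll_clock_reg_t reg = gpspi_flash_ll_calculate_clock_reg(4);
 *   // reg == (4 - 1) | ((4/2 - 1) << 6) | ((4 - 1) << 12) == 0x3043
 *   gpspi_flash_ll_set_clock(dev, &reg);
 */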
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,330 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for Timer Group register operations.
// Note that most of the register operations in this layer are non-atomic operations.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdlib.h>
#include <stdbool.h>
#include "hal/misc.h"
#include "hal/wdt_types.h"
#include "soc/rtc_cntl_periph.h"
#include "soc/efuse_reg.h"
#include "esp_attr.h"
#include "esp_assert.h"
#include "esp32c61/rom/ets_sys.h"
// TODO: [ESP32C61] IDF-9243, inherit from c6
/* The value that needs to be written to LP_WDT_WPROTECT_REG to write-enable the wdt registers */
#define LP_WDT_WKEY_VALUE 0x50D83AA1
/* The value that needs to be written to LP_WDT_SWD_WPROTECT_REG to write-enable the swd registers */
#define LP_WDT_SWD_WKEY_VALUE 0x50D83AA1
/* Possible values for RTC_CNTL_WDT_CPU_RESET_LENGTH and RTC_CNTL_WDT_SYS_RESET_LENGTH */
#define LP_WDT_RESET_LENGTH_100_NS 0
#define LP_WDT_RESET_LENGTH_200_NS 1
#define LP_WDT_RESET_LENGTH_300_NS 2
#define LP_WDT_RESET_LENGTH_400_NS 3
#define LP_WDT_RESET_LENGTH_500_NS 4
#define LP_WDT_RESET_LENGTH_800_NS 5
#define LP_WDT_RESET_LENGTH_1600_NS 6
#define LP_WDT_RESET_LENGTH_3200_NS 7
#define LP_WDT_STG_SEL_OFF 0
#define LP_WDT_STG_SEL_INT 1
#define LP_WDT_STG_SEL_RESET_CPU 2
#define LP_WDT_STG_SEL_RESET_SYSTEM 3
#define LP_WDT_STG_SEL_RESET_RTC 4
//Type check wdt_stage_action_t
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_OFF == LP_WDT_STG_SEL_OFF, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_INT == LP_WDT_STG_SEL_INT, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_RESET_CPU == LP_WDT_STG_SEL_RESET_CPU, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_RESET_SYSTEM == LP_WDT_STG_SEL_RESET_SYSTEM, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_RESET_RTC == LP_WDT_STG_SEL_RESET_RTC, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
//Type check wdt_reset_sig_length_t
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_100ns == LP_WDT_RESET_LENGTH_100_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_200ns == LP_WDT_RESET_LENGTH_200_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_300ns == LP_WDT_RESET_LENGTH_300_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_400ns == LP_WDT_RESET_LENGTH_400_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_500ns == LP_WDT_RESET_LENGTH_500_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_800ns == LP_WDT_RESET_LENGTH_800_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_1_6us == LP_WDT_RESET_LENGTH_1600_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_3_2us == LP_WDT_RESET_LENGTH_3200_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
/**
* @brief Enable the RWDT
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void lpwdt_ll_enable(lp_wdt_dev_t *hw)
{
hw->config0.wdt_en = 1;
}
/**
* @brief Disable the RWDT
*
* @param hw Start address of the peripheral registers.
* @note This function does not disable the flashboot mode. Therefore, given that
 * the RWDT is disabled using this function, a timeout can still occur
* if the flashboot mode is simultaneously enabled.
*/
FORCE_INLINE_ATTR void lpwdt_ll_disable(lp_wdt_dev_t *hw)
{
hw->config0.wdt_en = 0;
}
/**
* @brief Check if the RWDT is enabled
*
* @param hw Start address of the peripheral registers.
* @return True if RTC WDT is enabled
*/
FORCE_INLINE_ATTR bool lpwdt_ll_check_if_enabled(lp_wdt_dev_t *hw)
{
return (hw->config0.wdt_en) ? true : false;
}
/**
* @brief Configure a particular stage of the RWDT
*
* @param hw Start address of the peripheral registers.
* @param stage Which stage to configure
* @param timeout Number of timer ticks for the stage to timeout (see note).
* @param behavior What action to take when the stage times out
*
 * @note The value of the RWDT stage 0 timeout register is special, in
 * that an implicit multiplier is applied to that value to produce
 * an effective timeout tick value. The multiplier is dependent
* on an EFuse value. Therefore, when configuring stage 0, the valid
* values for the timeout argument are:
* - If Efuse value is 0, any even number between [2,2*UINT32_MAX]
* - If Efuse value is 1, any multiple of 4 between [4,4*UINT32_MAX]
* - If Efuse value is 2, any multiple of 8 between [8,8*UINT32_MAX]
* - If Efuse value is 3, any multiple of 16 between [16,16*UINT32_MAX]
*/
FORCE_INLINE_ATTR void lpwdt_ll_config_stage(lp_wdt_dev_t *hw, wdt_stage_t stage, uint32_t timeout_ticks, wdt_stage_action_t behavior)
{
switch (stage) {
case WDT_STAGE0:
hw->config0.wdt_stg0 = behavior;
//Account for the implicit multiplier applied to the stage 0 timeout tick config value
hw->config1.val = timeout_ticks >> (1 + REG_GET_FIELD(EFUSE_RD_REPEAT_DATA1_REG, EFUSE_WDT_DELAY_SEL));
break;
case WDT_STAGE1:
hw->config0.wdt_stg1 = behavior;
hw->config2.val = timeout_ticks;
break;
case WDT_STAGE2:
hw->config0.wdt_stg2 = behavior;
hw->config3.val = timeout_ticks;
break;
case WDT_STAGE3:
hw->config0.wdt_stg3 = behavior;
hw->config4.val = timeout_ticks;
break;
default:
abort();
}
}
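/*
 * Worked example (illustrative only): because of the implicit EFuse
 * multiplier, a stage 0 request is divided down before it is written. If
 * EFUSE_WDT_DELAY_SEL reads 1 (multiplier 4), a requested timeout of 40000
 * ticks is stored as 40000 >> 2 = 10000 in the stage 0 register:
 *
 *   lpwdt_ll_config_stage(hw, WDT_STAGE0, 40000, WDT_STAGE_ACTION_RESET_SYSTEM);
 */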
/**
* @brief Disable a particular stage of the RWDT
*
* @param hw Start address of the peripheral registers.
* @param stage Which stage to disable
*/
FORCE_INLINE_ATTR void lpwdt_ll_disable_stage(lp_wdt_dev_t *hw, wdt_stage_t stage)
{
switch (stage) {
case WDT_STAGE0:
hw->config0.wdt_stg0 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE1:
hw->config0.wdt_stg1 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE2:
hw->config0.wdt_stg2 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE3:
hw->config0.wdt_stg3 = WDT_STAGE_ACTION_OFF;
break;
default:
abort();
}
}
/**
* @brief Set the length of the CPU reset action
*
* @param hw Start address of the peripheral registers.
* @param length Length of CPU reset signal
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_cpu_reset_length(lp_wdt_dev_t *hw, wdt_reset_sig_length_t length)
{
hw->config0.wdt_cpu_reset_length = length;
}
/**
* @brief Set the length of the system reset action
*
* @param hw Start address of the peripheral registers.
* @param length Length of system reset signal
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_sys_reset_length(lp_wdt_dev_t *hw, wdt_reset_sig_length_t length)
{
hw->config0.wdt_sys_reset_length = length;
}
/**
* @brief Enable/Disable the RWDT flashboot mode.
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable RWDT flashboot mode, false to disable RWDT flashboot mode.
*
* @note Flashboot mode is independent and can trigger a WDT timeout event if the
* WDT's enable bit is set to 0. Flashboot mode for RWDT is automatically enabled
* on flashboot, and should be disabled by software when flashbooting completes.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_flashboot_en(lp_wdt_dev_t *hw, bool enable)
{
hw->config0.wdt_flashboot_mod_en = (enable) ? 1 : 0;
}
/**
* @brief Enable/Disable the CPU0 to be reset on WDT_STAGE_ACTION_RESET_CPU
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable CPU0 to be reset, false to disable.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_procpu_reset_en(lp_wdt_dev_t *hw, bool enable)
{
hw->config0.wdt_procpu_reset_en = (enable) ? 1 : 0;
}
/**
* @brief Enable/Disable the CPU1 to be reset on WDT_STAGE_ACTION_RESET_CPU
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable CPU1 to be reset, false to disable.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_appcpu_reset_en(lp_wdt_dev_t *hw, bool enable)
{
hw->config0.wdt_appcpu_reset_en = (enable) ? 1 : 0;
}
/**
* @brief Enable/Disable the RWDT pause during sleep functionality
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable, false to disable.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_pause_in_sleep_en(lp_wdt_dev_t *hw, bool enable)
{
hw->config0.wdt_pause_in_slp = (enable) ? 1 : 0;
}
/**
* @brief Enable/Disable chip reset on RWDT timeout.
*
* A chip reset also resets the analog portion of the chip. It will appear as a
* POWERON reset rather than an RTC reset.
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable, false to disable.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_chip_reset_en(lp_wdt_dev_t *hw, bool enable)
{
hw->config0.wdt_chip_reset_en = (enable) ? 1 : 0;
}
/**
* @brief Set width of chip reset signal
*
* @param hw Start address of the peripheral registers.
* @param width Width of chip reset signal in terms of number of RTC_SLOW_CLK cycles
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_chip_reset_width(lp_wdt_dev_t *hw, uint32_t width)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(hw->config0, wdt_chip_reset_width, width);
}
/**
* @brief Feed the RWDT
*
* Resets the current timer count and current stage.
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void lpwdt_ll_feed(lp_wdt_dev_t *hw)
{
hw->feed.rtc_wdt_feed = 1;
}
/**
* @brief Enable write protection of the RWDT registers
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void lpwdt_ll_write_protect_enable(lp_wdt_dev_t *hw)
{
hw->wprotect.val = 0;
}
/**
* @brief Disable write protection of the RWDT registers
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void lpwdt_ll_write_protect_disable(lp_wdt_dev_t *hw)
{
hw->wprotect.val = LP_WDT_WKEY_VALUE;
}
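/*
 * Usage sketch (illustrative only): the RWDT registers are write-protected,
 * so a typical configuration or feed sequence is bracketed by the wprotect
 * calls. `timeout_ticks` is a placeholder value.
 *
 *   lpwdt_ll_write_protect_disable(hw);
 *   lpwdt_ll_config_stage(hw, WDT_STAGE0, timeout_ticks, WDT_STAGE_ACTION_INT);
 *   lpwdt_ll_set_intr_enable(hw, true);
 *   lpwdt_ll_enable(hw);
 *   lpwdt_ll_write_protect_enable(hw);
 *
 *   // ... later, to feed the watchdog:
 *   lpwdt_ll_write_protect_disable(hw);
 *   lpwdt_ll_feed(hw);
 *   lpwdt_ll_write_protect_enable(hw);
 */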
/**
* @brief Enable the RWDT interrupt.
*
* @param hw Start address of the peripheral registers.
* @param enable True to enable RWDT interrupt, false to disable.
*/
FORCE_INLINE_ATTR void lpwdt_ll_set_intr_enable(lp_wdt_dev_t *hw, bool enable)
{
hw->int_ena.lp_wdt_int_ena = (enable) ? 1 : 0;
}
/**
* @brief Check if the RWDT interrupt has been triggered
*
* @param hw Start address of the peripheral registers.
* @return True if the RWDT interrupt was triggered
*/
FORCE_INLINE_ATTR bool lpwdt_ll_check_intr_status(lp_wdt_dev_t *hw)
{
return (hw->int_st.lp_wdt_int_st) ? true : false;
}
/**
* @brief Clear the RWDT interrupt status.
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void lpwdt_ll_clear_intr_status(lp_wdt_dev_t *hw)
{
hw->int_clr.lp_wdt_int_clr = 1;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,409 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for MMU register operations
#pragma once
#include "soc/spi_mem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
#include "hal/mmu_types.h"
#include "hal/efuse_ll.h"
// TODO: [ESP32C61] IDF-9265, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 * @param target Physical memory target the virtual address is mapped to, not used
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type, mmu_target_t target)
{
(void)target;
(void)vaddr_type;
//On ESP32C61, I/D share the same vaddr range
return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
unsigned cnt = efuse_ll_get_flash_crypt_cnt();
// 3 bits wide, any odd number - 1 or 3 - bits set means encryption is on
cnt = ((cnt >> 2) ^ (cnt >> 1) ^ cnt) & 0x1;
return (cnt == 1);
}
/**
* Get MMU page size
*
* @param mmu_id MMU ID
*
* @return MMU page size code
*/
__attribute__((always_inline))
static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
{
(void)mmu_id;
uint32_t page_size_code = REG_GET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE);
return (page_size_code == 0) ? MMU_PAGE_64KB : \
(page_size_code == 1) ? MMU_PAGE_32KB : \
(page_size_code == 2) ? MMU_PAGE_16KB : \
MMU_PAGE_8KB;
}
/**
* Set MMU page size
*
 * @param mmu_id MMU ID
 * @param size MMU page size
*/
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
uint8_t reg_val = (size == MMU_PAGE_64KB) ? 0 : \
(size == MMU_PAGE_32KB) ? 1 : \
(size == MMU_PAGE_16KB) ? 2 : \
(size == MMU_PAGE_8KB) ? 3 : 0;
REG_SET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE, reg_val);
}
/**
* Check if the external memory vaddr region is valid
*
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_end = vaddr_start + len - 1;
return (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * SOC_MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * SOC_MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * SOC_MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
* @param mmu_id MMU ID
* @param vaddr virtual address to be mapped
*
* @return
* MMU table entry id
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
return ((vaddr & SOC_MMU_VADDR_MASK) >> shift_code);
}
/**
* Format the paddr to be mappable
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
return paddr >> shift_code;
}
/**
* Write to the MMU table to map the virtual memory and the physical memory
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param mmu_val Value to be set into an MMU entry, for physical address
* @param target MMU target physical memory.
*/
__attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
(void)target;
uint32_t mmu_raw_value;
if (mmu_ll_cache_encryption_enabled()) {
mmu_val |= SOC_MMU_SENSITIVE;
}
mmu_val |= (target == MMU_TARGET_FLASH0) ? SOC_MMU_ACCESS_FLASH : SOC_MMU_ACCESS_SPIRAM;
mmu_raw_value = mmu_val | SOC_MMU_VALID;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), mmu_raw_value);
}
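/*
 * Usage sketch (illustrative only): mapping one page of external flash. The
 * vaddr/paddr values are hypothetical and must be aligned to the configured
 * page size; callers normally obtain them from the cache/mmap driver.
 *
 *   uint32_t entry = mmu_ll_get_entry_id(0, vaddr);
 *   uint32_t val   = mmu_ll_format_paddr(0, paddr, MMU_TARGET_FLASH0);
 *   mmu_ll_write_entry(0, entry, val, MMU_TARGET_FLASH0);
 */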
/**
* Read the raw value from MMU table
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
 *
 * @return MMU table entry value, 0 if the entry is invalid
*/
__attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
uint32_t mmu_raw_value;
uint32_t ret;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
if (mmu_ll_cache_encryption_enabled()) {
mmu_raw_value &= ~SOC_MMU_SENSITIVE;
}
if (!(mmu_raw_value & SOC_MMU_VALID)) {
return 0;
}
ret = mmu_raw_value & SOC_MMU_VALID_VAL_MASK;
return ret;
}
/**
* Set MMU table entry as invalid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry
*/
__attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), SOC_MMU_INVALID);
}
/**
* Unmap all the items in the MMU table
*
* @param mmu_id MMU ID
*/
__attribute__((always_inline))
static inline void mmu_ll_unmap_all(uint32_t mmu_id)
{
for (int i = 0; i < SOC_MMU_ENTRY_NUM; i++) {
mmu_ll_set_entry_invalid(mmu_id, i);
}
}
/**
* Check MMU table entry value is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
 * @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < SOC_MMU_ENTRY_NUM);
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & SOC_MMU_VALID) ? true : false;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < SOC_MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & SOC_MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
 * @note This function can only find the first matching entry ID. However, it is possible that a physical address
 * is mapped to multiple virtual addresses.
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < SOC_MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & SOC_MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return vaddr base
 */
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
/**
* For `mmu_ll_laddr_to_vaddr`, target is for compatibility on this chip.
* Here we just pass MMU_TARGET_FLASH0 to get vaddr
*/
return mmu_ll_laddr_to_vaddr(laddr, type, MMU_TARGET_FLASH0);
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,324 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for Timer Group register operations.
// Note that most of the register operations in this layer are non-atomic operations.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include "soc/timer_periph.h"
#include "soc/timer_group_struct.h"
#include "soc/pcr_struct.h"
#include "hal/wdt_types.h"
#include "hal/assert.h"
#include "esp_attr.h"
#include "esp_assert.h"
#include "hal/misc.h"
// TODO: [ESP32C61] IDF-9257, inherit from c6
/* Pre-calculated prescaler to achieve 500 ticks/us (MWDT1_TICKS_PER_US) when using default clock (MWDT_CLK_SRC_DEFAULT) */
#define MWDT_LL_DEFAULT_CLK_PRESCALER 20000
/* Possible values for TIMG_WDT_STGx */
#define TIMG_WDT_STG_SEL_OFF 0
#define TIMG_WDT_STG_SEL_INT 1
#define TIMG_WDT_STG_SEL_RESET_CPU 2
#define TIMG_WDT_STG_SEL_RESET_SYSTEM 3
#define TIMG_WDT_RESET_LENGTH_100_NS 0
#define TIMG_WDT_RESET_LENGTH_200_NS 1
#define TIMG_WDT_RESET_LENGTH_300_NS 2
#define TIMG_WDT_RESET_LENGTH_400_NS 3
#define TIMG_WDT_RESET_LENGTH_500_NS 4
#define TIMG_WDT_RESET_LENGTH_800_NS 5
#define TIMG_WDT_RESET_LENGTH_1600_NS 6
#define TIMG_WDT_RESET_LENGTH_3200_NS 7
//Type check wdt_stage_action_t
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_OFF == TIMG_WDT_STG_SEL_OFF, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_INT == TIMG_WDT_STG_SEL_INT, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_RESET_CPU == TIMG_WDT_STG_SEL_RESET_CPU, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
ESP_STATIC_ASSERT(WDT_STAGE_ACTION_RESET_SYSTEM == TIMG_WDT_STG_SEL_RESET_SYSTEM, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_stage_action_t");
//Type check wdt_reset_sig_length_t
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_100ns == TIMG_WDT_RESET_LENGTH_100_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_200ns == TIMG_WDT_RESET_LENGTH_200_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_300ns == TIMG_WDT_RESET_LENGTH_300_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_400ns == TIMG_WDT_RESET_LENGTH_400_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_500ns == TIMG_WDT_RESET_LENGTH_500_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_800ns == TIMG_WDT_RESET_LENGTH_800_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_1_6us == TIMG_WDT_RESET_LENGTH_1600_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
ESP_STATIC_ASSERT(WDT_RESET_SIG_LENGTH_3_2us == TIMG_WDT_RESET_LENGTH_3200_NS, "Add mapping to LL watchdog timeout behavior, since it's no longer naturally compatible with wdt_reset_sig_length_t");
/**
* @brief Enable the MWDT
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void mwdt_ll_enable(timg_dev_t *hw)
{
hw->wdtconfig0.wdt_en = 1;
}
/**
* @brief Disable the MWDT
*
* @param hw Start address of the peripheral registers.
* @note This function does not disable the flashboot mode. Therefore, given that
* the MWDT is disabled using this function, a timeout can still occur
* if the flashboot mode is simultaneously enabled.
*/
FORCE_INLINE_ATTR void mwdt_ll_disable(timg_dev_t *hw)
{
hw->wdtconfig0.wdt_en = 0;
}
/**
* Check if the MWDT is enabled
*
* @param hw Start address of the peripheral registers.
* @return True if the MWDT is enabled, false otherwise
*/
FORCE_INLINE_ATTR bool mwdt_ll_check_if_enabled(timg_dev_t *hw)
{
return (hw->wdtconfig0.wdt_en) ? true : false;
}
/**
* @brief Configure a particular stage of the MWDT
*
* @param hw Start address of the peripheral registers.
* @param stage Which stage to configure
* @param timeout Number of timer ticks for the stage to timeout
* @param behavior What action to take when the stage times out
*/
FORCE_INLINE_ATTR void mwdt_ll_config_stage(timg_dev_t *hw, wdt_stage_t stage, uint32_t timeout, wdt_stage_action_t behavior)
{
switch (stage) {
case WDT_STAGE0:
hw->wdtconfig0.wdt_stg0 = behavior;
hw->wdtconfig2.wdt_stg0_hold = timeout;
break;
case WDT_STAGE1:
hw->wdtconfig0.wdt_stg1 = behavior;
hw->wdtconfig3.wdt_stg1_hold = timeout;
break;
case WDT_STAGE2:
hw->wdtconfig0.wdt_stg2 = behavior;
hw->wdtconfig4.wdt_stg2_hold = timeout;
break;
case WDT_STAGE3:
hw->wdtconfig0.wdt_stg3 = behavior;
hw->wdtconfig5.wdt_stg3_hold = timeout;
break;
default:
HAL_ASSERT(false && "unsupported WDT stage");
break;
}
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Disable a particular stage of the MWDT
*
* @param hw Start address of the peripheral registers.
* @param stage Which stage to disable
*/
FORCE_INLINE_ATTR void mwdt_ll_disable_stage(timg_dev_t *hw, uint32_t stage)
{
switch (stage) {
case WDT_STAGE0:
hw->wdtconfig0.wdt_stg0 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE1:
hw->wdtconfig0.wdt_stg1 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE2:
hw->wdtconfig0.wdt_stg2 = WDT_STAGE_ACTION_OFF;
break;
case WDT_STAGE3:
hw->wdtconfig0.wdt_stg3 = WDT_STAGE_ACTION_OFF;
break;
default:
HAL_ASSERT(false && "unsupported WDT stage");
break;
}
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Set the length of the CPU reset action
*
* @param hw Start address of the peripheral registers.
* @param length Length of CPU reset signal
*/
FORCE_INLINE_ATTR void mwdt_ll_set_cpu_reset_length(timg_dev_t *hw, wdt_reset_sig_length_t length)
{
hw->wdtconfig0.wdt_cpu_reset_length = length;
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Set the length of the system reset action
*
* @param hw Start address of the peripheral registers.
* @param length Length of system reset signal
*/
FORCE_INLINE_ATTR void mwdt_ll_set_sys_reset_length(timg_dev_t *hw, wdt_reset_sig_length_t length)
{
hw->wdtconfig0.wdt_sys_reset_length = length;
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Enable/Disable the MWDT flashboot mode.
*
* @param hw Beginning address of the peripheral registers.
* @param enable True to enable WDT flashboot mode, false to disable WDT flashboot mode.
*
* @note Flashboot mode is independent and can trigger a WDT timeout event if the
* WDT's enable bit is set to 0. Flashboot mode for TG0 is automatically enabled
* on flashboot, and should be disabled by software when flashbooting completes.
*/
FORCE_INLINE_ATTR void mwdt_ll_set_flashboot_en(timg_dev_t *hw, bool enable)
{
hw->wdtconfig0.wdt_flashboot_mod_en = (enable) ? 1 : 0;
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Set the clock prescaler of the MWDT
*
* @param hw Start address of the peripheral registers.
* @param prescaler Prescaler value between 1 to 65535
*/
FORCE_INLINE_ATTR void mwdt_ll_set_prescaler(timg_dev_t *hw, uint32_t prescaler)
{
// In case the compiler optimises a 32-bit instruction (e.g. s32i) into an 8/16-bit instruction (e.g. s8i, which is not allowed to access a register)
// We take care of the "read-modify-write" procedure by ourselves.
HAL_FORCE_MODIFY_U32_REG_FIELD(hw->wdtconfig1, wdt_clk_prescale, prescaler);
//Config registers are updated asynchronously
hw->wdtconfig0.wdt_conf_update_en = 1;
}
/**
* @brief Feed the MWDT
*
* Resets the current timer count and current stage.
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void mwdt_ll_feed(timg_dev_t *hw)
{
hw->wdtfeed.wdt_feed = 1;
}
/**
* @brief Enable write protection of the MWDT registers
*
* Locking the MWDT will prevent any of the MWDT's registers from being modified
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void mwdt_ll_write_protect_enable(timg_dev_t *hw)
{
hw->wdtwprotect.wdt_wkey = 0;
}
/**
* @brief Disable write protection of the MWDT registers
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void mwdt_ll_write_protect_disable(timg_dev_t *hw)
{
hw->wdtwprotect.wdt_wkey = TIMG_WDT_WKEY_VALUE;
}
/**
* @brief Clear the MWDT interrupt status.
*
* @param hw Start address of the peripheral registers.
*/
FORCE_INLINE_ATTR void mwdt_ll_clear_intr_status(timg_dev_t *hw)
{
hw->int_clr_timers.wdt_int_clr = 1;
}
/**
* @brief Set the interrupt enable bit for the MWDT interrupt.
*
* @param hw Beginning address of the peripheral registers.
* @param enable Whether to enable the MWDT interrupt
*/
FORCE_INLINE_ATTR void mwdt_ll_set_intr_enable(timg_dev_t *hw, bool enable)
{
hw->int_ena_timers.wdt_int_ena = (enable) ? 1 : 0;
}
/**
* @brief Set the clock source for the MWDT.
*
* @param hw Beginning address of the peripheral registers.
* @param clk_src Clock source
*/
FORCE_INLINE_ATTR void mwdt_ll_set_clock_source(timg_dev_t *hw, mwdt_clock_source_t clk_src)
{
uint8_t clk_id = 0;
switch (clk_src) {
case MWDT_CLK_SRC_XTAL:
clk_id = 0;
break;
case MWDT_CLK_SRC_PLL_F80M:
clk_id = 1;
break;
case MWDT_CLK_SRC_RC_FAST:
clk_id = 2;
break;
default:
HAL_ASSERT(false);
break;
}
if (hw == &TIMERG0) {
PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_sel = clk_id;
} else {
PCR.timergroup1_wdt_clk_conf.tg1_wdt_clk_sel = clk_id;
}
}
/**
* @brief Enable MWDT module clock
*
* @param hw Beginning address of the peripheral registers.
* @param en true to enable, false to disable
*/
__attribute__((always_inline))
static inline void mwdt_ll_enable_clock(timg_dev_t *hw, bool en)
{
if (hw == &TIMERG0) {
PCR.timergroup0_wdt_clk_conf.tg0_wdt_clk_en = en;
} else {
PCR.timergroup1_wdt_clk_conf.tg1_wdt_clk_en = en;
}
}
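/*
 * Illustrative bring-up order (a sketch only): enable the module clock and pick a
 * source before programming the other MWDT registers. TIMERG0 comes from the SoC
 * struct headers; the 40 MHz XTAL figure is an assumption used for the arithmetic.
 *
 *     mwdt_ll_enable_clock(&TIMERG0, true);
 *     mwdt_ll_set_clock_source(&TIMERG0, MWDT_CLK_SRC_XTAL);
 *     mwdt_ll_set_prescaler(&TIMERG0, 40000);    // 40 MHz / 40000 = 1 kHz -> 1 ms per tick
 */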
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The HAL layer for PMU
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include "soc/soc_caps.h"
#include "hal/pmu_ll.h"
#include "hal/pmu_types.h"
// TODO: [ESP32C61] IDF-9250, inherit from c6
typedef struct {
pmu_dev_t *dev;
} pmu_hal_context_t;
void pmu_hal_hp_set_digital_power_up_wait_cycle(pmu_hal_context_t *hal, uint32_t power_supply_wait_cycle, uint32_t power_up_wait_cycle);
uint32_t pmu_hal_hp_get_digital_power_up_wait_cycle(pmu_hal_context_t *hal);
void pmu_hal_lp_set_digital_power_up_wait_cycle(pmu_hal_context_t *hal, uint32_t power_supply_wait_cycle, uint32_t power_up_wait_cycle);
uint32_t pmu_hal_lp_get_digital_power_up_wait_cycle(pmu_hal_context_t *hal);
void pmu_hal_hp_set_sleep_active_backup_enable(pmu_hal_context_t *hal);
void pmu_hal_hp_set_sleep_active_backup_disable(pmu_hal_context_t *hal);
void pmu_hal_hp_set_sleep_modem_backup_enable(pmu_hal_context_t *hal);
void pmu_hal_hp_set_sleep_modem_backup_disable(pmu_hal_context_t *hal);
void pmu_hal_hp_set_modem_active_backup_enable(pmu_hal_context_t *hal);
void pmu_hal_hp_set_modem_active_backup_disable(pmu_hal_context_t *hal);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,688 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for ESP32-C61 PMU register operations
#pragma once
#include <stdlib.h>
#include <stdbool.h>
#include "soc/soc.h"
#include "esp_attr.h"
#include "hal/assert.h"
#include "soc/pmu_struct.h"
#include "hal/pmu_types.h"
// TODO: [ESP32C61] IDF-9250, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Set the power domain that needs to be powered down in the digital power
*
* @param hw Beginning address of the peripheral registers.
* @param mode The pmu mode
* @param flag Digital power domain flag
*
* @return None
*/
FORCE_INLINE_ATTR void pmu_ll_hp_set_dig_power(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t flag)
{
hw->hp_sys[mode].dig_power.val = flag;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_icg_func(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t icg_func)
{
hw->hp_sys[mode].icg_func = icg_func;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_icg_apb(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t bitmap)
{
hw->hp_sys[mode].icg_apb = bitmap;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_icg_modem(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t code)
{
hw->hp_sys[mode].icg_modem.code = code;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_uart_wakeup_enable(pmu_dev_t *hw, pmu_hp_mode_t mode, bool wakeup_en)
{
hw->hp_sys[mode].syscntl.uart_wakeup_en = wakeup_en;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_hold_all_lp_pad(pmu_dev_t *hw, pmu_hp_mode_t mode, bool hold_all)
{
hw->hp_sys[mode].syscntl.lp_pad_hold_all = hold_all;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_hold_all_hp_pad(pmu_dev_t *hw, pmu_hp_mode_t mode, bool hold_all)
{
hw->hp_sys[mode].syscntl.hp_pad_hold_all = hold_all;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_dig_pad_slp_sel(pmu_dev_t *hw, pmu_hp_mode_t mode, bool slp_sel)
{
hw->hp_sys[mode].syscntl.dig_pad_slp_sel = slp_sel;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_pause_watchdog(pmu_dev_t *hw, pmu_hp_mode_t mode, bool pause_wdt)
{
hw->hp_sys[mode].syscntl.dig_pause_wdt = pause_wdt;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_cpu_stall(pmu_dev_t *hw, pmu_hp_mode_t mode, bool cpu_stall)
{
hw->hp_sys[mode].syscntl.dig_cpu_stall = cpu_stall;
}
/**
* @brief Set the power domain that needs to be powered down in the clock power
*
* @param hw Beginning address of the peripheral registers.
* @param mode The pmu mode
* @param flag Clock power domain flag
*
* @return None
*/
FORCE_INLINE_ATTR void pmu_ll_hp_set_clk_power(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t xpd_flag)
{
hw->hp_sys[mode].clk_power.val = xpd_flag;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_xtal_xpd(pmu_dev_t *hw, pmu_hp_mode_t mode, bool xpd_xtal)
{
hw->hp_sys[mode].xtal.xpd_xtal = xpd_xtal;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_bias_xpd(pmu_dev_t *hw, pmu_hp_mode_t mode, bool xpd_bias)
{
hw->hp_sys[mode].bias.xpd_bias = xpd_bias;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_dbg_atten(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t value)
{
hw->hp_sys[mode].bias.dbg_atten = value;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_current_power_off(pmu_dev_t *hw, pmu_hp_mode_t mode, bool off)
{
hw->hp_sys[mode].bias.pd_cur = off;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_bias_sleep_enable(pmu_dev_t *hw, pmu_hp_mode_t mode, bool en)
{
hw->hp_sys[mode].bias.bias_sleep = en;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_retention_param(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t param)
{
hw->hp_sys[mode].backup.val = param;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_to_active_backup_enable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_ACTIVE].backup.hp_sleep2active_backup_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_to_active_backup_disable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_ACTIVE].backup.hp_sleep2active_backup_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_modem_to_active_backup_enable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_ACTIVE].backup.hp_modem2active_backup_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_modem_to_active_backup_disable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_ACTIVE].backup.hp_modem2active_backup_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_to_modem_backup_enable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_MODEM].backup.hp_sleep2modem_backup_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_to_modem_backup_disable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_MODEM].backup.hp_sleep2modem_backup_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_active_to_sleep_backup_enable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_SLEEP].backup.hp_active2sleep_backup_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_active_to_sleep_backup_disable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_SLEEP].backup.hp_active2sleep_backup_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_modem_to_sleep_backup_enable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_SLEEP].backup.hp_modem2sleep_backup_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_modem_to_sleep_backup_disable(pmu_dev_t *hw)
{
hw->hp_sys[PMU_MODE_HP_SLEEP].backup.hp_modem2sleep_backup_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_backup_icg_func(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t icg_func)
{
hw->hp_sys[mode].backup_clk = icg_func;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sysclk_nodiv(pmu_dev_t *hw, pmu_hp_mode_t mode, bool sysclk_nodiv)
{
hw->hp_sys[mode].sysclk.dig_sysclk_nodiv = sysclk_nodiv;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_icg_sysclk_enable(pmu_dev_t *hw, pmu_hp_mode_t mode, bool icg_sysclk_en)
{
hw->hp_sys[mode].sysclk.icg_sysclk_en = icg_sysclk_en;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sysclk_slp_sel(pmu_dev_t *hw, pmu_hp_mode_t mode, bool slp_sel)
{
hw->hp_sys[mode].sysclk.sysclk_slp_sel = slp_sel;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_icg_sysclk_slp_sel(pmu_dev_t *hw, pmu_hp_mode_t mode, bool slp_sel)
{
hw->hp_sys[mode].sysclk.icg_slp_sel = slp_sel;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_dig_sysclk(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t sysclk_sel)
{
hw->hp_sys[mode].sysclk.dig_sysclk_sel = sysclk_sel;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_sleep_logic_xpd(pmu_dev_t *hw, pmu_hp_mode_t mode, bool slp_xpd)
{
hw->hp_sys[mode].regulator0.slp_logic_xpd = slp_xpd;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_sleep_memory_xpd(pmu_dev_t *hw, pmu_hp_mode_t mode, bool slp_xpd)
{
hw->hp_sys[mode].regulator0.slp_mem_xpd = slp_xpd;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_xpd(pmu_dev_t *hw, pmu_hp_mode_t mode, bool xpd)
{
hw->hp_sys[mode].regulator0.xpd = xpd;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_sleep_logic_dbias(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t slp_dbias)
{
hw->hp_sys[mode].regulator0.slp_logic_dbias = slp_dbias;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_sleep_memory_dbias(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t slp_dbias)
{
hw->hp_sys[mode].regulator0.slp_mem_dbias = slp_dbias;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_dbias(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t dbias)
{
hw->hp_sys[mode].regulator0.dbias = dbias;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_regulator_driver_bar(pmu_dev_t *hw, pmu_hp_mode_t mode, uint32_t drv_b)
{
hw->hp_sys[mode].regulator1.drv_b = drv_b;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_regulator_slp_xpd(pmu_dev_t *hw, pmu_lp_mode_t mode, bool slp_xpd)
{
hw->lp_sys[mode].regulator0.slp_xpd = slp_xpd;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_regulator_xpd(pmu_dev_t *hw, pmu_lp_mode_t mode, bool xpd)
{
hw->lp_sys[mode].regulator0.xpd = xpd;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_regulator_sleep_dbias(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t slp_dbias)
{
hw->lp_sys[mode].regulator0.slp_dbias = slp_dbias;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_regulator_dbias(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t dbias)
{
hw->lp_sys[mode].regulator0.dbias = dbias;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_regulator_driver_bar(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t drv_b)
{
hw->lp_sys[mode].regulator1.drv_b = drv_b;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_xtal_xpd(pmu_dev_t *hw, pmu_lp_mode_t mode, bool xpd_xtal)
{
HAL_ASSERT(mode == PMU_MODE_LP_SLEEP);
hw->lp_sys[mode].xtal.xpd_xtal = xpd_xtal;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_dig_power(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t flag)
{
hw->lp_sys[mode].dig_power.val = flag;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_clk_power(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t xpd_flag)
{
hw->lp_sys[mode].clk_power.val = xpd_flag;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_clk_power(pmu_dev_t *hw, pmu_lp_mode_t mode)
{
return hw->lp_sys[mode].clk_power.val;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_bias_xpd(pmu_dev_t *hw, pmu_lp_mode_t mode, bool xpd_bias)
{
HAL_ASSERT(mode == PMU_MODE_LP_SLEEP);
hw->lp_sys[mode].bias.xpd_bias = xpd_bias;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_dbg_atten(pmu_dev_t *hw, pmu_lp_mode_t mode, uint32_t value)
{
HAL_ASSERT(mode == PMU_MODE_LP_SLEEP);
hw->lp_sys[mode].bias.dbg_atten = value;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_current_power_off(pmu_dev_t *hw, pmu_lp_mode_t mode, bool off)
{
HAL_ASSERT(mode == PMU_MODE_LP_SLEEP);
hw->lp_sys[mode].bias.pd_cur = off;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_bias_sleep_enable(pmu_dev_t *hw, pmu_lp_mode_t mode, bool en)
{
HAL_ASSERT(mode == PMU_MODE_LP_SLEEP);
hw->lp_sys[mode].bias.bias_sleep = en;
}
/****/
FORCE_INLINE_ATTR void pmu_ll_imm_set_clk_power(pmu_dev_t *hw, uint32_t flag)
{
hw->imm.clk_power.val = flag;
}
FORCE_INLINE_ATTR void pmu_ll_imm_set_icg_slp_sel(pmu_dev_t *hw, bool slp_sel)
{
if (slp_sel) {
hw->imm.sleep_sysclk.tie_high_icg_slp_sel = 1;
} else {
hw->imm.sleep_sysclk.tie_low_icg_slp_sel = 1;
}
}
FORCE_INLINE_ATTR void pmu_ll_imm_update_dig_sysclk_sel(pmu_dev_t *hw, bool update)
{
hw->imm.sleep_sysclk.update_dig_sysclk_sel = update;
}
FORCE_INLINE_ATTR void pmu_ll_imm_update_dig_icg_switch(pmu_dev_t *hw, bool update)
{
hw->imm.sleep_sysclk.update_dig_icg_switch = update;
}
FORCE_INLINE_ATTR void pmu_ll_imm_update_dig_icg_func(pmu_dev_t *hw, bool icg_func_update)
{
hw->imm.hp_func_icg.update_dig_icg_func_en = icg_func_update;
}
FORCE_INLINE_ATTR void pmu_ll_imm_update_dig_icg_apb(pmu_dev_t *hw, bool icg_apb_update)
{
hw->imm.hp_apb_icg.update_dig_icg_apb_en = icg_apb_update;
}
FORCE_INLINE_ATTR void pmu_ll_imm_update_dig_icg_modem_code(pmu_dev_t *hw, bool icg_modem_update)
{
hw->imm.modem_icg.update_dig_icg_modem_en = icg_modem_update;
}
FORCE_INLINE_ATTR void pmu_ll_imm_set_lp_rootclk_sel(pmu_dev_t *hw, bool rootclk_sel)
{
if (rootclk_sel) {
hw->imm.lp_icg.tie_high_lp_rootclk_sel = 1;
} else {
hw->imm.lp_icg.tie_low_lp_rootclk_sel = 1;
}
}
FORCE_INLINE_ATTR void pmu_ll_imm_set_hp_pad_hold_all(pmu_dev_t *hw, bool hold_all)
{
if (hold_all) {
hw->imm.pad_hold_all.tie_high_hp_pad_hold_all = 1;
} else {
hw->imm.pad_hold_all.tie_low_hp_pad_hold_all = 1;
}
}
FORCE_INLINE_ATTR void pmu_ll_imm_set_lp_pad_hold_all(pmu_dev_t *hw, bool hold_all)
{
if (hold_all) {
hw->imm.pad_hold_all.tie_high_lp_pad_hold_all = 1;
} else {
hw->imm.pad_hold_all.tie_low_lp_pad_hold_all = 1;
}
}
/*** */
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_reset(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool rst)
{
hw->power.hp_pd[domain].force_reset = rst;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_isolate(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool iso)
{
hw->power.hp_pd[domain].force_iso = iso;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_power_up(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool fpu)
{
hw->power.hp_pd[domain].force_pu = fpu;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_no_reset(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool no_rst)
{
hw->power.hp_pd[domain].force_no_reset = no_rst;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_no_isolate(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool no_iso)
{
hw->power.hp_pd[domain].force_no_iso = no_iso;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_power_force_power_down(pmu_dev_t *hw, pmu_hp_power_domain_t domain, bool fpd)
{
hw->power.hp_pd[domain].force_pd = fpd;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_reset(pmu_dev_t *hw, bool rst)
{
hw->power.lp_peri.force_reset = rst;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_isolate(pmu_dev_t *hw, bool iso)
{
hw->power.lp_peri.force_iso = iso;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_power_up(pmu_dev_t *hw, bool fpu)
{
hw->power.lp_peri.force_pu = fpu;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_no_reset(pmu_dev_t *hw, bool no_rst)
{
hw->power.lp_peri.force_no_reset = no_rst;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_no_isolate(pmu_dev_t *hw, bool no_iso)
{
hw->power.lp_peri.force_no_iso = no_iso;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_power_force_power_down(pmu_dev_t *hw, bool fpd)
{
hw->power.lp_peri.force_pd = fpd;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_memory_isolate(pmu_dev_t *hw, uint32_t iso)
{
hw->power.mem_cntl.force_hp_mem_iso = iso;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_memory_power_down(pmu_dev_t *hw, uint32_t fpd)
{
hw->power.mem_cntl.force_hp_mem_pd = fpd;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_memory_no_isolate(pmu_dev_t *hw, uint32_t no_iso)
{
hw->power.mem_cntl.force_hp_mem_no_iso = no_iso;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_memory_power_up(pmu_dev_t *hw, uint32_t fpu)
{
hw->power.mem_cntl.force_hp_mem_pu = fpu;
}
/*** */
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_enable(pmu_dev_t *hw)
{
hw->wakeup.cntl0.sleep_req = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_reject_enable(pmu_dev_t *hw, uint32_t reject)
{
hw->wakeup.cntl1.sleep_reject_ena = reject;
hw->wakeup.cntl1.slp_reject_en = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_reject_disable(pmu_dev_t *hw)
{
hw->wakeup.cntl1.slp_reject_en = 0;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_wakeup_enable(pmu_dev_t *hw, uint32_t wakeup)
{
hw->wakeup.cntl2 = wakeup;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_sleep_protect_mode(pmu_dev_t *hw, int mode)
{
hw->wakeup.cntl3.sleep_prt_sel = mode;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_min_sleep_cycle(pmu_dev_t *hw, uint32_t slow_clk_cycle)
{
hw->wakeup.cntl3.hp_min_slp_val = slow_clk_cycle;
}
FORCE_INLINE_ATTR void pmu_ll_hp_clear_reject_cause(pmu_dev_t *hw)
{
hw->wakeup.cntl4.slp_reject_cause_clr = 1;
}
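/*
 * Minimal sleep-request sketch built from the helpers above and below (PMU is assumed
 * to be the pmu_dev_t instance from the SoC struct headers; the wakeup/reject masks
 * and cycle counts are caller-chosen, chip-specific values):
 *
 *     pmu_ll_hp_set_wakeup_enable(&PMU, wakeup_mask);
 *     pmu_ll_hp_set_reject_enable(&PMU, reject_mask);
 *     pmu_ll_hp_set_min_sleep_cycle(&PMU, min_slp_cycles);
 *     pmu_ll_hp_set_sleep_enable(&PMU);                  // issue the sleep request
 *     // after wake-up:
 *     if (pmu_ll_hp_is_sleep_reject(&PMU)) {
 *         uint32_t cause = pmu_ll_hp_get_reject_cause(&PMU);
 *         pmu_ll_hp_clear_reject_cause(&PMU);
 *     }
 */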
FORCE_INLINE_ATTR bool pmu_ll_hp_is_sleep_wakeup(pmu_dev_t *hw)
{
return (hw->hp_ext.int_raw.wakeup == 1);
}
FORCE_INLINE_ATTR bool pmu_ll_hp_is_sleep_reject(pmu_dev_t *hw)
{
return (hw->hp_ext.int_raw.reject == 1);
}
FORCE_INLINE_ATTR void pmu_ll_hp_clear_sw_intr_status(pmu_dev_t *hw)
{
hw->hp_ext.int_clr.sw = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_clear_wakeup_intr_status(pmu_dev_t *hw)
{
hw->hp_ext.int_clr.wakeup = 1;
}
FORCE_INLINE_ATTR void pmu_ll_hp_clear_reject_intr_status(pmu_dev_t *hw)
{
hw->hp_ext.int_clr.reject = 1;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_wakeup_cause(pmu_dev_t *hw)
{
return hw->wakeup.status0;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_reject_cause(pmu_dev_t *hw)
{
return hw->wakeup.status1;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_interrupt_raw(pmu_dev_t *hw)
{
return hw->lp_ext.int_raw.val;
}
FORCE_INLINE_ATTR void pmu_ll_lp_clear_intsts_mask(pmu_dev_t *hw, uint32_t mask)
{
hw->lp_ext.int_clr.val = mask;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_min_sleep_cycle(pmu_dev_t *hw, uint32_t slow_clk_cycle)
{
hw->wakeup.cntl3.lp_min_slp_val = slow_clk_cycle;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_modify_icg_cntl_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->hp_ext.clk_cntl.modify_icg_cntl_wait = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_modify_icg_cntl_wait_cycle(pmu_dev_t *hw)
{
return hw->hp_ext.clk_cntl.modify_icg_cntl_wait;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_switch_icg_cntl_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->hp_ext.clk_cntl.switch_icg_cntl_wait = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_switch_icg_cntl_wait_cycle(pmu_dev_t *hw)
{
return hw->hp_ext.clk_cntl.switch_icg_cntl_wait;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_digital_power_down_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer0.powerdown_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_digital_power_down_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer0.powerdown_timer;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_digital_power_down_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer1.powerdown_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_digital_power_down_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer1.powerdown_timer;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_analog_wait_target_cycle(pmu_dev_t *hw, uint32_t slow_clk_cycle)
{
hw->wakeup.cntl5.lp_ana_wait_target = slow_clk_cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_analog_wait_target_cycle(pmu_dev_t *hw)
{
return hw->wakeup.cntl5.lp_ana_wait_target;
}
FORCE_INLINE_ATTR void pmu_ll_set_modem_wait_target_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->wakeup.cntl5.modem_wait_target = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_get_modem_wait_target_cycle(pmu_dev_t *hw)
{
return hw->wakeup.cntl5.modem_wait_target;
}
FORCE_INLINE_ATTR void pmu_ll_set_xtal_stable_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.clk_wait.wait_xtal_stable = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_get_xtal_stable_wait_cycle(pmu_dev_t *hw)
{
return hw->power.clk_wait.wait_xtal_stable;
}
FORCE_INLINE_ATTR void pmu_ll_set_pll_stable_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.clk_wait.wait_pll_stable = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_get_pll_stable_wait_cycle(pmu_dev_t *hw)
{
return hw->power.clk_wait.wait_pll_stable;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_digital_power_supply_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer1.wait_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_digital_power_supply_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer1.wait_timer;
}
FORCE_INLINE_ATTR void pmu_ll_lp_set_digital_power_up_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer1.powerup_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_lp_get_digital_power_up_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer1.powerup_timer;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_analog_wait_target_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->wakeup.cntl7.ana_wait_target = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_analog_wait_target_cycle(pmu_dev_t *hw)
{
return hw->wakeup.cntl7.ana_wait_target;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_digital_power_supply_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer0.wait_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_digital_power_supply_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer0.wait_timer;
}
FORCE_INLINE_ATTR void pmu_ll_hp_set_digital_power_up_wait_cycle(pmu_dev_t *hw, uint32_t cycle)
{
hw->power.wait_timer0.powerup_timer = cycle;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_hp_get_digital_power_up_wait_cycle(pmu_dev_t *hw)
{
return hw->power.wait_timer0.powerup_timer;
}
FORCE_INLINE_ATTR uint32_t pmu_ll_get_sysclk_sleep_select_state(pmu_dev_t *hw)
{
return hw->clk_state0.sysclk_slp_sel;
}
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include "soc/soc.h"
#include "soc/regi2c_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
// TODO: [ESP32C61] IDF-9276, inherit from c6
/**
* @brief Start BBPLL self-calibration
*/
static inline __attribute__((always_inline)) void regi2c_ctrl_ll_bbpll_calibration_start(void)
{
REG_CLR_BIT(I2C_MST_ANA_CONF0_REG, I2C_MST_BBPLL_STOP_FORCE_HIGH);
REG_SET_BIT(I2C_MST_ANA_CONF0_REG, I2C_MST_BBPLL_STOP_FORCE_LOW);
}
/**
* @brief Stop BBPLL self-calibration
*/
static inline __attribute__((always_inline)) void regi2c_ctrl_ll_bbpll_calibration_stop(void)
{
REG_CLR_BIT(I2C_MST_ANA_CONF0_REG, I2C_MST_BBPLL_STOP_FORCE_LOW);
REG_SET_BIT(I2C_MST_ANA_CONF0_REG, I2C_MST_BBPLL_STOP_FORCE_HIGH);
}
/**
* @brief Check whether BBPLL calibration is done
*
* @return True if calibration is done; otherwise false
*/
static inline __attribute__((always_inline)) bool regi2c_ctrl_ll_bbpll_calibration_is_done(void)
{
return REG_GET_BIT(I2C_MST_ANA_CONF0_REG, I2C_MST_BBPLL_CAL_DONE);
}
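/*
 * A typical calibration sequence is a simple start/poll/stop sketch (the busy-wait
 * and any timeout handling are the caller's choice, not dictated by this header):
 *
 *     regi2c_ctrl_ll_bbpll_calibration_start();
 *     while (!regi2c_ctrl_ll_bbpll_calibration_is_done()) {
 *         // wait for I2C_MST_BBPLL_CAL_DONE
 *     }
 *     regi2c_ctrl_ll_bbpll_calibration_stop();
 */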
/**
* @brief Enable the I2C internal bus to do I2C read/write operation to the SAR_ADC register
*/
static inline void regi2c_ctrl_ll_i2c_saradc_enable(void)
{
CLEAR_PERI_REG_MASK(ANA_CONFIG_REG, ANA_I2C_SAR_FORCE_PD);
SET_PERI_REG_MASK(ANA_CONFIG2_REG, ANA_I2C_SAR_FORCE_PU);
}
/**
* @brief Disable the I2C internal bus to do I2C read/write operation to the SAR_ADC register
*/
static inline void regi2c_ctrl_ll_i2c_saradc_disable(void)
{
CLEAR_PERI_REG_MASK(ANA_CONFIG_REG, ANA_I2C_SAR_FORCE_PU);
SET_PERI_REG_MASK(ANA_CONFIG2_REG, ANA_I2C_SAR_FORCE_PD);
}
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,82 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for RTC(LP) watchdog register operations.
// Note that most of the register operations in this layer are non-atomic operations.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
// TODO: [ESP32C61] IDF-9243, inherit from c6
#include "hal/lpwdt_ll.h"
typedef lp_wdt_dev_t rwdt_dev_t;
#define RWDT_DEV_GET() &LP_WDT
#define rwdt_ll_enable(hw) \
lpwdt_ll_enable(hw)
#define rwdt_ll_disable(hw) \
lpwdt_ll_disable(hw)
#define rwdt_ll_check_if_enabled(hw) \
lpwdt_ll_check_if_enabled(hw)
#define rwdt_ll_config_stage(hw, stage, timeout_ticks, behavior) \
lpwdt_ll_config_stage(hw, stage, timeout_ticks, behavior)
#define rwdt_ll_disable_stage(hw, stage) \
lpwdt_ll_disable_stage(hw, stage)
#define rwdt_ll_set_cpu_reset_length(hw, length) \
lpwdt_ll_set_cpu_reset_length(hw, length)
#define rwdt_ll_set_sys_reset_length(hw, length) \
lpwdt_ll_set_sys_reset_length(hw, length)
#define rwdt_ll_set_flashboot_en(hw, enable) \
lpwdt_ll_set_flashboot_en(hw, enable)
#define rwdt_ll_set_procpu_reset_en(hw, enable) \
lpwdt_ll_set_procpu_reset_en(hw, enable)
#define rwdt_ll_set_appcpu_reset_en(hw, enable) \
lpwdt_ll_set_appcpu_reset_en(hw, enable)
#define rwdt_ll_set_pause_in_sleep_en(hw, enable) \
lpwdt_ll_set_pause_in_sleep_en(hw, enable)
#define rwdt_ll_set_chip_reset_en(hw, enable) \
lpwdt_ll_set_chip_reset_en(hw, enable)
#define rwdt_ll_set_chip_reset_width(hw, width) \
lpwdt_ll_set_chip_reset_width(hw, width)
#define rwdt_ll_feed(hw) \
lpwdt_ll_feed(hw)
#define rwdt_ll_write_protect_enable(hw) \
lpwdt_ll_write_protect_enable(hw)
#define rwdt_ll_write_protect_disable(hw) \
lpwdt_ll_write_protect_disable(hw)
#define rwdt_ll_set_intr_enable(hw, enable) \
lpwdt_ll_set_intr_enable(hw, enable)
#define rwdt_ll_check_intr_status(hw) \
lpwdt_ll_check_intr_status(hw)
#define rwdt_ll_clear_intr_status(hw) \
lpwdt_ll_clear_intr_status(hw)
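/*
 * The aliases above keep RTC WDT call sites identical to the MWDT ones. A minimal
 * feed sequence looks like the sketch below (any locking around it is the caller's
 * responsibility):
 *
 *     rwdt_dev_t *hw = RWDT_DEV_GET();
 *     rwdt_ll_write_protect_disable(hw);
 *     rwdt_ll_feed(hw);
 *     rwdt_ll_write_protect_enable(hw);
 */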
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,153 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
* The LL is not a public API; do not use it in application code.
* See readme.md in hal/include/hal/readme.md
******************************************************************************/
// The Lowlevel layer for SPI Flash Encryption.
#include <stdbool.h>
#include <string.h>
#include "soc/hp_system_reg.h"
#include "soc/xts_aes_reg.h"
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "hal/assert.h"
// TODO: [ESP32C61] IDF-9232, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
/// Choose the type of chip you want to encrypt manually
typedef enum
{
FLASH_ENCRYPTION_MANU = 0, ///!< Manually encrypt the flash chip.
PSRAM_ENCRYPTION_MANU = 1 ///!< Manually encrypt the psram chip.
} flash_encrypt_ll_type_t;
/**
* Enable the flash encryption function under spi boot mode and download boot mode.
*/
static inline void spi_flash_encrypt_ll_enable(void)
{
REG_SET_BIT(HP_SYSTEM_EXTERNAL_DEVICE_ENCRYPT_DECRYPT_CONTROL_REG,
HP_SYSTEM_ENABLE_DOWNLOAD_MANUAL_ENCRYPT |
HP_SYSTEM_ENABLE_SPI_MANUAL_ENCRYPT);
}
/*
* Disable the flash encryption mode.
*/
static inline void spi_flash_encrypt_ll_disable(void)
{
REG_CLR_BIT(HP_SYSTEM_EXTERNAL_DEVICE_ENCRYPT_DECRYPT_CONTROL_REG,
HP_SYSTEM_ENABLE_SPI_MANUAL_ENCRYPT);
}
/**
* Choose the type of chip you want to encrypt manually
*
* @param type The type of chip to be encrypted
*
* @note The hardware currently supports only flash encryption.
*/
static inline void spi_flash_encrypt_ll_type(flash_encrypt_ll_type_t type)
{
// Our hardware only supports flash encryption
HAL_ASSERT(type == FLASH_ENCRYPTION_MANU);
REG_SET_FIELD(XTS_AES_DESTINATION_REG(0), XTS_AES_DESTINATION, type);
}
/**
* Configure the data size of a single encryption.
*
* @param size Size of the desired block, in bytes.
*/
static inline void spi_flash_encrypt_ll_buffer_length(uint32_t size)
{
// Desired block should not be larger than the block size.
REG_SET_FIELD(XTS_AES_LINESIZE_REG(0), XTS_AES_LINESIZE, size >> 5);
}
/**
* Save 32-bit piece of plaintext.
*
* @param address the address of written flash partition.
* @param buffer Buffer to store the input data.
* @param size Buffer size.
*
*/
static inline void spi_flash_encrypt_ll_plaintext_save(uint32_t address, const uint32_t* buffer, uint32_t size)
{
uint32_t plaintext_offs = (address % SOC_FLASH_ENCRYPTED_XTS_AES_BLOCK_MAX);
HAL_ASSERT(plaintext_offs + size <= SOC_FLASH_ENCRYPTED_XTS_AES_BLOCK_MAX);
memcpy((void *)(XTS_AES_PLAIN_MEM(0) + plaintext_offs), buffer, size);
}
/**
* Copy the flash address to XTS_AES physical address
*
* @param flash_addr flash address to write.
*/
static inline void spi_flash_encrypt_ll_address_save(uint32_t flash_addr)
{
REG_SET_FIELD(XTS_AES_PHYSICAL_ADDRESS_REG(0), XTS_AES_PHYSICAL_ADDRESS, flash_addr);
}
/**
* Start flash encryption
*/
static inline void spi_flash_encrypt_ll_calculate_start(void)
{
REG_SET_FIELD(XTS_AES_TRIGGER_REG(0), XTS_AES_TRIGGER, 1);
}
/**
* Wait for flash encryption termination
*/
static inline void spi_flash_encrypt_ll_calculate_wait_idle(void)
{
while(REG_GET_FIELD(XTS_AES_STATE_REG(0), XTS_AES_STATE) == 0x1) {
}
}
/**
* Finish the flash encryption and make encrypted result accessible to SPI.
*/
static inline void spi_flash_encrypt_ll_done(void)
{
REG_SET_BIT(XTS_AES_RELEASE_REG(0), XTS_AES_RELEASE);
while(REG_GET_FIELD(XTS_AES_STATE_REG(0), XTS_AES_STATE) != 0x3) {
}
}
/**
* Set to destroy encrypted result
*/
static inline void spi_flash_encrypt_ll_destroy(void)
{
REG_SET_BIT(XTS_AES_DESTROY_REG(0), XTS_AES_DESTROY);
}
/**
* Check if is qualified to encrypt the buffer
*
* @param address the address of written flash partition.
* @param length Buffer size.
*/
static inline bool spi_flash_encrypt_ll_check(uint32_t address, uint32_t length)
{
return (address % length) == 0;
}
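/*
 * End-to-end sketch of encrypting one block with the helpers above (illustrative only;
 * `addr` and `plain` are caller-provided, and the 64-byte length assumes it does not
 * exceed SOC_FLASH_ENCRYPTED_XTS_AES_BLOCK_MAX):
 *
 *     spi_flash_encrypt_ll_enable();
 *     spi_flash_encrypt_ll_type(FLASH_ENCRYPTION_MANU);
 *     spi_flash_encrypt_ll_buffer_length(64);
 *     spi_flash_encrypt_ll_address_save(addr);
 *     spi_flash_encrypt_ll_plaintext_save(addr, plain, 64);
 *     spi_flash_encrypt_ll_calculate_start();
 *     spi_flash_encrypt_ll_calculate_wait_idle();
 *     // ... perform the actual SPI program operation ...
 *     spi_flash_encrypt_ll_done();
 */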
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,103 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
* The LL is not a public API; do not use it in application code.
* See readme.md in soc/include/hal/readme.md
******************************************************************************/
// The Lowlevel layer for SPI Flash
#pragma once
// TODO: [ESP32C61] IDF-9314, inherit from c6
#include "gpspi_flash_ll.h"
#include "spimem_flash_ll.h"
#ifdef __cplusplus
extern "C" {
#endif
#define spi_flash_ll_calculate_clock_reg(host_id, clock_div) (((host_id)<=SPI1_HOST) ? spimem_flash_ll_calculate_clock_reg(clock_div) \
: gpspi_flash_ll_calculate_clock_reg(clock_div))
#define spi_flash_ll_get_source_clock_freq_mhz(host_id) (((host_id)<=SPI1_HOST) ? spimem_flash_ll_get_source_freq_mhz() : GPSPI_FLASH_LL_PERIPHERAL_FREQUENCY_MHZ)
#define spi_flash_ll_get_hw(host_id) (((host_id)<=SPI1_HOST ? (spi_dev_t*) spimem_flash_ll_get_hw(host_id) \
: gpspi_flash_ll_get_hw(host_id)))
#define spi_flash_ll_hw_get_id(dev) ({int dev_id = spimem_flash_ll_hw_get_id(dev); \
if (dev_id < 0) {\
dev_id = gpspi_flash_ll_hw_get_id(dev);\
}\
dev_id; \
})
// Since ESP32-C61, WB mode is available; we extend to 8 bits to occupy the `Continuous Read Mode` bits.
#define SPI_FLASH_LL_CONTINUOUS_MODE_BIT_NUMS (8)
typedef union {
gpspi_flash_ll_clock_reg_t gpspi;
spimem_flash_ll_clock_reg_t spimem;
} spi_flash_ll_clock_reg_t;
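/*
 * The union lets callers carry one clock-register value regardless of which host ends
 * up consuming it. A sketch of the usual fill-and-apply flow on the SPI1 (memory) host,
 * assuming clkdiv = 5 against the source frequency reported above:
 *
 *     spi_flash_ll_clock_reg_t clk;
 *     clk.spimem = spi_flash_ll_calculate_clock_reg(SPI1_HOST, 5);
 *     spi_flash_ll_set_clock(spi_flash_ll_get_hw(SPI1_HOST), &clk);
 */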
#ifdef GPSPI_BUILD
#define spi_flash_ll_reset(dev) gpspi_flash_ll_reset((spi_dev_t*)dev)
#define spi_flash_ll_cmd_is_done(dev) gpspi_flash_ll_cmd_is_done((spi_dev_t*)dev)
#define spi_flash_ll_get_buffer_data(dev, buffer, read_len) gpspi_flash_ll_get_buffer_data((spi_dev_t*)dev, buffer, read_len)
#define spi_flash_ll_set_buffer_data(dev, buffer, len) gpspi_flash_ll_set_buffer_data((spi_dev_t*)dev, buffer, len)
#define spi_flash_ll_user_start(dev, pe_ops) gpspi_flash_ll_user_start((spi_dev_t*)dev, pe_ops)
#define spi_flash_ll_host_idle(dev) gpspi_flash_ll_host_idle((spi_dev_t*)dev)
#define spi_flash_ll_read_phase(dev) gpspi_flash_ll_read_phase((spi_dev_t*)dev)
#define spi_flash_ll_set_cs_pin(dev, pin) gpspi_flash_ll_set_cs_pin((spi_dev_t*)dev, pin)
#define spi_flash_ll_set_read_mode(dev, read_mode) gpspi_flash_ll_set_read_mode((spi_dev_t*)dev, read_mode)
#define spi_flash_ll_set_clock(dev, clk) gpspi_flash_ll_set_clock((spi_dev_t*)dev, (gpspi_flash_ll_clock_reg_t*)clk)
#define spi_flash_ll_set_miso_bitlen(dev, bitlen) gpspi_flash_ll_set_miso_bitlen((spi_dev_t*)dev, bitlen)
#define spi_flash_ll_set_mosi_bitlen(dev, bitlen) gpspi_flash_ll_set_mosi_bitlen((spi_dev_t*)dev, bitlen)
#define spi_flash_ll_set_command(dev, cmd, bitlen) gpspi_flash_ll_set_command((spi_dev_t*)dev, cmd, bitlen)
#define spi_flash_ll_set_addr_bitlen(dev, bitlen) gpspi_flash_ll_set_addr_bitlen((spi_dev_t*)dev, bitlen)
#define spi_flash_ll_get_addr_bitlen(dev) gpspi_flash_ll_get_addr_bitlen((spi_dev_t*)dev)
#define spi_flash_ll_set_address(dev, addr) gpspi_flash_ll_set_address((spi_dev_t*)dev, addr)
#define spi_flash_ll_set_usr_address(dev, addr, bitlen) gpspi_flash_ll_set_usr_address((spi_dev_t*)dev, addr, bitlen)
#define spi_flash_ll_set_dummy(dev, dummy) gpspi_flash_ll_set_dummy((spi_dev_t*)dev, dummy)
#define spi_flash_ll_set_hold(dev, hold_n) gpspi_flash_ll_set_hold((spi_dev_t*)dev, hold_n)
#define spi_flash_ll_set_cs_setup(dev, cs_setup_time) gpspi_flash_ll_set_cs_setup((spi_dev_t*)dev, cs_setup_time)
#define spi_flash_ll_set_extra_address(dev, extra_addr) { /* Not supported on gpspi on ESP32-C61*/ }
#else
#define spi_flash_ll_reset(dev) spimem_flash_ll_reset((spi_mem_dev_t*)dev)
#define spi_flash_ll_cmd_is_done(dev) spimem_flash_ll_cmd_is_done((spi_mem_dev_t*)dev)
#define spi_flash_ll_erase_chip(dev) spimem_flash_ll_erase_chip((spi_mem_dev_t*)dev)
#define spi_flash_ll_erase_sector(dev) spimem_flash_ll_erase_sector((spi_mem_dev_t*)dev)
#define spi_flash_ll_erase_block(dev) spimem_flash_ll_erase_block((spi_mem_dev_t*)dev)
#define spi_flash_ll_set_write_protect(dev, wp) spimem_flash_ll_set_write_protect((spi_mem_dev_t*)dev, wp)
#define spi_flash_ll_get_buffer_data(dev, buffer, read_len) spimem_flash_ll_get_buffer_data((spi_mem_dev_t*)dev, buffer, read_len)
#define spi_flash_ll_set_buffer_data(dev, buffer, len) spimem_flash_ll_set_buffer_data((spi_mem_dev_t*)dev, buffer, len)
#define spi_flash_ll_program_page(dev, buffer, len) spimem_flash_ll_program_page((spi_mem_dev_t*)dev, buffer, len)
#define spi_flash_ll_user_start(dev, pe_ops) spimem_flash_ll_user_start((spi_mem_dev_t*)dev, pe_ops)
#define spi_flash_ll_host_idle(dev) spimem_flash_ll_host_idle((spi_mem_dev_t*)dev)
#define spi_flash_ll_read_phase(dev) spimem_flash_ll_read_phase((spi_mem_dev_t*)dev)
#define spi_flash_ll_set_cs_pin(dev, pin) spimem_flash_ll_set_cs_pin((spi_mem_dev_t*)dev, pin)
#define spi_flash_ll_set_read_mode(dev, read_mode) spimem_flash_ll_set_read_mode((spi_mem_dev_t*)dev, read_mode)
#define spi_flash_ll_set_clock(dev, clk) spimem_flash_ll_set_clock((spi_mem_dev_t*)dev, (spimem_flash_ll_clock_reg_t*)clk)
#define spi_flash_ll_set_miso_bitlen(dev, bitlen) spimem_flash_ll_set_miso_bitlen((spi_mem_dev_t*)dev, bitlen)
#define spi_flash_ll_set_mosi_bitlen(dev, bitlen) spimem_flash_ll_set_mosi_bitlen((spi_mem_dev_t*)dev, bitlen)
#define spi_flash_ll_set_command(dev, cmd, bitlen) spimem_flash_ll_set_command((spi_mem_dev_t*)dev, cmd, bitlen)
#define spi_flash_ll_set_addr_bitlen(dev, bitlen) spimem_flash_ll_set_addr_bitlen((spi_mem_dev_t*)dev, bitlen)
#define spi_flash_ll_get_addr_bitlen(dev) spimem_flash_ll_get_addr_bitlen((spi_mem_dev_t*) dev)
#define spi_flash_ll_set_address(dev, addr) spimem_flash_ll_set_address((spi_mem_dev_t*)dev, addr)
#define spi_flash_ll_set_usr_address(dev, addr, bitlen) spimem_flash_ll_set_usr_address((spi_mem_dev_t*)dev, addr, bitlen)
#define spi_flash_ll_set_dummy(dev, dummy) spimem_flash_ll_set_dummy((spi_mem_dev_t*)dev, dummy)
#define spi_flash_ll_set_hold(dev, hold_n) spimem_flash_ll_set_hold((spi_mem_dev_t*)dev, hold_n)
#define spi_flash_ll_set_cs_setup(dev, cs_setup_time) spimem_flash_ll_set_cs_setup((spi_mem_dev_t*)dev, cs_setup_time)
#define spi_flash_ll_set_extra_address(dev, extra_addr) spimem_flash_ll_set_extra_address((spi_mem_dev_t*)dev, extra_addr)
#define spi_flash_ll_get_ctrl_val(dev) spimem_flash_ll_get_ctrl_val((spi_mem_dev_t*)dev)
#endif
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,705 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
* The LL is not a public API; do not use it in application code.
* See readme.md in soc/include/hal/readme.md
******************************************************************************/
// The Lowlevel layer for SPI Flash
#pragma once
#include <stdlib.h>
#include <sys/param.h> // For MIN/MAX
#include <stdbool.h>
#include <string.h>
#include "soc/spi_periph.h"
#include "soc/spi_mem_struct.h"
#include "hal/assert.h"
#include "hal/misc.h"
#include "hal/spi_types.h"
#include "hal/spi_flash_types.h"
#include "soc/pcr_struct.h"
#include "esp_rom_sys.h"
// TODO: [ESP32C61] IDF-9314, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
#define spimem_flash_ll_get_hw(host_id) (((host_id)==SPI1_HOST ? &SPIMEM1 : NULL ))
#define spimem_flash_ll_hw_get_id(dev) ((dev) == (void*)&SPIMEM1? SPI1_HOST: -1)
#define SPIMEM_FLASH_LL_SPI0_MAX_LOCK_VAL_MSPI_TICKS (0x1f)
typedef typeof(SPIMEM1.clock.val) spimem_flash_ll_clock_reg_t;
/*------------------------------------------------------------------------------
* Control
*----------------------------------------------------------------------------*/
/**
* Reset peripheral registers before configuration and starting control
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_reset(spi_mem_dev_t *dev)
{
dev->user.val = 0;
dev->ctrl.val = 0;
}
/**
* Check whether the previous operation is done.
*
* @param dev Beginning address of the peripheral registers.
*
* @return true if last command is done, otherwise false.
*/
static inline bool spimem_flash_ll_cmd_is_done(const spi_mem_dev_t *dev)
{
return (dev->cmd.val == 0);
}
/**
* Erase the flash chip.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_erase_chip(spi_mem_dev_t *dev)
{
dev->cmd.flash_ce = 1;
}
/**
* Erase the sector; the address should be set by spimem_flash_ll_set_address.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_erase_sector(spi_mem_dev_t *dev)
{
dev->ctrl.val = 0;
dev->cmd.flash_se = 1;
}
/**
* Erase the block; the address should be set by spimem_flash_ll_set_address.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_erase_block(spi_mem_dev_t *dev)
{
dev->cmd.flash_be = 1;
}
/**
* Suspend erase/program operation.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_suspend(spi_mem_dev_t *dev)
{
dev->flash_sus_ctrl.flash_pes = 1;
}
/**
* Resume suspended erase/program operation.
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_resume(spi_mem_dev_t *dev)
{
dev->flash_sus_ctrl.flash_per = 1;
}
/**
* Initialize auto suspend mode; note that the ESP32-C3 does not support disabling auto-suspend.
*
* @param dev Beginning address of the peripheral registers.
* @param auto_sus Enable/disable Flash Auto-Suspend.
*/
static inline void spimem_flash_ll_auto_suspend_init(spi_mem_dev_t *dev, bool auto_sus)
{
dev->flash_sus_ctrl.flash_pes_en = auto_sus;
}
/**
* Initialize auto resume mode
*
* @param dev Beginning address of the peripheral registers.
* @param auto_res Enable/Disable Flash Auto-Resume.
*
*/
static inline void spimem_flash_ll_auto_resume_init(spi_mem_dev_t *dev, bool auto_res)
{
dev->flash_sus_ctrl.pes_per_en = auto_res;
}
/**
* Set up the flash suspend command, which may vary from chip to chip.
*
* @param dev Beginning address of the peripheral registers.
* @param sus_cmd Flash suspend command.
*
*/
static inline void spimem_flash_ll_suspend_cmd_setup(spi_mem_dev_t *dev, uint32_t sus_cmd)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->flash_sus_cmd, flash_pes_command, sus_cmd);
}
/**
* Set up the flash resume command, which may vary from chip to chip.
*
* @param dev Beginning address of the peripheral registers.
* @param res_cmd Flash resume command.
*
*/
static inline void spimem_flash_ll_resume_cmd_setup(spi_mem_dev_t *dev, uint32_t res_cmd)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sus_status, flash_per_command, res_cmd);
}
/**
* Set up the flash read-suspend-status command, which may vary from chip to chip.
*
* @param dev Beginning address of the peripheral registers.
* @param pesr_cmd Flash read suspend status command.
*
*/
static inline void spimem_flash_ll_rd_sus_cmd_setup(spi_mem_dev_t *dev, uint32_t pesr_cmd)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->flash_sus_cmd, wait_pesr_command, pesr_cmd);
}
/**
* Set up the check of SUS/SUS1/SUS2 to ensure the suspend status of the flash.
*
* @param dev Beginning address of the peripheral registers.
* @param sus_check_sus_en 1: enable, 0: disable.
*
*/
static inline void spimem_flash_ll_sus_check_sus_setup(spi_mem_dev_t *dev, bool sus_check_sus_en)
{
dev->flash_sus_ctrl.sus_timeout_cnt = 5;
dev->flash_sus_ctrl.pes_end_en = sus_check_sus_en;
}
/**
* Set up the check of SUS/SUS1/SUS2 to ensure the resume status of the flash.
*
* @param dev Beginning address of the peripheral registers.
* @param res_check_sus_en 1: enable, 0: disable.
*
*/
static inline void spimem_flash_ll_res_check_sus_setup(spi_mem_dev_t *dev, bool res_check_sus_en)
{
dev->flash_sus_ctrl.sus_timeout_cnt = 5;
dev->flash_sus_ctrl.per_end_en = res_check_sus_en;
}
/**
* Set the 8-bit command used to read the suspend status
*
* @param dev Beginning address of the peripheral registers.
* @param sus_conf Status mask used to judge the suspend state.
*/
static inline void spimem_flash_ll_set_read_sus_status(spi_mem_dev_t *dev, uint32_t sus_conf)
{
dev->flash_sus_ctrl.frd_sus_2b = 0;
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->flash_sus_ctrl, pesr_end_msk, sus_conf);
}
/**
* Configure the delay after Suspend/Resume
*
* @param dev Beginning address of the peripheral registers.
* @param dly_val delay time
*/
static inline void spimem_flash_ll_set_sus_delay(spi_mem_dev_t *dev, uint32_t dly_val)
{
dev->ctrl1.cs_hold_dly_res = dly_val;
dev->sus_status.flash_per_dly_128 = 1;
dev->sus_status.flash_pes_dly_128 = 1;
}
/**
* Configure the CS hold delay time (used to set the minimum CS high time tSHSL)
*
* @param dev Beginning address of the peripheral registers.
* @param cs_hold_delay cs hold delay time
*/
static inline void spimem_flash_set_cs_hold_delay(spi_mem_dev_t *dev, uint32_t cs_hold_delay)
{
SPIMEM0.ctrl2.cs_hold_delay = cs_hold_delay;
}
/**
* Initialize auto wait idle mode
*
* @param dev Beginning address of the peripheral registers.
* @param auto_waiti Enable/disable auto wait-idle function
*/
static inline void spimem_flash_ll_auto_wait_idle_init(spi_mem_dev_t *dev, bool auto_waiti)
{
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->flash_waiti_ctrl, waiti_cmd, 0x05);
dev->flash_sus_ctrl.flash_per_wait_en = auto_waiti;
dev->flash_sus_ctrl.flash_pes_wait_en = auto_waiti;
}
/**
* This function is used to set dummy phase when auto suspend is enabled.
*
* @note This function is only used when timing tuning is enabled.
*
* @param dev Beginning address of the peripheral registers.
* @param extra_dummy extra dummy length. Get from timing tuning.
*/
static inline void spimem_flash_ll_set_wait_idle_dummy_phase(spi_mem_dev_t *dev, uint32_t extra_dummy)
{
// Not supported on this chip.
}
/**
* Return the suspend status of erase or program operations.
*
* @param dev Beginning address of the peripheral registers.
*
* @return true if suspended, otherwise false.
*/
static inline bool spimem_flash_ll_sus_status(spi_mem_dev_t *dev)
{
return dev->sus_status.flash_sus;
}
/**
* @brief Set lock for SPI0 so that SPI0 can issue a new cache request after a cache transfer.
*
* @param dev Beginning address of the peripheral registers.
* @param lock_time Lock delay time
*/
static inline void spimem_flash_ll_sus_set_spi0_lock_trans(spi_mem_dev_t *dev, uint32_t lock_time)
{
dev->sus_status.spi0_lock_en = 1;
SPIMEM0.fsm.lock_delay_time = lock_time;
}
/**
* @brief Get tsus unit values in SPI_CLK cycles
*
* @param dev Beginning address of the peripheral registers.
* @return uint32_t tsus unit values
*/
static inline uint32_t spimem_flash_ll_get_tsus_unit_in_cycles(spi_mem_dev_t *dev)
{
uint32_t tsus_unit = 0;
if (dev->sus_status.flash_pes_dly_128 == 1) {
tsus_unit = 128;
} else {
tsus_unit = 4;
}
return tsus_unit;
}
/**
* Enable/disable write protection for the flash chip.
*
* @param dev Beginning address of the peripheral registers.
* @param wp true to enable the protection, false to disable (write enable).
*/
static inline void spimem_flash_ll_set_write_protect(spi_mem_dev_t *dev, bool wp)
{
if (wp) {
dev->cmd.flash_wrdi = 1;
} else {
dev->cmd.flash_wren = 1;
}
}
/**
* Get the read data from the buffer after ``spimem_flash_ll_read`` is done.
*
* @param dev Beginning address of the peripheral registers.
* @param buffer Buffer to hold the output data
* @param read_len Length to get out of the buffer
*/
static inline void spimem_flash_ll_get_buffer_data(spi_mem_dev_t *dev, void *buffer, uint32_t read_len)
{
if (((intptr_t)buffer % 4 == 0) && (read_len % 4 == 0)) {
// If everything is word-aligned, do a faster memcpy
memcpy(buffer, (void *)dev->data_buf, read_len);
} else {
// Otherwise, slow(er) path copies word by word
int copy_len = read_len;
for (int i = 0; i < (read_len + 3) / 4; i++) {
int word_len = MIN(sizeof(uint32_t), copy_len);
uint32_t word = dev->data_buf[i];
memcpy(buffer, &word, word_len);
buffer = (void *)((intptr_t)buffer + word_len);
copy_len -= word_len;
}
}
}
/**
* Set the data to be written in the data buffer.
*
* @param dev Beginning address of the peripheral registers.
* @param buffer Buffer holding the data
* @param length Length of data in bytes.
*/
static inline void spimem_flash_ll_set_buffer_data(spi_mem_dev_t *dev, const void *buffer, uint32_t length)
{
// Load data registers, word at a time
int num_words = (length + 3) / 4;
for (int i = 0; i < num_words; i++) {
uint32_t word = 0;
uint32_t word_len = MIN(length, sizeof(word));
memcpy(&word, buffer, word_len);
dev->data_buf[i] = word;
length -= word_len;
buffer = (void *)((intptr_t)buffer + word_len);
}
}
/**
* Program a page of the flash chip. Call ``spimem_flash_ll_set_address`` before
* this to set the address to program.
*
* @param dev Beginning address of the peripheral registers.
* @param buffer Buffer holding the data to program
* @param length Length to program.
*/
static inline void spimem_flash_ll_program_page(spi_mem_dev_t *dev, const void *buffer, uint32_t length)
{
dev->user.usr_dummy = 0;
spimem_flash_ll_set_buffer_data(dev, buffer, length);
dev->cmd.flash_pp = 1;
}
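/*
 * A minimal page-program flow assembled from the helpers in this file (a sketch;
 * `buf`, `len` and `addr` are caller-provided, and production drivers additionally
 * poll the flash status register for write-in-progress):
 *
 *     spi_mem_dev_t *dev = spimem_flash_ll_get_hw(SPI1_HOST);
 *     spimem_flash_ll_set_write_protect(dev, false);   // issues write-enable
 *     spimem_flash_ll_set_address(dev, addr);
 *     spimem_flash_ll_program_page(dev, buf, len);
 *     while (!spimem_flash_ll_cmd_is_done(dev)) {
 *     }
 */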
/**
* Trigger a user defined transaction. All phases, including command, address, dummy, and the data phases,
* should be configured before this is called.
*
* @param dev Beginning address of the peripheral registers.
* @param pe_ops Is page program/erase operation or not.
*/
static inline void spimem_flash_ll_user_start(spi_mem_dev_t *dev, bool pe_ops)
{
uint32_t usr_pe = (pe_ops ? 0x60000 : 0x40000);
dev->cmd.val |= usr_pe;
}
/**
* Check whether the host is idle to perform new commands.
*
* @param dev Beginning address of the peripheral registers.
*
* @return true if the host is idle, otherwise false
*/
static inline bool spimem_flash_ll_host_idle(const spi_mem_dev_t *dev)
{
return dev->cmd.mst_st == 0;
}
/**
* Set phases for user-defined transaction to read
*
* @param dev Beginning address of the peripheral registers.
*/
static inline void spimem_flash_ll_read_phase(spi_mem_dev_t *dev)
{
typeof (dev->user) user = {
.usr_mosi = 0,
.usr_miso = 1,
.usr_addr = 1,
.usr_command = 1,
};
dev->user.val = user.val;
}
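/*
 * Sketch of a user-defined read transaction assembled from the phase setters in this
 * file (the 0x0B Fast Read command, 24-bit address and 8 dummy cycles are assumptions
 * that depend on the attached flash part):
 *
 *     spimem_flash_ll_set_command(dev, 0x0B, 8);        // Fast Read
 *     spimem_flash_ll_set_addr_bitlen(dev, 24);
 *     spimem_flash_ll_set_address(dev, addr);
 *     spimem_flash_ll_set_dummy(dev, 8);
 *     spimem_flash_ll_set_miso_bitlen(dev, read_len * 8);
 *     spimem_flash_ll_read_phase(dev);
 *     spimem_flash_ll_user_start(dev, false);
 *     while (!spimem_flash_ll_cmd_is_done(dev)) {
 *     }
 *     spimem_flash_ll_get_buffer_data(dev, out_buf, read_len);
 */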
/*------------------------------------------------------------------------------
* Configs
*----------------------------------------------------------------------------*/
/**
* Select which pin to use for the flash
*
* @param dev Beginning address of the peripheral registers.
* @param pin Pin ID to use, 0-2. Set to other values to disable all the CS pins.
*/
static inline void spimem_flash_ll_set_cs_pin(spi_mem_dev_t *dev, int pin)
{
dev->misc.cs0_dis = (pin == 0) ? 0 : 1;
dev->misc.cs1_dis = (pin == 1) ? 0 : 1;
}
/**
* Set the read io mode.
*
* @param dev Beginning address of the peripheral registers.
* @param read_mode I/O mode to use in the following transactions.
*/
static inline void spimem_flash_ll_set_read_mode(spi_mem_dev_t *dev, esp_flash_io_mode_t read_mode)
{
typeof (dev->ctrl) ctrl;
ctrl.val = dev->ctrl.val;
ctrl.val &= ~(SPI_MEM_FREAD_QIO_M | SPI_MEM_FREAD_QUAD_M | SPI_MEM_FREAD_DIO_M | SPI_MEM_FREAD_DUAL_M);
ctrl.val |= SPI_MEM_FASTRD_MODE_M;
switch (read_mode) {
case SPI_FLASH_FASTRD:
//the default option
break;
case SPI_FLASH_QIO:
ctrl.fread_qio = 1;
break;
case SPI_FLASH_QOUT:
ctrl.fread_quad = 1;
break;
case SPI_FLASH_DIO:
ctrl.fread_dio = 1;
break;
case SPI_FLASH_DOUT:
ctrl.fread_dual = 1;
break;
case SPI_FLASH_SLOWRD:
ctrl.fastrd_mode = 0;
break;
default:
abort();
}
dev->ctrl.val = ctrl.val;
}
/**
* Set clock frequency to work at.
*
* @param dev Beginning address of the peripheral registers.
* @param clock_val pointer to the clock value to set
*/
static inline void spimem_flash_ll_set_clock(spi_mem_dev_t *dev, spimem_flash_ll_clock_reg_t *clock_val)
{
dev->clock.val = *clock_val;
}
/**
* Set the input length, in bits.
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of input, in bits.
*/
static inline void spimem_flash_ll_set_miso_bitlen(spi_mem_dev_t *dev, uint32_t bitlen)
{
dev->user.usr_miso = bitlen > 0;
dev->miso_dlen.usr_miso_bit_len = bitlen ? (bitlen - 1) : 0;
}
/**
* Set the output length, in bits (not including command, address and dummy
* phases)
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of output, in bits.
*/
static inline void spimem_flash_ll_set_mosi_bitlen(spi_mem_dev_t *dev, uint32_t bitlen)
{
dev->user.usr_mosi = bitlen > 0;
dev->mosi_dlen.usr_mosi_bit_len = bitlen ? (bitlen - 1) : 0;
}
/**
* Set the command.
*
* @param dev Beginning address of the peripheral registers.
* @param command Command to send
* @param bitlen Length of the command
*/
static inline void spimem_flash_ll_set_command(spi_mem_dev_t *dev, uint32_t command, uint32_t bitlen)
{
dev->user.usr_command = 1;
typeof(dev->user2) user2 = {
.usr_command_value = command,
.usr_command_bitlen = (bitlen - 1),
};
dev->user2.val = user2.val;
}
/**
* Get the address length that is set in register, in bits.
*
* @param dev Beginning address of the peripheral registers.
*
*/
static inline int spimem_flash_ll_get_addr_bitlen(spi_mem_dev_t *dev)
{
return dev->user.usr_addr ? dev->user1.usr_addr_bitlen + 1 : 0;
}
/**
* Set the address length to send, in bits. Should be called before commands that require the address, e.g. erase sector, read, write.
*
* @param dev Beginning address of the peripheral registers.
* @param bitlen Length of the address, in bits
*/
static inline void spimem_flash_ll_set_addr_bitlen(spi_mem_dev_t *dev, uint32_t bitlen)
{
dev->user1.usr_addr_bitlen = (bitlen - 1);
dev->user.usr_addr = bitlen ? 1 : 0;
}
/**
* Set extra address for bits M0-M7 in DIO/QIO mode.
*
* @param dev Beginning address of the peripheral registers.
* @param extra_addr extra address(M0-M7) to send.
*/
static inline void spimem_flash_ll_set_extra_address(spi_mem_dev_t *dev, uint32_t extra_addr)
{
dev->cache_fctrl.usr_addr_4byte = 0;
dev->rd_status.wb_mode = extra_addr;
}
/**
* Set the address to send. Should be called before commands that require the address, e.g. erase sector, read, write.
*
* @param dev Beginning address of the peripheral registers.
* @param addr Address to send
*/
static inline void spimem_flash_ll_set_address(spi_mem_dev_t *dev, uint32_t addr)
{
dev->addr = addr;
}
/**
* Set the address to send in user mode. Should be called before commands that require the address, e.g. erase sector, read, write.
*
* @param dev Beginning address of the peripheral registers.
* @param addr Address to send
*/
static inline void spimem_flash_ll_set_usr_address(spi_mem_dev_t *dev, uint32_t addr, uint32_t bitlen)
{
(void)bitlen;
spimem_flash_ll_set_address(dev, addr);
}
/**
* Set the length of dummy cycles.
*
* @param dev Beginning address of the peripheral registers.
* @param dummy_n Cycles of dummy phases
*/
static inline void spimem_flash_ll_set_dummy(spi_mem_dev_t *dev, uint32_t dummy_n)
{
dev->user.usr_dummy = dummy_n ? 1 : 0;
dev->user1.usr_dummy_cyclelen = dummy_n - 1;
}
/**
* Set CS hold time.
*
* @param dev Beginning address of the peripheral registers.
* @param hold_n CS hold time config used by the host.
*/
static inline void spimem_flash_ll_set_hold(spi_mem_dev_t *dev, uint32_t hold_n)
{
dev->ctrl2.cs_hold_time = hold_n - 1;
dev->user.cs_hold = (hold_n > 0? 1: 0);
}
static inline void spimem_flash_ll_set_cs_setup(spi_mem_dev_t *dev, uint32_t cs_setup_time)
{
dev->user.cs_setup = (cs_setup_time > 0 ? 1 : 0);
dev->ctrl2.cs_setup_time = cs_setup_time - 1;
}
/**
* Get the spi flash source clock frequency. Used for calculating
* the divider parameters.
*
* @param None
*
* @return the frequency of spi flash clock source.(MHz)
*/
static inline uint8_t spimem_flash_ll_get_source_freq_mhz(void)
{
#if CONFIG_IDF_ENV_FPGA
// in FPGA, mspi source freq is fixed to 80M
return 80;
#else
// TODO: may be improved (only the RC_FAST case is reported incorrectly).
// The default source is PLL480M and is currently hard-coded;
// in the future the clock source could be queried through an interface.
uint8_t clock_val = 0;
switch (PCR.mspi_clk_conf.mspi_fast_div_num) {
case 0:
clock_val = 40;
break;
case 1:
clock_val = 20;
break;
case 2:
clock_val = 10;
break;
case 3:
clock_val = 120;
break;
case 4:
clock_val = 96;
break;
case 5:
clock_val = 80;
break;
default:
HAL_ASSERT(false);
}
return clock_val;
#endif
}
/**
* Calculate spi_flash clock frequency division parameters for register.
*
* @param clkdiv frequency division factor
*
* @return Register setting for the given clock division factor.
*/
static inline uint32_t spimem_flash_ll_calculate_clock_reg(uint8_t clkdiv)
{
uint32_t div_parameter;
// See comments of `clock` in `spi_mem_struct.h`
if (clkdiv == 1) {
div_parameter = (1 << 31);
} else {
div_parameter = ((clkdiv - 1) | (((clkdiv - 1) / 2 & 0xff) << 8 ) | (((clkdiv - 1) & 0xff) << 16));
}
return div_parameter;
}
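/*
 * Worked example (assuming the 80 MHz source reported by
 * spimem_flash_ll_get_source_freq_mhz()): a 40 MHz flash clock needs clkdiv = 80 / 40 = 2,
 * so the register value is (2-1) | (((2-1)/2 & 0xff) << 8) | (((2-1) & 0xff) << 16) = 0x010001.
 * clkdiv == 1 instead selects the divider-bypass bit (1 << 31).
 */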
/**
* @brief Set the write protect (WP) signal level output when SPI is idle
*
* @param dev Beginning address of the peripheral registers.
* @param level 1: output high, 0: output low
*/
static inline void spimem_flash_ll_set_wp_level(spi_mem_dev_t *dev, bool level)
{
dev->ctrl.wp = level;
}
/**
* @brief Get the ctrl value of mspi
*
* @return uint32_t The value of ctrl register
*/
static inline uint32_t spimem_flash_ll_get_ctrl_val(spi_mem_dev_t *dev)
{
return dev->ctrl.val;
}
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,205 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "soc/systimer_struct.h"
#include "soc/clk_tree_defs.h"
#include "soc/pcr_struct.h"
#include "hal/assert.h"
// TODO: [ESP32C61] IDF-9307, inherit from C6
#ifdef __cplusplus
extern "C" {
#endif
// All these functions get invoked either from an ISR or from HAL code that is linked to IRAM.
// Always inline these functions even when no gcc optimization is applied.
/******************* Clock *************************/
__attribute__((always_inline)) static inline void systimer_ll_enable_clock(systimer_dev_t *dev, bool en)
{
dev->conf.clk_en = en;
}
// Set clock source: XTAL (default) or RC_FAST
static inline void systimer_ll_set_clock_source(soc_periph_systimer_clk_src_t clk_src)
{
PCR.systimer_func_clk_conf.systimer_func_clk_sel = (clk_src == SYSTIMER_CLK_SRC_RC_FAST) ? 1 : 0;
}
static inline soc_periph_systimer_clk_src_t systimer_ll_get_clock_source(void)
{
return (PCR.systimer_func_clk_conf.systimer_func_clk_sel == 1) ? SYSTIMER_CLK_SRC_RC_FAST : SYSTIMER_CLK_SRC_XTAL;
}
/**
* @brief Enable the bus clock for systimer module
*
* @param enable true to enable, false to disable
*/
static inline void systimer_ll_enable_bus_clock(bool enable)
{
PCR.systimer_conf.systimer_clk_en = enable;
}
/// use a macro to wrap the function, force the caller to use it in a critical section
/// the critical section needs to declare the __DECLARE_RCC_RC_ATOMIC_ENV variable in advance
#define systimer_ll_enable_bus_clock(...) (void)__DECLARE_RCC_RC_ATOMIC_ENV; systimer_ll_enable_bus_clock(__VA_ARGS__)
/**
* @brief Reset the systimer module
*/
static inline void systimer_ll_reset_register(void)
{
PCR.systimer_conf.systimer_rst_en = 1;
PCR.systimer_conf.systimer_rst_en = 0;
}
/// use a macro to wrap the function, force the caller to use it in a critical section
/// the critical section needs to declare the __DECLARE_RCC_RC_ATOMIC_ENV variable in advance
#define systimer_ll_reset_register(...) (void)__DECLARE_RCC_RC_ATOMIC_ENV; systimer_ll_reset_register(__VA_ARGS__)
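// Usage sketch (illustrative only): the two wrapped calls above are meant to run inside
// a critical section that declares __DECLARE_RCC_RC_ATOMIC_ENV; the driver-layer helper
// assumed here is PERIPH_RCC_RC_ATOMIC() from "esp_private/periph_ctrl.h":
//
//     PERIPH_RCC_RC_ATOMIC() {
//         systimer_ll_enable_bus_clock(true);
//         systimer_ll_reset_register();
//     }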
/********************** ETM *****************************/
__attribute__((always_inline)) static inline void systimer_ll_enable_etm(systimer_dev_t *dev, bool en)
{
dev->conf.etm_en = en;
}
/******************* Counter *************************/
__attribute__((always_inline)) static inline void systimer_ll_enable_counter(systimer_dev_t *dev, uint32_t counter_id, bool en)
{
if (en) {
dev->conf.val |= 1 << (30 - counter_id);
} else {
dev->conf.val &= ~(1 << (30 - counter_id));
}
}
__attribute__((always_inline)) static inline void systimer_ll_counter_can_stall_by_cpu(systimer_dev_t *dev, uint32_t counter_id, uint32_t cpu_id, bool can)
{
if (can) {
dev->conf.val |= 1 << ((28 - counter_id * 2) - cpu_id);
} else {
dev->conf.val &= ~(1 << ((28 - counter_id * 2) - cpu_id));
}
}
__attribute__((always_inline)) static inline void systimer_ll_counter_snapshot(systimer_dev_t *dev, uint32_t counter_id)
{
dev->unit_op[counter_id].timer_unit_update = 1;
}
__attribute__((always_inline)) static inline bool systimer_ll_is_counter_value_valid(systimer_dev_t *dev, uint32_t counter_id)
{
return dev->unit_op[counter_id].timer_unit_value_valid;
}
__attribute__((always_inline)) static inline void systimer_ll_set_counter_value(systimer_dev_t *dev, uint32_t counter_id, uint64_t value)
{
dev->unit_load_val[counter_id].hi.timer_unit_load_hi = value >> 32;
dev->unit_load_val[counter_id].lo.timer_unit_load_lo = value & 0xFFFFFFFF;
}
__attribute__((always_inline)) static inline uint32_t systimer_ll_get_counter_value_low(systimer_dev_t *dev, uint32_t counter_id)
{
return dev->unit_val[counter_id].lo.timer_unit_value_lo;
}
__attribute__((always_inline)) static inline uint32_t systimer_ll_get_counter_value_high(systimer_dev_t *dev, uint32_t counter_id)
{
return dev->unit_val[counter_id].hi.timer_unit_value_hi;
}
__attribute__((always_inline)) static inline void systimer_ll_apply_counter_value(systimer_dev_t *dev, uint32_t counter_id)
{
dev->unit_load[counter_id].val = 0x01;
}
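/**
 * Usage sketch (illustrative only): fetch a coherent 64-bit count with the snapshot,
 * valid-check and read helpers above. The example_* name is hypothetical.
 */
static inline uint64_t example_systimer_read_count(systimer_dev_t *dev, uint32_t counter_id)
{
    systimer_ll_counter_snapshot(dev, counter_id);                 // request a snapshot of the running counter
    while (!systimer_ll_is_counter_value_valid(dev, counter_id)) { // wait until the snapshot is latched
    }
    uint32_t lo = systimer_ll_get_counter_value_low(dev, counter_id);
    uint32_t hi = systimer_ll_get_counter_value_high(dev, counter_id);
    return ((uint64_t)hi << 32) | lo;
}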
/******************* Alarm *************************/
__attribute__((always_inline)) static inline void systimer_ll_set_alarm_target(systimer_dev_t *dev, uint32_t alarm_id, uint64_t value)
{
dev->target_val[alarm_id].hi.timer_target_hi = value >> 32;
dev->target_val[alarm_id].lo.timer_target_lo = value & 0xFFFFFFFF;
}
__attribute__((always_inline)) static inline uint64_t systimer_ll_get_alarm_target(systimer_dev_t *dev, uint32_t alarm_id)
{
return ((uint64_t)(dev->target_val[alarm_id].hi.timer_target_hi) << 32) | dev->target_val[alarm_id].lo.timer_target_lo;
}
__attribute__((always_inline)) static inline void systimer_ll_connect_alarm_counter(systimer_dev_t *dev, uint32_t alarm_id, uint32_t counter_id)
{
dev->target_conf[alarm_id].target_timer_unit_sel = counter_id;
}
__attribute__((always_inline)) static inline void systimer_ll_enable_alarm_oneshot(systimer_dev_t *dev, uint32_t alarm_id)
{
dev->target_conf[alarm_id].target_period_mode = 0;
}
__attribute__((always_inline)) static inline void systimer_ll_enable_alarm_period(systimer_dev_t *dev, uint32_t alarm_id)
{
dev->target_conf[alarm_id].target_period_mode = 1;
}
__attribute__((always_inline)) static inline void systimer_ll_set_alarm_period(systimer_dev_t *dev, uint32_t alarm_id, uint32_t period)
{
HAL_ASSERT(period < (1 << 26));
dev->target_conf[alarm_id].target_period = period;
}
__attribute__((always_inline)) static inline uint32_t systimer_ll_get_alarm_period(systimer_dev_t *dev, uint32_t alarm_id)
{
return dev->target_conf[alarm_id].target_period;
}
__attribute__((always_inline)) static inline void systimer_ll_apply_alarm_value(systimer_dev_t *dev, uint32_t alarm_id)
{
dev->comp_load[alarm_id].val = 0x01;
}
__attribute__((always_inline)) static inline void systimer_ll_enable_alarm(systimer_dev_t *dev, uint32_t alarm_id, bool en)
{
if (en) {
dev->conf.val |= 1 << (24 - alarm_id);
} else {
dev->conf.val &= ~(1 << (24 - alarm_id));
}
}
/******************* Interrupt *************************/
__attribute__((always_inline)) static inline void systimer_ll_enable_alarm_int(systimer_dev_t *dev, uint32_t alarm_id, bool en)
{
if (en) {
dev->int_ena.val |= 1 << alarm_id;
} else {
dev->int_ena.val &= ~(1 << alarm_id);
}
}
__attribute__((always_inline)) static inline bool systimer_ll_is_alarm_int_fired(systimer_dev_t *dev, uint32_t alarm_id)
{
return dev->int_st.val & (1 << alarm_id);
}
__attribute__((always_inline)) static inline void systimer_ll_clear_alarm_int(systimer_dev_t *dev, uint32_t alarm_id)
{
dev->int_clr.val |= 1 << alarm_id;
}
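/**
 * Usage sketch (illustrative only): arm a one-shot alarm with the helpers above.
 * The example_* name and the call order are assumptions modelled on typical HAL
 * usage, not a definitive sequence.
 */
static inline void example_systimer_arm_oneshot_alarm(systimer_dev_t *dev, uint32_t alarm_id, uint32_t counter_id, uint64_t target)
{
    systimer_ll_connect_alarm_counter(dev, alarm_id, counter_id); // compare against the chosen counter
    systimer_ll_enable_alarm_oneshot(dev, alarm_id);              // fire once when the target is reached
    systimer_ll_set_alarm_target(dev, alarm_id, target);
    systimer_ll_apply_alarm_value(dev, alarm_id);                 // latch the new target into the comparator
    systimer_ll_enable_alarm_int(dev, alarm_id, true);
    systimer_ll_enable_alarm(dev, alarm_id, true);
}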
#ifdef __cplusplus
}
#endif


@ -0,0 +1,384 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// Attention: the Timer Group has 3 independent functions: General Purpose Timer, Watchdog Timer and Clock Calibration.
// This Low Level driver only serves the General Purpose Timer function.
#pragma once
#include <stdbool.h>
#include "hal/assert.h"
#include "hal/misc.h"
#include "hal/timer_types.h"
#include "soc/timer_group_struct.h"
#include "soc/pcr_struct.h"
#include "soc/soc_etm_source.h"
// TODO: [ESP32C61] IDF-9306, inherit from c6
#ifdef __cplusplus
extern "C" {
#endif
// Get the timer group register base address with the given group number
#define TIMER_LL_GET_HW(group_id) ((group_id == 0) ? (&TIMERG0) : (&TIMERG1))
#define TIMER_LL_EVENT_ALARM(timer_id) (1 << (timer_id))
#define TIMER_LL_ETM_TASK_TABLE(group, timer, task) \
(uint32_t [2][1][GPTIMER_ETM_TASK_MAX]){{{ \
[GPTIMER_ETM_TASK_START_COUNT] = TIMER0_TASK_CNT_START_TIMER0, \
[GPTIMER_ETM_TASK_STOP_COUNT] = TIMER0_TASK_CNT_STOP_TIMER0, \
[GPTIMER_ETM_TASK_EN_ALARM] = TIMER0_TASK_ALARM_START_TIMER0, \
[GPTIMER_ETM_TASK_RELOAD] = TIMER0_TASK_CNT_RELOAD_TIMER0, \
[GPTIMER_ETM_TASK_CAPTURE] = TIMER0_TASK_CNT_CAP_TIMER0, \
}}, \
{{ \
[GPTIMER_ETM_TASK_START_COUNT] = TIMER1_TASK_CNT_START_TIMER0, \
[GPTIMER_ETM_TASK_STOP_COUNT] = TIMER1_TASK_CNT_STOP_TIMER0, \
[GPTIMER_ETM_TASK_EN_ALARM] = TIMER1_TASK_ALARM_START_TIMER0, \
[GPTIMER_ETM_TASK_RELOAD] = TIMER1_TASK_CNT_RELOAD_TIMER0, \
[GPTIMER_ETM_TASK_CAPTURE] = TIMER1_TASK_CNT_CAP_TIMER0, \
}}, \
}[group][timer][task]
#define TIMER_LL_ETM_EVENT_TABLE(group, timer, event) \
(uint32_t [2][1][GPTIMER_ETM_EVENT_MAX]){{{ \
[GPTIMER_ETM_EVENT_ALARM_MATCH] = TIMER0_EVT_CNT_CMP_TIMER0, \
}}, \
{{ \
[GPTIMER_ETM_EVENT_ALARM_MATCH] = TIMER1_EVT_CNT_CMP_TIMER0, \
}}, \
}[group][timer][event]
/**
* @brief Enable the bus clock for timer group module
*
* @param group_id Group ID
* @param enable true to enable, false to disable
*/
static inline void timer_ll_enable_bus_clock(int group_id, bool enable)
{
if (group_id == 0) {
PCR.timergroup0_conf.tg0_clk_en = enable;
} else {
PCR.timergroup1_conf.tg1_clk_en = enable;
}
}
/// use a macro to wrap the function, force the caller to use it in a critical section
/// the critical section needs to declare the __DECLARE_RCC_RC_ATOMIC_ENV variable in advance
#define timer_ll_enable_bus_clock(...) (void)__DECLARE_RCC_RC_ATOMIC_ENV; timer_ll_enable_bus_clock(__VA_ARGS__)
/**
* @brief Reset the timer group module
*
 * @note After the registers are reset, "flash boot protection" is enabled again.
 *       Flash boot protection is no longer needed once the system has booted up,
 *       so this function disables it by default to prevent the system from being reset unexpectedly.
*
* @param group_id Group ID
*/
static inline void timer_ll_reset_register(int group_id)
{
if (group_id == 0) {
PCR.timergroup0_conf.tg0_rst_en = 1;
PCR.timergroup0_conf.tg0_rst_en = 0;
TIMERG0.wdtconfig0.wdt_flashboot_mod_en = 0;
} else {
PCR.timergroup1_conf.tg1_rst_en = 1;
PCR.timergroup1_conf.tg1_rst_en = 0;
TIMERG1.wdtconfig0.wdt_flashboot_mod_en = 0;
}
}
/// use a macro to wrap the function, force the caller to use it in a critical section
/// the critical section needs to declare the __DECLARE_RCC_RC_ATOMIC_ENV variable in advance
#define timer_ll_reset_register(...) (void)__DECLARE_RCC_RC_ATOMIC_ENV; timer_ll_reset_register(__VA_ARGS__)
/**
* @brief Set clock source for timer
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param clk_src Clock source
*/
static inline void timer_ll_set_clock_source(timg_dev_t *hw, uint32_t timer_num, gptimer_clock_source_t clk_src)
{
(void)timer_num; // only one timer in each group
uint8_t clk_id = 0;
switch (clk_src) {
case GPTIMER_CLK_SRC_XTAL:
clk_id = 0;
break;
case GPTIMER_CLK_SRC_PLL_F80M:
clk_id = 1;
break;
case GPTIMER_CLK_SRC_RC_FAST:
clk_id = 2;
break;
default:
HAL_ASSERT(false);
break;
}
if (hw == &TIMERG0) {
PCR.timergroup0_timer_clk_conf.tg0_timer_clk_sel = clk_id;
} else {
PCR.timergroup1_timer_clk_conf.tg1_timer_clk_sel = clk_id;
}
}
/**
* @brief Enable Timer Group (GPTimer) module clock
*
* @param hw Timer Group register base address
* @param timer_num Timer index in the group
* @param en true to enable, false to disable
*/
static inline void timer_ll_enable_clock(timg_dev_t *hw, uint32_t timer_num, bool en)
{
(void)timer_num; // only one timer in each group
if (hw == &TIMERG0) {
PCR.timergroup0_timer_clk_conf.tg0_timer_clk_en = en;
} else {
PCR.timergroup1_timer_clk_conf.tg1_timer_clk_en = en;
}
}
/**
* @brief Enable alarm event
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param en True: enable alarm
* False: disable alarm
*/
__attribute__((always_inline))
static inline void timer_ll_enable_alarm(timg_dev_t *hw, uint32_t timer_num, bool en)
{
hw->hw_timer[timer_num].config.tx_alarm_en = en;
}
/**
* @brief Set clock prescale for timer
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param divider Prescale value (0 and 1 are not valid)
*/
static inline void timer_ll_set_clock_prescale(timg_dev_t *hw, uint32_t timer_num, uint32_t divider)
{
HAL_ASSERT(divider >= 2 && divider <= 65536);
if (divider >= 65536) {
divider = 0;
}
HAL_FORCE_MODIFY_U32_REG_FIELD(hw->hw_timer[timer_num].config, tx_divider, divider);
hw->hw_timer[timer_num].config.tx_divcnt_rst = 1;
}
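/**
 * Worked example (illustrative only): pick a divider for a 1 us tick, assuming the
 * timer is clocked from the 40 MHz XTAL source. The example_* helper is hypothetical.
 */
static inline void example_timer_set_1us_resolution(timg_dev_t *hw, uint32_t timer_num)
{
    // 40 MHz / 40 = 1 MHz counter rate, i.e. one tick per microsecond
    timer_ll_set_clock_prescale(hw, timer_num, 40);
}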
/**
* @brief Enable auto-reload mode
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param en True: enable auto reload mode
* False: disable auto reload mode
*/
__attribute__((always_inline))
static inline void timer_ll_enable_auto_reload(timg_dev_t *hw, uint32_t timer_num, bool en)
{
hw->hw_timer[timer_num].config.tx_autoreload = en;
}
/**
* @brief Set count direction
*
* @param hw Timer peripheral register base address
* @param timer_num Timer number in the group
* @param direction Count direction
*/
static inline void timer_ll_set_count_direction(timg_dev_t *hw, uint32_t timer_num, gptimer_count_direction_t direction)
{
hw->hw_timer[timer_num].config.tx_increase = (direction == GPTIMER_COUNT_UP);
}
/**
 * @brief Enable timer, start counting
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param en True: enable the counter
* False: disable the counter
*/
__attribute__((always_inline))
static inline void timer_ll_enable_counter(timg_dev_t *hw, uint32_t timer_num, bool en)
{
hw->hw_timer[timer_num].config.tx_en = en;
}
/**
* @brief Trigger software capture event
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
*/
__attribute__((always_inline))
static inline void timer_ll_trigger_soft_capture(timg_dev_t *hw, uint32_t timer_num)
{
hw->hw_timer[timer_num].update.tx_update = 1;
// Timer register is in a different clock domain from Timer hardware logic
// We need to wait for the update to take effect before fetching the count value
while (hw->hw_timer[timer_num].update.tx_update) {
}
}
/**
* @brief Get counter value
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
*
* @return counter value
*/
__attribute__((always_inline))
static inline uint64_t timer_ll_get_counter_value(timg_dev_t *hw, uint32_t timer_num)
{
return ((uint64_t)hw->hw_timer[timer_num].hi.tx_hi << 32) | (hw->hw_timer[timer_num].lo.tx_lo);
}
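/**
 * Usage sketch (illustrative only): latch and read the current count with the two
 * helpers above. The example_* name is hypothetical.
 */
static inline uint64_t example_timer_read_count(timg_dev_t *hw, uint32_t timer_num)
{
    timer_ll_trigger_soft_capture(hw, timer_num); // latch the running count into the hi/lo registers
    return timer_ll_get_counter_value(hw, timer_num);
}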
/**
* @brief Set alarm value
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param alarm_value When counter reaches alarm value, alarm event will be triggered
*/
__attribute__((always_inline))
static inline void timer_ll_set_alarm_value(timg_dev_t *hw, uint32_t timer_num, uint64_t alarm_value)
{
hw->hw_timer[timer_num].alarmhi.tx_alarm_hi = (uint32_t)(alarm_value >> 32);
hw->hw_timer[timer_num].alarmlo.tx_alarm_lo = (uint32_t)alarm_value;
}
/**
* @brief Set reload value
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @param reload_val Reload counter value
*/
__attribute__((always_inline))
static inline void timer_ll_set_reload_value(timg_dev_t *hw, uint32_t timer_num, uint64_t reload_val)
{
hw->hw_timer[timer_num].loadhi.tx_load_hi = (uint32_t)(reload_val >> 32);
hw->hw_timer[timer_num].loadlo.tx_load_lo = (uint32_t)reload_val;
}
/**
* @brief Get reload value
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
* @return reload count value
*/
__attribute__((always_inline))
static inline uint64_t timer_ll_get_reload_value(timg_dev_t *hw, uint32_t timer_num)
{
return ((uint64_t)hw->hw_timer[timer_num].loadhi.tx_load_hi << 32) | (hw->hw_timer[timer_num].loadlo.tx_load_lo);
}
/**
 * @brief Trigger software reload; the value set by `timer_ll_set_reload_value()` is loaded into the counter immediately
*
* @param hw Timer Group register base address
* @param timer_num Timer number in the group
*/
__attribute__((always_inline))
static inline void timer_ll_trigger_soft_reload(timg_dev_t *hw, uint32_t timer_num)
{
hw->hw_timer[timer_num].load.tx_load = 1;
}
/**
* @brief Enable ETM module
*
* @param hw Timer Group register base address
* @param en True: enable ETM module, False: disable ETM module
*/
static inline void timer_ll_enable_etm(timg_dev_t *hw, bool en)
{
hw->regclk.etm_en = en;
}
/**
* @brief Enable timer interrupt by mask
*
* @param hw Timer Group register base address
* @param mask Mask of interrupt events
* @param en True: enable interrupt
* False: disable interrupt
*/
__attribute__((always_inline))
static inline void timer_ll_enable_intr(timg_dev_t *hw, uint32_t mask, bool en)
{
if (en) {
hw->int_ena_timers.val |= mask;
} else {
hw->int_ena_timers.val &= ~mask;
}
}
/**
* @brief Get interrupt status
*
* @param hw Timer Group register base address
*
* @return Interrupt status
*/
__attribute__((always_inline))
static inline uint32_t timer_ll_get_intr_status(timg_dev_t *hw)
{
return hw->int_st_timers.val & 0x01;
}
/**
* @brief Clear interrupt status by mask
*
* @param hw Timer Group register base address
* @param mask Interrupt events mask
*/
__attribute__((always_inline))
static inline void timer_ll_clear_intr_status(timg_dev_t *hw, uint32_t mask)
{
hw->int_clr_timers.val = mask;
}
/**
* @brief Enable the register clock forever
*
* @param hw Timer Group register base address
* @param en True: Enable the register clock forever
* False: Register clock is enabled only when register operation happens
*/
static inline void timer_ll_enable_register_clock_always_on(timg_dev_t *hw, bool en)
{
hw->regclk.clk_en = en;
}
/**
* @brief Get interrupt status register address
*
* @param hw Timer Group register base address
*
* @return Interrupt status register address
*/
static inline volatile void *timer_ll_get_intr_status_reg(timg_dev_t *hw)
{
return &hw->int_st_timers;
}
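/**
 * Usage sketch (illustrative only): arm an alarm on a count-up timer with the helpers
 * above. The example_* name and the call order are assumptions modelled on typical
 * driver usage, not a definitive sequence.
 */
static inline void example_timer_arm_alarm(timg_dev_t *hw, uint32_t timer_num, uint64_t alarm_value)
{
    timer_ll_set_count_direction(hw, timer_num, GPTIMER_COUNT_UP);
    timer_ll_set_alarm_value(hw, timer_num, alarm_value);
    timer_ll_enable_intr(hw, TIMER_LL_EVENT_ALARM(timer_num), true);
    timer_ll_enable_alarm(hw, timer_num, true);
    timer_ll_enable_counter(hw, timer_num, true);
}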
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large


@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The HAL layer for PAU (ESP32-C61 specific part)
#include "soc/soc.h"
#include "esp_attr.h"
#include "hal/pau_hal.h"
#include "hal/pau_types.h"
void pau_hal_set_regdma_entry_link_addr(pau_hal_context_t *hal, pau_regdma_link_addr_t *link_addr)
{
pau_ll_set_regdma_link0_addr(hal->dev, (*link_addr)[0]);
pau_ll_set_regdma_link1_addr(hal->dev, (*link_addr)[1]);
pau_ll_set_regdma_link2_addr(hal->dev, (*link_addr)[2]);
/* The link 3 of REGDMA is reserved, PMU state switching will not use
* REGDMA link 3 */
}
void IRAM_ATTR pau_hal_start_regdma_modem_link(pau_hal_context_t *hal, bool backup_or_restore)
{
pau_ll_clear_regdma_backup_done_intr_state(hal->dev);
pau_ll_set_regdma_select_wifimac_link(hal->dev);
pau_ll_set_regdma_wifimac_link_backup_direction(hal->dev, backup_or_restore);
pau_ll_set_regdma_wifimac_link_backup_start_enable(hal->dev);
while (!(pau_ll_get_regdma_intr_raw_signal(hal->dev) & PAU_DONE_INT_RAW));
}
void IRAM_ATTR pau_hal_stop_regdma_modem_link(pau_hal_context_t *hal)
{
pau_ll_set_regdma_wifimac_link_backup_start_disable(hal->dev);
pau_ll_set_regdma_deselect_wifimac_link(hal->dev);
pau_ll_clear_regdma_backup_done_intr_state(hal->dev);
}
void IRAM_ATTR pau_hal_start_regdma_extra_link(pau_hal_context_t *hal, bool backup_or_restore)
{
pau_ll_clear_regdma_backup_done_intr_state(hal->dev);
/* The link 3 of REGDMA is reserved, we use it as an extra linked list to
* provide backup and restore services for BLE, IEEE802.15.4 and possibly
* other modules */
pau_ll_select_regdma_entry_link(hal->dev, 3);
pau_ll_set_regdma_entry_link_backup_direction(hal->dev, backup_or_restore);
pau_ll_set_regdma_entry_link_backup_start_enable(hal->dev);
while (!(pau_ll_get_regdma_intr_raw_signal(hal->dev) & PAU_DONE_INT_RAW));
}
void IRAM_ATTR pau_hal_stop_regdma_extra_link(pau_hal_context_t *hal)
{
pau_ll_set_regdma_entry_link_backup_start_disable(hal->dev);
pau_ll_select_regdma_entry_link(hal->dev, 0); /* restore link select to default */
pau_ll_clear_regdma_backup_done_intr_state(hal->dev);
}


@ -0,0 +1,70 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The HAL layer for PMU (ESP32-C61 specific part)
#include "soc/soc.h"
#include "esp_attr.h"
#include "hal/pmu_hal.h"
#include "hal/pmu_types.h"
void pmu_hal_hp_set_digital_power_up_wait_cycle(pmu_hal_context_t *hal, uint32_t power_supply_wait_cycle, uint32_t power_up_wait_cycle)
{
pmu_ll_hp_set_digital_power_supply_wait_cycle(hal->dev, power_supply_wait_cycle);
pmu_ll_hp_set_digital_power_up_wait_cycle(hal->dev, power_up_wait_cycle);
}
uint32_t pmu_hal_hp_get_digital_power_up_wait_cycle(pmu_hal_context_t *hal)
{
uint32_t power_supply_wait_cycle = pmu_ll_hp_get_digital_power_supply_wait_cycle(hal->dev);
uint32_t power_up_wait_cycle = pmu_ll_hp_get_digital_power_up_wait_cycle(hal->dev);
return power_supply_wait_cycle + power_up_wait_cycle;
}
void pmu_hal_lp_set_digital_power_up_wait_cycle(pmu_hal_context_t *hal, uint32_t power_supply_wait_cycle, uint32_t power_up_wait_cycle)
{
pmu_ll_lp_set_digital_power_supply_wait_cycle(hal->dev, power_supply_wait_cycle);
pmu_ll_lp_set_digital_power_up_wait_cycle(hal->dev, power_up_wait_cycle);
}
uint32_t pmu_hal_lp_get_digital_power_up_wait_cycle(pmu_hal_context_t *hal)
{
uint32_t power_supply_wait_cycle = pmu_ll_lp_get_digital_power_supply_wait_cycle(hal->dev);
uint32_t power_up_wait_cycle = pmu_ll_lp_get_digital_power_up_wait_cycle(hal->dev);
return power_supply_wait_cycle + power_up_wait_cycle;
}
void pmu_hal_hp_set_sleep_active_backup_enable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_active_to_sleep_backup_enable(hal->dev);
pmu_ll_hp_set_sleep_to_active_backup_enable(hal->dev);
}
void pmu_hal_hp_set_sleep_active_backup_disable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_sleep_to_active_backup_disable(hal->dev);
pmu_ll_hp_set_active_to_sleep_backup_disable(hal->dev);
}
void pmu_hal_hp_set_sleep_modem_backup_enable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_sleep_to_modem_backup_enable(hal->dev);
}
void pmu_hal_hp_set_sleep_modem_backup_disable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_sleep_to_modem_backup_disable(hal->dev);
}
void pmu_hal_hp_set_modem_active_backup_enable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_modem_to_active_backup_enable(hal->dev);
}
void pmu_hal_hp_set_modem_active_backup_disable(pmu_hal_context_t *hal)
{
pmu_ll_hp_set_modem_to_active_backup_disable(hal->dev);
}


@ -149,6 +149,10 @@ FORCE_INLINE_ATTR void rv_utils_set_mtvec(uint32_t mtvec_val)
#elif CONFIG_IDF_TARGET_ESP32C5
// TODO: [ESP32C5] IDF-8654, IDF-8655 (inherit from P4) Check the correctness
#define MINTSTATUS 0x346
#elif CONFIG_IDF_TARGET_ESP32C61
// TODO: [ESP32C61] IDF-9261, IDF-9262 (inherit from c6) Check
#define MINTSTATUS 0xFB1
#define MINTTHRESH 0x347
#else
#error "rv_utils_get_mintstatus() is not implemented. Check for correct mintstatus register address."
#endif /* CONFIG_IDF_TARGET_ESP32P4 */
@ -180,11 +184,18 @@ FORCE_INLINE_ATTR void rv_utils_intr_disable(uint32_t intr_mask)
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_restore_intlevel(uint32_t restoreval)
{
    // TODO: [ESP32C61] IDF-9261, changed in the verification code, please check
// RV_WRITE_CSR(MINTTHRESH, restoreval);
REG_SET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH, ((restoreval << (8 - NLBITS))) | 0x1f);
}
FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_set_intlevel(uint32_t intlevel)
{
    // TODO: [ESP32C61] IDF-9261, added in the verification code, please check
// #if CONFIG_IDF_TARGET_ESP32C61
// uint32_t old_thresh = RV_READ_CSR(MINTTHRESH);
// RV_WRITE_CSR(MINTTHRESH, ((intlevel << (8 - NLBITS)) | 0x1f));
// #else
uint32_t old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
uint32_t old_thresh;
@ -201,7 +212,7 @@ FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_set_intlevel(
*/
REG_READ(CLIC_INT_THRESH_REG);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
// #endif
return old_thresh;
}


@ -5,11 +5,38 @@
*/
#include <stdint.h>
#include <stddef.h>
#include <assert.h>
#include "soc/soc.h"
#include "riscv/interrupt.h"
#include "soc/interrupt_reg.h"
#include "riscv/csr.h"
#include "esp_attr.h"
#include "riscv/rv_utils.h"
// TODO: [ESP32C61] IDF-9261, added in the verification code, please check
// #if SOC_INT_CLIC_SUPPORTED
// /**
// * If the target is using the CLIC as the interrupt controller, we have 32 external interrupt lines and 16 internal
// * lines. Let's consider the internal ones reserved and not mappable to any handler.
// */
// #define RV_EXTERNAL_INT_COUNT 32
// #define RV_EXTERNAL_INT_OFFSET (CLIC_EXT_INTR_NUM_OFFSET)
// #else // !SOC_INT_CLIC_SUPPORTED
// /**
// * In the case of INTC, all the interrupt lines are dedicated to external peripherals, so the offset is 0.
// * In the case of PLIC, the reserved interrupts are not contiguous, moreover, they are already marked as
// * unusable by the interrupt allocator, so the offset can also be 0 here.
// */
// #define RV_EXTERNAL_INT_COUNT 32
// #define RV_EXTERNAL_INT_OFFSET 0
// #endif // SOC_INT_CLIC_SUPPORTED
typedef struct {
intr_handler_t handler;
void *arg;
@ -64,6 +91,24 @@ void _global_interrupt_handler(intptr_t sp, int mcause)
}
}
// TODO: [ESP32C61] IDF-9261, added in the verification code, please check
// /*************************** ESP-RV Interrupt Controller ***************************/
// #if SOC_INT_CLIC_SUPPORTED
// bool esprv_intc_int_is_vectored(int rv_int_num)
// {
// const uint32_t shv = REG_GET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_ATTR_SHV);
// return shv != 0;
// }
// void esprv_intc_int_set_vectored(int rv_int_num, bool vectored)
// {
// REG_SET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_ATTR_SHV, vectored ? 1 : 0);
// }
// #endif // SOC_INT_CLIC_SUPPORTED
/*************************** Exception names. Used in .gdbinit file. ***************************/
const char *riscv_excp_names[16] __attribute__((used)) = {


@ -0,0 +1,67 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/gpio_periph.h"
const uint32_t GPIO_PIN_MUX_REG[] = {
IO_MUX_GPIO0_REG,
IO_MUX_GPIO1_REG,
IO_MUX_GPIO2_REG,
IO_MUX_GPIO3_REG,
IO_MUX_GPIO4_REG,
IO_MUX_GPIO5_REG,
IO_MUX_GPIO6_REG,
IO_MUX_GPIO7_REG,
IO_MUX_GPIO8_REG,
IO_MUX_GPIO9_REG,
IO_MUX_GPIO10_REG,
IO_MUX_GPIO11_REG,
IO_MUX_GPIO12_REG,
IO_MUX_GPIO13_REG,
IO_MUX_GPIO14_REG,
IO_MUX_GPIO15_REG,
IO_MUX_GPIO16_REG,
IO_MUX_GPIO17_REG,
IO_MUX_GPIO18_REG,
IO_MUX_GPIO19_REG,
IO_MUX_GPIO20_REG,
IO_MUX_GPIO21_REG,
IO_MUX_GPIO22_REG,
IO_MUX_GPIO23_REG,
IO_MUX_GPIO24_REG,
};
_Static_assert(sizeof(GPIO_PIN_MUX_REG) == SOC_GPIO_PIN_COUNT * sizeof(uint32_t), "Invalid size of GPIO_PIN_MUX_REG");
const uint32_t GPIO_HOLD_MASK[] = {
BIT(0), //GPIO0 // LP_AON_GPIO_HOLD0_REG
BIT(1), //GPIO1
BIT(2), //GPIO2
BIT(3), //GPIO3
BIT(4), //GPIO4
BIT(5), //GPIO5
BIT(6), //GPIO6
BIT(7), //GPIO7
BIT(8), //GPIO8
BIT(9), //GPIO9
BIT(10), //GPIO10
BIT(11), //GPIO11
BIT(12), //GPIO12
BIT(13), //GPIO13
BIT(14), //GPIO14
BIT(15), //GPIO15
BIT(16), //GPIO16
BIT(17), //GPIO17
BIT(18), //GPIO18
BIT(19), //GPIO19
BIT(20), //GPIO20
BIT(21), //GPIO21
BIT(22), //GPIO22
BIT(23), //GPIO23
BIT(24), //GPIO24
};
_Static_assert(sizeof(GPIO_HOLD_MASK) == SOC_GPIO_PIN_COUNT * sizeof(uint32_t), "Invalid size of GPIO_HOLD_MASK");
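// Usage sketch (illustrative only): both tables are indexed by GPIO number, e.g. for GPIO4:
//
//     uint32_t io_mux_reg = GPIO_PIN_MUX_REG[4]; // IO_MUX register of the pad
//     uint32_t hold_bit   = GPIO_HOLD_MASK[4];   // hold bit inside LP_AON_GPIO_HOLD0_REG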


@ -24,6 +24,7 @@
#define REG_SPI_MEM_BASE(i) (DR_REG_MSPI0_BASE + (i) * 0x1000) // SPIMEM0 and SPIMEM1
#define REG_SPI_BASE(i) (((i)==2) ? (DR_REG_SPI2_BASE) : (0)) // only one GPSPI on C61
#define REG_I2C_BASE(i) (DR_REG_I2C_EXT_BASE) // only one I2C on C61
#define DR_REG_INTERRUPT_CORE0_BASE DR_REG_INTERRUPT_MATRIX_BASE
//Registers Operation {{
#define ETS_UNCACHED_ADDR(addr) (addr)


@ -0,0 +1,115 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/uart_periph.h"
/*
 A bunch of constants for every UART peripheral: GPIO signals, IRQs, hardware register addresses, etc.
*/
const uart_signal_conn_t uart_periph_signal[SOC_UART_NUM] = {
{ // HP UART0
.pins = {
[SOC_UART_TX_PIN_IDX] = {
.default_gpio = U0TXD_GPIO_NUM,
.iomux_func = U0TXD_MUX_FUNC,
.input = 0,
.signal = U0TXD_OUT_IDX,
},
[SOC_UART_RX_PIN_IDX] = {
.default_gpio = U0RXD_GPIO_NUM,
.iomux_func = U0RXD_MUX_FUNC,
.input = 1,
.signal = U0RXD_IN_IDX,
},
[SOC_UART_RTS_PIN_IDX] = {
.default_gpio = U0RTS_GPIO_NUM,
.iomux_func = U0RTS_MUX_FUNC,
.input = 0,
.signal = U0RTS_OUT_IDX,
},
[SOC_UART_CTS_PIN_IDX] = {
.default_gpio = U0CTS_GPIO_NUM,
.iomux_func = U0CTS_MUX_FUNC,
.input = 1,
.signal = U0CTS_IN_IDX,
}
},
.irq = ETS_UART0_INTR_SOURCE,
.module = PERIPH_UART0_MODULE,
},
{ // HP UART1
.pins = {
[SOC_UART_TX_PIN_IDX] = {
.default_gpio = U1TXD_GPIO_NUM,
.iomux_func = U1TXD_MUX_FUNC,
.input = 0,
.signal = U1TXD_OUT_IDX,
},
[SOC_UART_RX_PIN_IDX] = {
.default_gpio = U1RXD_GPIO_NUM,
.iomux_func = U1RXD_MUX_FUNC,
.input = 1,
.signal = U1RXD_IN_IDX,
},
[SOC_UART_RTS_PIN_IDX] = {
.default_gpio = U1RTS_GPIO_NUM,
.iomux_func = U1RTS_MUX_FUNC,
.input = 0,
.signal = U1RTS_OUT_IDX,
},
[SOC_UART_CTS_PIN_IDX] = {
.default_gpio = U1CTS_GPIO_NUM,
.iomux_func = U1CTS_MUX_FUNC,
.input = 1,
.signal = U1CTS_IN_IDX,
},
},
.irq = ETS_UART1_INTR_SOURCE,
.module = PERIPH_UART1_MODULE,
},
#if 0 //TODO: [ESP32C61] IDF-9329, IDF-9341
{ // LP UART0
.pins = {
[SOC_UART_TX_PIN_IDX] = {
.default_gpio = LP_U0TXD_GPIO_NUM,
.iomux_func = LP_U0TXD_MUX_FUNC,
.input = 0,
.signal = UINT8_MAX, // Signal not available in signal map
},
[SOC_UART_RX_PIN_IDX] = {
.default_gpio = LP_U0RXD_GPIO_NUM,
.iomux_func = LP_U0RXD_MUX_FUNC,
.input = 1,
.signal = UINT8_MAX, // Signal not available in signal map
},
[SOC_UART_RTS_PIN_IDX] = {
.default_gpio = LP_U0RTS_GPIO_NUM,
.iomux_func = LP_U0RTS_MUX_FUNC,
.input = 0,
.signal = UINT8_MAX, // Signal not available in signal map
},
[SOC_UART_CTS_PIN_IDX] = {
.default_gpio = LP_U0CTS_GPIO_NUM,
.iomux_func = LP_U0CTS_MUX_FUNC,
.input = 1,
.signal = UINT8_MAX, // Signal not available in signal map
},
},
.irq = ETS_LP_UART_INTR_SOURCE,
.module = PERIPH_LP_UART0_MODULE,
},
#endif
};
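// Usage sketch (illustrative only): the table is indexed by port number and pin index,
// e.g. the default TX GPIO and the interrupt source of HP UART0:
//
//     int tx_gpio = uart_periph_signal[0].pins[SOC_UART_TX_PIN_IDX].default_gpio;
//     int irq_src = uart_periph_signal[0].irq;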


@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2019-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -7,12 +7,17 @@
#pragma once
#include "sdkconfig.h"
#include "soc/soc_caps.h"
// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION
#include "soc/lp_io_reg.h"
#include "soc/lp_io_struct.h"
#include "soc/lp_aon_reg.h"
#elif CONFIG_IDF_TARGET_ESP32C61
#include "soc/lp_gpio_reg.h"
#include "soc/lp_gpio_struct.h"
#include "soc/lp_aon_reg.h"
// ESP32H2-TODO: IDF-6327
#elif CONFIG_IDF_TARGET_ESP32H2
@ -25,16 +30,22 @@
#endif
// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32P4 || CONFIG_IDF_TARGET_ESP32C5
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32P4 || CONFIG_IDF_TARGET_ESP32C5 || CONFIG_IDF_TARGET_ESP32C61
#include "soc/lp_analog_peri_reg.h"
#include "soc/lp_clkrst_reg.h"
#include "soc/lp_clkrst_struct.h"
#if SOC_LP_I2C_SUPPORTED
#include "soc/lp_i2c_reg.h"
#include "soc/lp_i2c_struct.h"
#endif
#if SOC_LP_TIMER_SUPPORTED
#include "soc/lp_timer_reg.h"
#include "soc/lp_timer_struct.h"
#endif
#if SOC_ULP_LP_UART_SUPPORTED
#include "soc/lp_uart_reg.h"
#include "soc/lp_uart_struct.h"
#endif
#include "soc/lp_wdt_reg.h"
#include "soc/lp_wdt_struct.h"
#elif CONFIG_IDF_TARGET_ESP32H2


@ -13,7 +13,6 @@
//include soc related (generated) definitions
#include "soc/soc_caps.h"
#include "soc/soc_pins.h"
#include "soc/spi_reg.h"
#include "soc/spi_struct.h"
#include "soc/spi_pins.h"