Merge branch 'esp32p4/add_aes_gcm_support' into 'master'

feat: add AES-GCM support for ESP32-P4

See merge request espressif/esp-idf!29516
This commit is contained in:
Mahavir Jain 2024-03-27 11:40:26 +08:00
commit c658351eab
9 changed files with 488 additions and 69 deletions

View File

@ -249,6 +249,92 @@ static inline void aes_ll_interrupt_clear(void)
REG_WRITE(AES_INT_CLEAR_REG, 1);
}
/**
 * @brief Continue a previously started transform
 *
 * @note Only used when doing GCM, where one logical operation is split into
 *       several hardware transforms (e.g. AAD phase followed by the payload)
 *       — presumably the peripheral keeps its internal state between them;
 *       see the GCM chapter of the TRM.
 */
static inline void aes_ll_cont_transform(void)
{
    REG_WRITE(AES_CONTINUE_REG, 1);
}
/**
 * @brief Reads the AES-GCM hash sub-key H
 *
 * @param gcm_hash Buffer receiving the hash sub-key (AES_BLOCK_WORDS 32-bit words)
 */
static inline void aes_ll_gcm_read_hash(uint8_t *gcm_hash)
{
    for (size_t word = 0; word < AES_BLOCK_WORDS; word++) {
        uint32_t value = REG_READ(AES_H_MEM + word * sizeof(uint32_t));
        /* Store byte-wise via memcpy so an unaligned destination is safe */
        memcpy(&gcm_hash[word * sizeof(value)], &value, sizeof(value));
    }
}
/**
 * @brief Sets the number of Additional Authenticated Data (AAD) blocks
 *
 * @note Only affects AES-GCM
 * @note NOTE(review): presumably this tells the hardware where the AAD ends
 *       and the plaintext/ciphertext begins in the DMA input stream — confirm
 *       against the TRM GCM chapter.
 *
 * @param aad_num_blocks the number of Additional Authenticated Data (AAD) blocks
 */
static inline void aes_ll_gcm_set_aad_num_blocks(size_t aad_num_blocks)
{
    REG_WRITE(AES_AAD_BLOCK_NUM_REG, aad_num_blocks);
}
/**
 * @brief Sets the J0 value, for more information see the GCM subchapter in the TRM
 *
 * @note Only affects AES-GCM
 *
 * @param j0 J0 value (AES_BLOCK_WORDS 32-bit words, may be unaligned)
 */
static inline void aes_ll_gcm_set_j0(const uint8_t *j0)
{
    uint32_t *j0_mem = (uint32_t *)(AES_J0_MEM);
    for (int word = 0; word < AES_BLOCK_WORDS; word++) {
        uint32_t value;
        /* Load byte-wise via memcpy so an unaligned source is safe */
        memcpy(&value, &j0[word * sizeof(value)], sizeof(value));
        REG_WRITE(&j0_mem[word], value);
    }
}
/**
 * @brief Sets the number of effective bits of incomplete blocks in plaintext/ciphertext.
 *
 * @note Only affects AES-GCM
 *
 * @param num_valid_bits the number of effective bits of incomplete blocks in plaintext/ciphertext.
 */
static inline void aes_ll_gcm_set_num_valid_bit(size_t num_valid_bits)
{
    REG_WRITE(AES_REMAINDER_BIT_NUM_REG, num_valid_bits);
}
/**
 * @brief Read the tag after an AES-GCM transform
 *
 * @param tag Pointer to where to store the result, TAG_WORDS 32-bit words long
 */
static inline void aes_ll_gcm_read_tag(uint8_t *tag)
{
    for (size_t word = 0; word < TAG_WORDS; word++) {
        uint32_t value = REG_READ(AES_T0_MEM + word * sizeof(uint32_t));
        /* Store byte-wise via memcpy so an unaligned destination is safe */
        memcpy(&tag[word * sizeof(value)], &value, sizeof(value));
    }
}
#ifdef __cplusplus
}

View File

@ -241,6 +241,32 @@ cleanup:
return ret;
}
/** Append a descriptor to the chain, set head if chain empty
 *
 * @param[out] head Pointer to the first/head node of the DMA descriptor linked list
 * @param item Pointer to the DMA descriptor node that has to be appended
 */
static inline void dma_desc_append(crypto_dma_desc_t **head, crypto_dma_desc_t *item)
{
    /* Empty chain: the new node simply becomes the head */
    if (*head == NULL) {
        *head = item;
        return;
    }

    /* Walk to the current tail of the chain */
    crypto_dma_desc_t *tail = *head;
    while (tail->next != NULL) {
        tail = (crypto_dma_desc_t *)tail->next;
    }

    /* The old tail no longer terminates the chain */
    tail->dw0.suc_eof = 0;
    tail->next = item;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    /* Write the modified tail descriptor back so the DMA engine sees the new link */
    ESP_ERROR_CHECK(esp_cache_msync(tail, sizeof(crypto_dma_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED));
#endif
}
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
@ -282,7 +308,12 @@ static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_
dmadesc[i].dw0.suc_eof = ((i == crypto_dma_desc_num - 1) ? 1 : 0);
dmadesc[i].next = ((i == crypto_dma_desc_num - 1) ? NULL : &dmadesc[i+1]);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
/* Write back both input buffers and output buffers to clear any cache dirty bit if set */
/* Write back both input buffers and output buffers to clear any cache dirty bit if set
If we want to remove `ESP_CACHE_MSYNC_FLAG_UNALIGNED` aligned flag then we need to pass
cache msync size = ALIGN_UP(dma_desc.size, cache_line_size), instead of dma_desc.size
Keeping the `ESP_CACHE_MSYNC_FLAG_UNALIGNED` flag just because it should not look like
we are syncing extra bytes due to ALIGN_UP'ed size but just the number of bytes that
are needed in the operation. */
ret = esp_cache_msync(dmadesc[i].buffer, dmadesc[i].dw0.length, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED);
if (ret != ESP_OK) {
return ret;
@ -605,6 +636,218 @@ cleanup:
return ret;
}
#if CONFIG_MBEDTLS_HARDWARE_GCM
/* Encrypt/decrypt with AES-GCM the input using DMA
 * The function esp_aes_process_dma_gcm zeroises the output buffer in the case of following conditions:
 * 1. If key is not written in the hardware
 * 2. Memory allocation failures
 * 3. If AES interrupt is enabled and ISR initialisation fails
 * 4. Failure in any of the AES operations
 */
int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, unsigned char *output, size_t len, const unsigned char *aad, size_t aad_len)
{
    int ret = 0;
    bool use_intr = false;

    /* All buffers and descriptor chains are declared up-front and
       NULL-initialised so that every error path can `goto cleanup` and
       unconditionally free() them (free(NULL) is a no-op). The previous
       version returned directly from mid-function failures, leaking
       whatever had been allocated before the failing step. */
    crypto_dma_desc_t *in_desc_head = NULL;
    crypto_dma_desc_t *out_desc_tail = NULL; /* pointer to the final output descriptor */
    crypto_dma_desc_t *aad_desc = NULL, *len_desc = NULL;
    crypto_dma_desc_t *input_desc = NULL;
    crypto_dma_desc_t *output_desc = NULL;
    uint8_t *aad_start_stream_buffer = NULL;
    uint8_t *aad_end_stream_buffer = NULL;
    uint8_t *input_start_stream_buffer = NULL;
    uint8_t *input_end_stream_buffer = NULL;
    uint8_t *output_start_stream_buffer = NULL;
    uint8_t *output_end_stream_buffer = NULL;
    uint32_t *len_buf = NULL;
    size_t aad_dma_desc_num = 0;
    size_t input_dma_desc_num = 0;
    size_t output_dma_desc_num = 0;
    size_t output_start_alignment = 0;
    size_t output_end_alignment = 0;

    /* If no key is written to hardware yet, either the user hasn't called
       mbedtls_aes_setkey_enc/mbedtls_aes_setkey_dec - meaning we also don't
       know which mode to use - or a fault skipped the
       key write to hardware. Treat this as a fatal error and zero the output block.
    */
    if (ctx->key_in_hardware != ctx->key_bytes) {
        mbedtls_platform_zeroize(output, len);
        return MBEDTLS_ERR_AES_INVALID_INPUT_LENGTH;
    }

    unsigned stream_bytes = len % AES_BLOCK_BYTES; // bytes which aren't in a full block
    unsigned block_bytes = len - stream_bytes;     // bytes which are in a full block
    unsigned blocks = (block_bytes / AES_BLOCK_BYTES) + ((stream_bytes > 0) ? 1 : 0);

    size_t aad_cache_line_size = get_cache_line_size(aad);
    size_t input_cache_line_size = get_cache_line_size(input);
    size_t output_cache_line_size = get_cache_line_size(output);
    if (aad_cache_line_size == 0 || input_cache_line_size == 0 || output_cache_line_size == 0) {
        ESP_LOGE(TAG, "Getting cache line size failed");
        ret = -1;
        goto cleanup; /* cleanup zeroizes the output whenever ret != 0 */
    }

    /* Build the DMA input chain: AAD descriptors first... */
    size_t aad_alignment_buffer_size = MAX(2 * aad_cache_line_size, AES_BLOCK_BYTES);
    if (generate_descriptor_list(aad, aad_len, &aad_start_stream_buffer, &aad_end_stream_buffer, aad_alignment_buffer_size, aad_cache_line_size, NULL, NULL, &aad_desc, &aad_dma_desc_num, false) != ESP_OK) {
        ESP_LOGE(TAG, "Generating aad DMA descriptors failed");
        ret = -1;
        goto cleanup;
    }
    dma_desc_append(&in_desc_head, aad_desc);

    /* ...then the plaintext/ciphertext descriptors... */
    size_t input_alignment_buffer_size = MAX(2 * input_cache_line_size, AES_BLOCK_BYTES);
    if (generate_descriptor_list(input, len, &input_start_stream_buffer, &input_end_stream_buffer, input_alignment_buffer_size, input_cache_line_size, NULL, NULL, &input_desc, &input_dma_desc_num, false) != ESP_OK) {
        ESP_LOGE(TAG, "Generating input DMA descriptors failed");
        ret = -1;
        goto cleanup;
    }
    dma_desc_append(&in_desc_head, input_desc);

    /* ...and a separate chain for the output */
    size_t output_alignment_buffer_size = MAX(2 * output_cache_line_size, AES_BLOCK_BYTES);
    if (generate_descriptor_list(output, len, &output_start_stream_buffer, &output_end_stream_buffer, output_alignment_buffer_size, output_cache_line_size, &output_start_alignment, &output_end_alignment, &output_desc, &output_dma_desc_num, true) != ESP_OK) {
        ESP_LOGE(TAG, "Generating output DMA descriptors failed");
        ret = -1;
        goto cleanup;
    }
    out_desc_tail = &output_desc[output_dma_desc_num - 1];

    /* Trailing 16-byte GCM length block: big-endian bit lengths of the AAD
       and of the payload (words [0]/[2] stay zero: high halves of the 64-bit
       lengths) */
    len_desc = aes_dma_calloc(1, sizeof(crypto_dma_desc_t), MALLOC_CAP_DMA, NULL);
    if (len_desc == NULL) {
        ESP_LOGE(TAG, "Failed to allocate memory for len descriptor");
        ret = -1;
        goto cleanup;
    }
    len_buf = aes_dma_calloc(4, sizeof(uint32_t), MALLOC_CAP_DMA, NULL);
    if (len_buf == NULL) {
        ESP_LOGE(TAG, "Failed to allocate memory for len buffer");
        ret = -1;
        goto cleanup;
    }
    len_buf[1] = __builtin_bswap32(aad_len * 8);
    len_buf[3] = __builtin_bswap32(len * 8);

    len_desc->dw0.length = 4 * sizeof(uint32_t);
    len_desc->dw0.size = 4 * sizeof(uint32_t);
    len_desc->dw0.owner = 1;
    len_desc->dw0.suc_eof = 1;
    len_desc->buffer = (void *) len_buf;
    len_desc->next = NULL;

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    if (esp_cache_msync(len_desc->buffer, len_desc->dw0.length, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED) != ESP_OK) {
        ESP_LOGE(TAG, "Length DMA descriptor cache sync C2M failed");
        ret = -1;
        goto cleanup;
    }
    if (esp_cache_msync(len_desc, sizeof(crypto_dma_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED) != ESP_OK) {
        ESP_LOGE(TAG, "Length DMA descriptor cache sync C2M failed");
        ret = -1;
        goto cleanup;
    }
#endif
    dma_desc_append(&in_desc_head, len_desc);

#if defined (CONFIG_MBEDTLS_AES_USE_INTERRUPT)
    /* Only use interrupt for long AES operations */
    if (len > AES_DMA_INTR_TRIG_LEN) {
        use_intr = true;
        if (esp_aes_isr_initialise() != ESP_OK) {
            ESP_LOGE(TAG, "ESP-AES ISR initialisation failed");
            ret = -1;
            goto cleanup;
        }
    } else
#endif
    {
        aes_hal_interrupt_enable(false);
    }

    /* Start AES operation */
    if (esp_aes_dma_start(in_desc_head, output_desc) != ESP_OK) {
        ESP_LOGE(TAG, "esp_aes_dma_start failed, no DMA channel available");
        ret = -1;
        goto cleanup;
    }

    aes_hal_transform_dma_gcm_start(blocks);

    if (esp_aes_dma_wait_complete(use_intr, out_desc_tail) < 0) {
        ESP_LOGE(TAG, "esp_aes_dma_wait_complete failed");
        ret = -1;
        goto cleanup;
    }

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    /* Invalidate the output descriptors and buffers so the CPU sees what the DMA wrote */
    if (esp_cache_msync(output_desc, ALIGN_UP(output_dma_desc_num * sizeof(crypto_dma_desc_t), output_cache_line_size), ESP_CACHE_MSYNC_FLAG_DIR_M2C) != ESP_OK) {
        ESP_LOGE(TAG, "Output DMA descriptor cache sync M2C failed");
        ret = -1;
        goto cleanup;
    }
    for (size_t i = 0; i < output_dma_desc_num; i++) {
        if (esp_cache_msync(output_desc[i].buffer, ALIGN_UP(output_desc[i].dw0.length, output_cache_line_size), ESP_CACHE_MSYNC_FLAG_DIR_M2C) != ESP_OK) {
            ESP_LOGE(TAG, "Output DMA descriptor buffers cache sync M2C failed");
            ret = -1;
            goto cleanup;
        }
    }
#endif
    aes_hal_transform_dma_finish();

    /* Extra bytes that were needed to be processed for supplying the AES peripheral a padded multiple of 16 bytes input */
    size_t extra_bytes = ALIGN_UP(len, AES_BLOCK_BYTES) - len;

    /* Copy the unaligned head/tail of the result out of the bounce buffers */
    if (output_start_alignment) {
        memcpy(output, output_start_stream_buffer, (output_start_alignment > len) ? len : output_start_alignment);
    }

    if (output_end_alignment) {
        memcpy(output + len - (output_end_alignment - extra_bytes), output_end_stream_buffer, output_end_alignment - extra_bytes);
    }

cleanup:
    if (ret != 0) {
        mbedtls_platform_zeroize(output, len);
    }
    free(aad_start_stream_buffer);
    free(aad_end_stream_buffer);
    free(aad_desc);
    free(input_start_stream_buffer);
    free(input_end_stream_buffer);
    free(input_desc);
    free(output_start_stream_buffer);
    free(output_end_stream_buffer);
    free(output_desc);
    free(len_buf);
    free(len_desc);

    return ret;
}
#endif //CONFIG_MBEDTLS_HARDWARE_GCM
#else /* SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE */
/* These are static due to:
@ -616,28 +859,6 @@ static DRAM_ATTR crypto_dma_desc_t s_stream_out_desc;
static DRAM_ATTR uint8_t s_stream_in[AES_BLOCK_BYTES];
static DRAM_ATTR uint8_t s_stream_out[AES_BLOCK_BYTES];
/** Append a descriptor to the chain, set head if chain empty
 *
 * @param[out] head Pointer to the first/head node of the DMA descriptor linked list
 * @param item Pointer to the DMA descriptor node that has to be appended
 */
static inline void dma_desc_append(crypto_dma_desc_t **head, crypto_dma_desc_t *item)
{
    /* Empty chain: the new node simply becomes the head */
    if (*head == NULL) {
        *head = item;
        return;
    }

    /* Walk to the current tail of the chain */
    crypto_dma_desc_t *tail = *head;
    while (tail->next != NULL) {
        tail = (crypto_dma_desc_t *)tail->next;
    }

    /* The old tail no longer terminates the chain */
    tail->dw0.suc_eof = 0;
    tail->next = item;
}
/**
* Generate a linked list pointing to a (huge) buffer in an descriptor array.
*
@ -852,7 +1073,6 @@ cleanup:
free(block_desc);
return ret;
}
#endif /* SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE */
#if CONFIG_MBEDTLS_HARDWARE_GCM
@ -863,12 +1083,44 @@ cleanup:
* 3. If AES interrupt is enabled and ISR initialisation fails
* 4. Failure in any of the AES operations
*/
int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, unsigned char *output, size_t len, crypto_dma_desc_t *aad_desc, size_t aad_len)
int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, unsigned char *output, size_t len, const unsigned char *aad, size_t aad_len)
{
crypto_dma_desc_t aad_desc[2] = {};
crypto_dma_desc_t *aad_head_desc = NULL;
crypto_dma_desc_t *in_desc_head = NULL, *out_desc_head = NULL, *len_desc = NULL;
crypto_dma_desc_t *out_desc_tail = NULL; /* pointer to the final output descriptor */
crypto_dma_desc_t stream_in_desc, stream_out_desc;
crypto_dma_desc_t *block_desc = NULL, *block_in_desc = NULL, *block_out_desc = NULL;
uint8_t stream_in_aad[AES_BLOCK_BYTES] = {};
unsigned stream_bytes_aad = aad_len % AES_BLOCK_BYTES; // bytes which aren't in a full block
unsigned block_bytes_aad = aad_len - stream_bytes_aad; // bytes which are in a full block
assert(esp_ptr_dma_capable(stream_in_aad));
if (block_bytes_aad > 0) {
aad_desc[0].dw0.length = block_bytes_aad;
aad_desc[0].dw0.size = block_bytes_aad;
aad_desc[0].dw0.owner = 1;
aad_desc[0].buffer = (void*)aad;
}
if (stream_bytes_aad > 0) {
memcpy(stream_in_aad, aad + block_bytes_aad, stream_bytes_aad);
aad_desc[0].next = &aad_desc[1];
aad_desc[1].dw0.length = AES_BLOCK_BYTES;
aad_desc[1].dw0.size = AES_BLOCK_BYTES;
aad_desc[1].dw0.owner = 1;
aad_desc[1].buffer = (void*)stream_in_aad;
}
if (block_bytes_aad > 0) {
aad_head_desc = &aad_desc[0];
} else if (stream_bytes_aad > 0) {
aad_head_desc = &aad_desc[1];
}
size_t crypto_dma_desc_num = 0;
uint32_t len_buf[4] = {};
uint8_t stream_in[16] = {};
@ -906,8 +1158,8 @@ int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, un
len_desc = block_desc + crypto_dma_desc_num;
block_out_desc = block_desc + crypto_dma_desc_num + 1;
if (aad_desc != NULL) {
dma_desc_append(&in_desc_head, aad_desc);
if (aad_head_desc != NULL) {
dma_desc_append(&in_desc_head, aad_head_desc);
}
if (block_bytes > 0) {
@ -990,3 +1242,4 @@ cleanup:
}
#endif //CONFIG_MBEDTLS_HARDWARE_GCM
#endif /* SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE */

View File

@ -586,7 +586,7 @@ int esp_aes_gcm_finish( esp_gcm_context *ctx,
/* Due to restrictions in the hardware (e.g. need to do the whole conversion in one go),
some combinations of inputs are not supported */
static bool esp_aes_gcm_input_support_hw_accel(size_t length, const unsigned char *aad, size_t aad_len,
const unsigned char *input, unsigned char *output, uint8_t *stream_in)
const unsigned char *input, unsigned char *output)
{
bool support_hw_accel = true;
@ -601,10 +601,6 @@ static bool esp_aes_gcm_input_support_hw_accel(size_t length, const unsigned cha
} else if (!esp_ptr_dma_capable(output) && length > 0) {
/* output in non internal DMA memory */
support_hw_accel = false;
} else if (!esp_ptr_dma_capable(stream_in)) {
/* Stream in (and therefor other descriptors and buffers that come from the stack)
in non internal DMA memory */
support_hw_accel = false;
} else if (length == 0) {
support_hw_accel = false;
}
@ -672,15 +668,10 @@ int esp_aes_gcm_crypt_and_tag( esp_gcm_context *ctx,
#endif
#if CONFIG_MBEDTLS_HARDWARE_GCM
int ret;
crypto_dma_desc_t aad_desc[2] = {};
crypto_dma_desc_t *aad_head_desc = NULL;
size_t remainder_bit;
uint8_t stream_in[AES_BLOCK_BYTES] = {};
unsigned stream_bytes = aad_len % AES_BLOCK_BYTES; // bytes which aren't in a full block
unsigned block_bytes = aad_len - stream_bytes; // bytes which are in a full block
/* Due to hardware limition only certain cases are fully supported in HW */
if (!esp_aes_gcm_input_support_hw_accel(length, aad, aad_len, input, output, stream_in)) {
if (!esp_aes_gcm_input_support_hw_accel(length, aad, aad_len, input, output)) {
return esp_aes_gcm_crypt_and_tag_partial_hw(ctx, mode, length, iv, iv_len, aad, aad_len, input, output, tag_len, tag);
}
@ -725,29 +716,6 @@ int esp_aes_gcm_crypt_and_tag( esp_gcm_context *ctx,
ctx->aes_ctx.key_in_hardware = 0;
ctx->aes_ctx.key_in_hardware = aes_hal_setkey(ctx->aes_ctx.key, ctx->aes_ctx.key_bytes, mode);
if (block_bytes > 0) {
aad_desc[0].dw0.length = block_bytes;
aad_desc[0].dw0.size = block_bytes;
aad_desc[0].dw0.owner = 1;
aad_desc[0].buffer = (void*)aad;
}
if (stream_bytes > 0) {
memcpy(stream_in, aad + block_bytes, stream_bytes);
aad_desc[0].next = &aad_desc[1];
aad_desc[1].dw0.length = AES_BLOCK_BYTES;
aad_desc[1].dw0.size = AES_BLOCK_BYTES;
aad_desc[1].dw0.owner = 1;
aad_desc[1].buffer = (void*)stream_in;
}
if (block_bytes > 0) {
aad_head_desc = &aad_desc[0];
} else if (stream_bytes > 0) {
aad_head_desc = &aad_desc[1];
}
aes_hal_mode_init(ESP_AES_BLOCK_MODE_GCM);
/* See TRM GCM chapter for description of this calculation */
@ -760,7 +728,7 @@ int esp_aes_gcm_crypt_and_tag( esp_gcm_context *ctx,
aes_hal_gcm_set_j0(ctx->J0);
ret = esp_aes_process_dma_gcm(&ctx->aes_ctx, input, output, length, aad_head_desc, aad_len);
ret = esp_aes_process_dma_gcm(&ctx->aes_ctx, input, output, length, aad, aad_len);
if (ret != 0) {
esp_aes_release_hardware();
return ret;

View File

@ -43,7 +43,7 @@ int esp_aes_process_dma(esp_aes_context *ctx, const unsigned char *input, unsign
* @param aad_len GCM additional data length
* @return int -1 on error
*/
int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, unsigned char *output, size_t len, crypto_dma_desc_t *aad_desc, size_t aad_len);
int esp_aes_process_dma_gcm(esp_aes_context *ctx, const unsigned char *input, unsigned char *output, size_t len, const unsigned char *aad_desc, size_t aad_len);
#endif
#ifdef __cplusplus

View File

@ -468,7 +468,7 @@ TEST_CASE("mbedtls AES GCM performance, start, update, ret", "[aes-gcm]")
#ifdef CONFIG_MBEDTLS_HARDWARE_GCM
// Don't put a hard limit on software AES performance
TEST_PERFORMANCE_GREATER_THAN(AES_GCM_UPDATE_THROUGHPUT_MBSEC, "%.3fMB/sec", mb_sec);
TEST_PERFORMANCE_CCOMP_GREATER_THAN(AES_GCM_UPDATE_THROUGHPUT_MBSEC, "%.3fMB/sec", mb_sec);
#endif
}
@ -527,7 +527,7 @@ TEST_CASE("mbedtls AES GCM performance, crypt-and-tag", "[aes-gcm]")
#ifdef CONFIG_MBEDTLS_HARDWARE_GCM
// Don't put a hard limit on software AES performance
TEST_PERFORMANCE_GREATER_THAN(AES_GCM_CRYPT_TAG_THROUGHPUT_MBSEC, "%.3fMB/sec", mb_sec);
TEST_PERFORMANCE_CCOMP_GREATER_THAN(AES_GCM_CRYPT_TAG_THROUGHPUT_MBSEC, "%.3fMB/sec", mb_sec);
#endif
}

View File

@ -247,6 +247,10 @@ config SOC_AES_SUPPORT_DMA
bool
default y
config SOC_AES_SUPPORT_GCM
bool
default y
config SOC_AES_GDMA
bool
default y

View File

@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -216,6 +216,19 @@ extern "C" {
#define AES_MODE_V 0x00000007U
#define AES_MODE_S 0
/** AES_ENDIAN_REG register
* AES Endian configure register
*/
#define AES_ENDIAN_REG (DR_REG_AES_BASE + 0x44)
/** AES_ENDIAN : R/W; bitpos: [5:0]; default: 0;
* endian. [1:0] key endian, [3:2] text_in endian or in_stream endian, [5:4] text_out
* endian or out_stream endian
*/
#define AES_ENDIAN 0x0000003FU
#define AES_ENDIAN_M (AES_ENDIAN_V << AES_ENDIAN_S)
#define AES_ENDIAN_V 0x0000003FU
#define AES_ENDIAN_S 0
/** AES_TRIGGER_REG register
* AES trigger register
*/
@ -314,6 +327,42 @@ extern "C" {
#define AES_INC_SEL_V 0x00000001U
#define AES_INC_SEL_S 0
/** AES_AAD_BLOCK_NUM_REG register
* Additional Authenticated Data (AAD) block number register
*/
#define AES_AAD_BLOCK_NUM_REG (DR_REG_AES_BASE + 0xa0)
/** AES_AAD_BLOCK_NUM : R/W; bitpos: [31:0]; default: 0;
* Stores the number of AAD blocks.
*/
#define AES_AAD_BLOCK_NUM 0xFFFFFFFFU
#define AES_AAD_BLOCK_NUM_M (AES_AAD_BLOCK_NUM_V << AES_AAD_BLOCK_NUM_S)
#define AES_AAD_BLOCK_NUM_V 0xFFFFFFFFU
#define AES_AAD_BLOCK_NUM_S 0
/** AES_REMAINDER_BIT_NUM_REG register
* AES remainder bit number register
*/
#define AES_REMAINDER_BIT_NUM_REG (DR_REG_AES_BASE + 0xa4)
/** AES_REMAINDER_BIT_NUM : R/W; bitpos: [6:0]; default: 0;
* Stores the number of remainder bits.
*/
#define AES_REMAINDER_BIT_NUM 0x0000007FU
#define AES_REMAINDER_BIT_NUM_M (AES_REMAINDER_BIT_NUM_V << AES_REMAINDER_BIT_NUM_S)
#define AES_REMAINDER_BIT_NUM_V 0x0000007FU
#define AES_REMAINDER_BIT_NUM_S 0
/** AES_CONTINUE_REG register
* AES continue register
*/
#define AES_CONTINUE_REG (DR_REG_AES_BASE + 0xa8)
/** AES_CONTINUE : WT; bitpos: [0]; default: 0;
* Set this bit to continue GCM operation.
*/
#define AES_CONTINUE (BIT(0))
#define AES_CONTINUE_M (AES_CONTINUE_V << AES_CONTINUE_S)
#define AES_CONTINUE_V 0x00000001U
#define AES_CONTINUE_S 0
/** AES_INT_CLEAR_REG register
* AES Interrupt clear register
*/

View File

@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -240,6 +240,21 @@ typedef union {
uint32_t val;
} aes_mode_reg_t;
/** Type of endian register
 * AES endianness configuration register
 */
typedef union {
    struct {
        /** endian : R/W; bitpos: [5:0]; default: 0;
         * Endianness control: bits [1:0] key endian, [3:2] text_in endian or
         * in_stream endian, [5:4] text_out endian or out_stream endian
         */
        uint32_t endian:6;
        uint32_t reserved_6:26;
    };
    uint32_t val;
} aes_endian_reg_t;
/** Type of block_mode register
* AES cipher block mode register
*/
@ -282,6 +297,33 @@ typedef union {
uint32_t val;
} aes_inc_sel_reg_t;
/** Type of aad_block_num register
 * Additional Authenticated Data (AAD) block number register
 */
typedef union {
    struct {
        /** aad_block_num : R/W; bitpos: [31:0]; default: 0;
         * Stores the number of AAD blocks.
         */
        uint32_t aad_block_num:32;
    };
    uint32_t val;
} aes_aad_block_num_reg_t;
/** Type of remainder_bit_num register
 * AES remainder bit number register
 */
typedef union {
    struct {
        /** remainder_bit_num : R/W; bitpos: [6:0]; default: 0;
         * Stores the number of remainder bits (effective bits of an
         * incomplete final block).
         */
        uint32_t remainder_bit_num:7;
        uint32_t reserved_7:25;
    };
    uint32_t val;
} aes_remainder_bit_num_reg_t;
/** Group: Control/Status register */
/** Type of trigger register
@ -327,6 +369,20 @@ typedef union {
uint32_t val;
} aes_dma_enable_reg_t;
/** Type of continue register
 * AES continue register
 */
typedef union {
    struct {
        /** continue : WT; bitpos: [0]; default: 0;
         * Set this bit to continue GCM operation.
         * (Field is named `conti` because `continue` is a C keyword.)
         */
        uint32_t conti:1;
        uint32_t reserved_1:31;
    };
    uint32_t val;
} aes_continue_reg_t;
/** Type of dma_exit register
* AES-DMA exit config
*/
@ -409,7 +465,7 @@ typedef struct {
volatile aes_text_out_2_reg_t text_out_2;
volatile aes_text_out_3_reg_t text_out_3;
volatile aes_mode_reg_t mode;
uint32_t reserved_044;
volatile aes_endian_reg_t endian;
volatile aes_trigger_reg_t trigger;
volatile aes_state_reg_t state;
volatile uint32_t iv[4];
@ -420,7 +476,9 @@ typedef struct {
volatile aes_block_mode_reg_t block_mode;
volatile aes_block_num_reg_t block_num;
volatile aes_inc_sel_reg_t inc_sel;
uint32_t reserved_0a0[3];
volatile aes_aad_block_num_reg_t aad_block_num;
volatile aes_remainder_bit_num_reg_t remainder_bit_num;
volatile aes_continue_reg_t conti;
volatile aes_int_clear_reg_t int_clear;
volatile aes_int_ena_reg_t int_ena;
volatile aes_date_reg_t date;

View File

@ -96,6 +96,7 @@
/*-------------------------- AES CAPS -----------------------------------------*/
#define SOC_AES_SUPPORT_DMA (1)
#define SOC_AES_SUPPORT_GCM (1)
/* Has a centralized DMA, which is shared with all peripherals */
#define SOC_AES_GDMA (1)