Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
Merge branch 'bugfix/fix_sign_compare' into 'master'
global: fix sign-compare warnings for system level components

See merge request espressif/esp-idf!11252
This commit is contained in: commit 9769be3fde
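The hunks below all address the same class of warning: a signed loop counter or length is compared against an unsigned value (size_t, uint32_t, the result of sizeof or strlen), which GCC reports under -Wsign-compare (enabled by -Wextra). The fix is either to declare the counter with the unsigned type of its bound or, where the variable must stay signed (for example because -1 is a valid value), to cast one side explicitly. A minimal sketch of the warning and both fixes, using illustrative names that are not taken from this commit:

#include <stddef.h>
#include <stdint.h>

/* Before: `i` is signed and `len` is unsigned, so `i < len` triggers
 * "comparison of integer expressions of different signedness". */
void fill_before(uint8_t *buf, size_t len)
{
    for (int i = 0; i < len; i++) {
        buf[i] = 0;
    }
}

/* Fix 1: give the counter the same (unsigned) type as its bound. */
void fill_after(uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        buf[i] = 0;
    }
}

/* Fix 2: keep the signed variable (it may legitimately be -1) and
 * cast the unsigned side, as several hunks below do. */
int index_ok(int index, size_t count)
{
    return index >= 0 && index < (int)count;
}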
@@ -880,7 +880,7 @@ static esp_err_t esp_apptrace_trax_status_reg_get(uint32_t *val)
 static esp_err_t esp_apptrace_trax_dest_init(void)
 {
-for (int i = 0; i < ESP_APPTRACE_TRAX_BLOCKS_NUM; i++) {
+for (size_t i = 0; i < ESP_APPTRACE_TRAX_BLOCKS_NUM; i++) {
 s_trace_buf.trax.blocks[i].start = (uint8_t *)s_trax_blocks[i];
 s_trace_buf.trax.blocks[i].sz = ESP_APPTRACE_TRAX_BLOCK_SIZE;
 s_trace_buf.trax.state.markers[i] = 0;
@@ -404,7 +404,7 @@ static esp_err_t esp_rewrite_ota_data(esp_partition_subtype_t subtype)
 return ESP_ERR_NOT_FOUND;
 }
-int ota_app_count = get_ota_partition_count();
+uint8_t ota_app_count = get_ota_partition_count();
 if (SUB_TYPE_ID(subtype) >= ota_app_count) {
 return ESP_ERR_INVALID_ARG;
 }
@@ -127,7 +127,7 @@ int uECC_verify_antifault(const uint8_t *public_key,
 const uECC_word_t *mhash_words = (const uECC_word_t *)message_hash;
 uECC_word_t *vhash_words = (uECC_word_t *)verified_hash;
 unsigned hash_words = hash_size / sizeof(uECC_word_t);
-for (int w = 0; w < hash_words; w++) {
+for (unsigned int w = 0; w < hash_words; w++) {
 /* note: using curve->num_words here to encourage compiler to re-read this variable */
 vhash_words[w] = mhash_words[w] ^ rx[w % curve->num_words] ^ r[w % curve->num_words];
 }
@@ -88,7 +88,7 @@ bool bootloader_common_label_search(const char *list, char *label)
 // [start_delim] + label + [end_delim] was not found.
 // Position is moving to next delimiter if it is not the end of list.
-int pos_delim = strcspn(sub_list_start_like_label, ", ");
+size_t pos_delim = strcspn(sub_list_start_like_label, ", ");
 if (pos_delim == strlen(sub_list_start_like_label)) {
 break;
 }
@@ -93,7 +93,7 @@ int bootloader_common_select_otadata(const esp_ota_select_entry_t *two_otadata,
 }
 int active_otadata = -1;
 if (valid_two_otadata[0] && valid_two_otadata[1]) {
-int condition = (max == true) ? MAX(two_otadata[0].ota_seq, two_otadata[1].ota_seq) : MIN(two_otadata[0].ota_seq, two_otadata[1].ota_seq);
+uint32_t condition = (max == true) ? MAX(two_otadata[0].ota_seq, two_otadata[1].ota_seq) : MIN(two_otadata[0].ota_seq, two_otadata[1].ota_seq);
 if (condition == two_otadata[0].ota_seq) {
 active_otadata = 0;
 } else {
@@ -300,7 +300,7 @@ static esp_err_t bootloader_flash_read_allow_decrypt(size_t src_addr, void *dest
 {
 uint32_t *dest_words = (uint32_t *)dest;
-for (int word = 0; word < size / 4; word++) {
+for (size_t word = 0; word < size / 4; word++) {
 uint32_t word_src = src_addr + word * 4; /* Read this offset from flash */
 uint32_t map_at = word_src & MMU_FLASH_MASK; /* Map this 64KB block from flash */
 uint32_t *map_ptr;
@@ -34,7 +34,7 @@
 assert(buffer != NULL);
-for (int i = 0; i < length; i++) {
+for (size_t i = 0; i < length; i++) {
 if (i == 0 || i % 4 == 0) { /* redundant check is for a compiler warning */
 /* in bootloader with ADC feeding HWRNG, we accumulate 1
 bit of entropy per 40 APB cycles (==80 CPU cycles.)
@@ -230,7 +230,7 @@ static esp_partition_pos_t index_to_partition(const bootloader_state_t *bs, int
 return bs->test;
 }
-if (index >= 0 && index < MAX_OTA_SLOTS && index < bs->app_count) {
+if (index >= 0 && index < MAX_OTA_SLOTS && index < (int)bs->app_count) {
 return bs->ota[index];
 }
@@ -500,7 +500,7 @@ void bootloader_utility_load_boot_image(const bootloader_state_t *bs, int start_
 }
 /* failing that work forwards from start_index, try valid OTA slots */
-for (index = start_index + 1; index < bs->app_count; index++) {
+for (index = start_index + 1; index < (int)bs->app_count; index++) {
 part = index_to_partition(bs, index);
 if (part.size == 0) {
 continue;
@@ -718,7 +718,7 @@ static void set_cache_and_start_app(
 DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
 }
 #else
-for (int i = 0; i < FLASH_MMU_TABLE_SIZE; i++) {
+for (size_t i = 0; i < FLASH_MMU_TABLE_SIZE; i++) {
 FLASH_MMU_TABLE[i] = MMU_TABLE_INVALID_VAL;
 }
 #endif
@@ -828,7 +828,7 @@ esp_err_t bootloader_sha256_hex_to_str(char *out_str, const uint8_t *in_array_he
 if (out_str == NULL || in_array_hex == NULL || len == 0) {
 return ESP_ERR_INVALID_ARG;
 }
-for (int i = 0; i < len; i++) {
+for (size_t i = 0; i < len; i++) {
 for (int shift = 0; shift < 2; shift++) {
 uint8_t nibble = (in_array_hex[i] >> (shift ? 0 : 4)) & 0x0F;
 if (nibble < 10) {
@@ -848,7 +848,7 @@ void bootloader_debug_buffer(const void *buffer, size_t length, const char *labe
 const uint8_t *bytes = (const uint8_t *)buffer;
 char hexbuf[length * 2 + 1];
 hexbuf[length * 2] = 0;
-for (int i = 0; i < length; i++) {
+for (size_t i = 0; i < length; i++) {
 for (int shift = 0; shift < 2; shift++) {
 uint8_t nibble = (bytes[i] >> (shift ? 0 : 4)) & 0x0F;
 if (nibble < 10) {
@@ -55,7 +55,7 @@ void bootloader_sha256_data(bootloader_sha256_handle_t handle, const void *data,
 while (REG_READ(SHA_256_BUSY_REG) != 0) { }
 // Copy to memory block
-for (int i = 0; i < copy_words; i++) {
+for (size_t i = 0; i < copy_words; i++) {
 sha_text_reg[block_count + i] = __builtin_bswap32(w[i]);
 }
 asm volatile ("memw");
@@ -117,7 +117,7 @@ void bootloader_sha256_finish(bootloader_sha256_handle_t handle, uint8_t *digest
 uint32_t *digest_words = (uint32_t *)digest;
 uint32_t *sha_text_reg = (uint32_t *)(SHA_TEXT_BASE);
-for (int i = 0; i < DIGEST_WORDS; i++) {
+for (size_t i = 0; i < DIGEST_WORDS; i++) {
 digest_words[i] = __builtin_bswap32(sha_text_reg[i]);
 }
 asm volatile ("memw");
@@ -76,7 +76,7 @@ static bool secure_boot_generate(uint32_t image_len){
 ESP_LOGE(TAG, "bootloader_mmap(0x1000, 0x%x) failed", image_len);
 return false;
 }
-for (int i = 0; i < image_len; i+= sizeof(digest.iv)) {
+for (size_t i = 0; i < image_len; i+= sizeof(digest.iv)) {
 ets_secure_boot_hash(&image[i/sizeof(uint32_t)]);
 }
 bootloader_munmap(image);
@@ -301,7 +301,7 @@ static esp_err_t image_load(esp_image_load_mode_t mode, const esp_partition_pos_
 uint32_t load_addr = data->segments[i].load_addr;
 if (should_load(load_addr)) {
 uint32_t *loaded = (uint32_t *)load_addr;
-for (int j = 0; j < data->segments[i].data_len / sizeof(uint32_t); j++) {
+for (size_t j = 0; j < data->segments[i].data_len / sizeof(uint32_t); j++) {
 loaded[j] ^= (j & 1) ? ram_obfs_value[0] : ram_obfs_value[1];
 }
 }
@@ -555,7 +555,7 @@ static esp_err_t process_segment(int index, uint32_t flash_addr, esp_image_segme
 uint32_t free_page_count = bootloader_mmap_get_free_pages();
 ESP_LOGD(TAG, "free data page_count 0x%08x", free_page_count);
-int32_t data_len_remain = data_len;
+uint32_t data_len_remain = data_len;
 while (data_len_remain > 0) {
 #if SECURE_BOOT_CHECK_SIGNATURE && defined(BOOTLOADER_BUILD)
 /* Double check the address verification done above */
@@ -619,7 +619,7 @@ static esp_err_t process_segment_data(intptr_t load_addr, uint32_t data_addr, ui
 const uint32_t *src = data;
-for (int i = 0; i < data_len; i += 4) {
+for (size_t i = 0; i < data_len; i += 4) {
 int w_i = i / 4; // Word index
 uint32_t w = src[w_i];
 if (checksum != NULL) {
@@ -28,7 +28,7 @@ static const char *TAG = "flash_parts";
 esp_err_t esp_partition_table_verify(const esp_partition_info_t *partition_table, bool log_errors, int *num_partitions)
 {
 int md5_found = 0;
-int num_parts;
+size_t num_parts;
 uint32_t chip_size = g_rom_flashchip.chip_size;
 *num_partitions = 0;
@@ -123,7 +123,7 @@ void bootloader_enable_qio_mode(void)
 uint32_t raw_flash_id;
 uint8_t mfg_id;
 uint16_t flash_id;
-int i;
+size_t i;
 ESP_LOGD(TAG, "Probing for QIO mode enable...");
 esp_rom_spiflash_wait_idle(&g_rom_flashchip);
@@ -162,7 +162,7 @@ void esp_console_get_completion(const char *buf, linenoiseCompletions *lc)
 const char *esp_console_get_hint(const char *buf, int *color, int *bold)
 {
-int len = strlen(buf);
+size_t len = strlen(buf);
 cmd_item_t *it;
 SLIST_FOREACH(it, &s_cmd_list, next) {
 if (strlen(it->command) == len &&
@@ -179,7 +179,7 @@ static const cmd_item_t *find_command_by_name(const char *name)
 {
 const cmd_item_t *cmd = NULL;
 cmd_item_t *it;
-int len = strlen(name);
+size_t len = strlen(name);
 SLIST_FOREACH(it, &s_cmd_list, next) {
 if (strlen(it->command) == len &&
 strcmp(name, it->command) == 0) {
@@ -238,6 +238,7 @@ static int getCursorPosition(void) {
 * if it fails. */
 static int getColumns(void) {
 int start, cols;
+int fd = fileno(stdout);
 /* Get the initial position so we can restore it later. */
 start = getCursorPosition();
@@ -253,7 +254,7 @@ static int getColumns(void) {
 if (cols > start) {
 char seq[32];
 snprintf(seq,32,"\x1b[%dD",cols-start);
-if (fwrite(seq, 1, strlen(seq), stdout) == -1) {
+if (write(fd, seq, strlen(seq)) == -1) {
 /* Can't recover... */
 }
 flushWrite();
@@ -298,6 +299,7 @@ static int completeLine(struct linenoiseState *ls) {
 linenoiseCompletions lc = { 0, NULL };
 int nread, nwritten;
 char c = 0;
+int in_fd = fileno(stdin);
 completionCallback(ls->buf,&lc);
 if (lc.len == 0) {
@@ -320,7 +322,7 @@ static int completeLine(struct linenoiseState *ls) {
 refreshLine(ls);
 }
-nread = fread(&c, 1, 1, stdin);
+nread = read(in_fd, &c, 1);
 if (nread <= 0) {
 freeCompletions(&lc);
 return -1;
@@ -449,6 +451,7 @@ void refreshShowHints(struct abuf *ab, struct linenoiseState *l, int plen) {
 static void refreshSingleLine(struct linenoiseState *l) {
 char seq[64];
 size_t plen = l->plen;
+int fd = fileno(stdout);
 char *buf = l->buf;
 size_t len = l->len;
 size_t pos = l->pos;
@@ -478,7 +481,7 @@ static void refreshSingleLine(struct linenoiseState *l) {
 /* Move cursor to original position. */
 snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen));
 abAppend(&ab,seq,strlen(seq));
-if (fwrite(ab.b, ab.len, 1, stdout) == -1) {} /* Can't recover from write error. */
+if (write(fd, ab.b, ab.len) == -1) {} /* Can't recover from write error. */
 flushWrite();
 abFree(&ab);
 }
@@ -496,6 +499,7 @@ static void refreshMultiLine(struct linenoiseState *l) {
 int col; /* colum position, zero-based. */
 int old_rows = l->maxrows;
 int j;
+int fd = fileno(stdout);
 struct abuf ab;
 /* Update maxrows if needed. */
@@ -566,7 +570,7 @@ static void refreshMultiLine(struct linenoiseState *l) {
 lndebug("\n");
 l->oldpos = l->pos;
-if (fwrite(ab.b,ab.len,1,stdout) == -1) {} /* Can't recover from write error. */
+if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */
 flushWrite();
 abFree(&ab);
 }
@@ -584,6 +588,7 @@ static void refreshLine(struct linenoiseState *l) {
 *
 * On error writing to the terminal -1 is returned, otherwise 0. */
 int linenoiseEditInsert(struct linenoiseState *l, char c) {
+int fd = fileno(stdout);
 if (l->len < l->buflen) {
 if (l->len == l->pos) {
 l->buf[l->pos] = c;
@@ -593,7 +598,9 @@ int linenoiseEditInsert(struct linenoiseState *l, char c) {
 if ((!mlmode && l->plen+l->len < l->cols && !hintsCallback)) {
 /* Avoid a full update of the line in the
 * trivial case. */
-if (fwrite(&c,1,1,stdout) == -1) return -1;
+if (write(fd, &c,1) == -1) {
+return -1;
+}
 flushWrite();
 } else {
 refreshLine(l);
@@ -717,6 +724,8 @@ void linenoiseEditDeletePrevWord(struct linenoiseState *l) {
 static int linenoiseEdit(char *buf, size_t buflen, const char *prompt)
 {
 struct linenoiseState l;
+int out_fd = fileno(stdout);
+int in_fd = fileno(stdin);
 /* Populate the linenoise state that we pass to functions implementing
 * specific editing functionalities. */
@@ -739,7 +748,9 @@ static int linenoiseEdit(char *buf, size_t buflen, const char *prompt)
 linenoiseHistoryAdd("");
 int pos1 = getCursorPosition();
-if (fwrite(prompt,l.plen,1,stdout) == -1) return -1;
+if (write(out_fd, prompt,l.plen) == -1) {
+return -1;
+}
 flushWrite();
 int pos2 = getCursorPosition();
 if (pos1 >= 0 && pos2 >= 0) {
@@ -750,7 +761,7 @@ static int linenoiseEdit(char *buf, size_t buflen, const char *prompt)
 int nread;
 char seq[3];
-nread = fread(&c, 1, 1, stdin);
+nread = read(in_fd, &c, 1);
 if (nread <= 0) return l.len;
 /* Only autocomplete when the callback is set. It returns < 0 when
@@ -819,13 +830,17 @@ static int linenoiseEdit(char *buf, size_t buflen, const char *prompt)
 break;
 case ESC: /* escape sequence */
 /* Read the next two bytes representing the escape sequence. */
-if (fread(seq, 1, 2, stdin) < 2) break;
+if (read(in_fd, seq, 2) < 2) {
+break;
+}
 /* ESC [ sequences. */
 if (seq[0] == '[') {
 if (seq[1] >= '0' && seq[1] <= '9') {
 /* Extended escape, read additional byte. */
-if (fread(seq+2, 1, 1, stdin) == -1) break;
+if (read(in_fd, seq+2, 1) == -1) {
+break;
+}
 if (seq[2] == '~') {
 switch(seq[1]) {
 case '3': /* Delete key. */
@@ -924,7 +939,7 @@ int linenoiseProbe(void) {
 while (timeout_ms > 0 && read_bytes < 4) { // response is ESC[0n or ESC[3n
 usleep(10000);
 char c;
-int cb = fread(&c, 1, 1, stdin);
+int cb = read(stdin_fileno, &c, 1);
 read_bytes += cb;
 timeout_ms--;
 }
@@ -957,7 +972,7 @@ static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) {
 static int linenoiseDumb(char* buf, size_t buflen, const char* prompt) {
 /* dumb terminal, fall back to fgets */
 fputs(prompt, stdout);
-int count = 0;
+size_t count = 0;
 while (count < buflen) {
 int c = fgetc(stdin);
 if (c == '\n') {
@@ -44,7 +44,7 @@ size_t esp_console_split_argv(char *line, char **argv, size_t argv_size)
 const int ESCAPE = '\\';
 const int SPACE = ' ';
 split_state_t state = SS_SPACE;
-int argc = 0;
+size_t argc = 0;
 char *next_arg_start = line;
 char *out_ptr = line;
 for (char *in_ptr = line; argc < argv_size - 1; ++in_ptr) {
@@ -92,7 +92,7 @@ esp_err_t temp_sensor_get_config(temp_sensor_config_t *tsens)
 SET_PERI_REG_MASK(ANA_CONFIG2_REG, ANA_SAR_CFG2_M);
 tsens->dac_offset = REGI2C_READ_MASK(I2C_SAR_ADC, I2C_SARADC_TSENS_DAC);
 for (int i = TSENS_DAC_L0; i < TSENS_DAC_MAX; i++) {
-if (tsens->dac_offset == dac_offset[i].set_val) {
+if ((int)tsens->dac_offset == dac_offset[i].set_val) {
 tsens->dac_offset = dac_offset[i].index;
 break;
 }
@@ -75,7 +75,7 @@ static void touch_pad_workaround_isr_internal(void *arg)
 {
 uint16_t ch_mask = 0;
 uint32_t intr_mask = touch_hal_read_intr_status_mask();
-uint32_t pad_num = touch_hal_get_current_meas_channel();
+int pad_num = touch_hal_get_current_meas_channel();
 /* Make sure that the scan done interrupt is generated after the last channel measurement is completed. */
 if (intr_mask & TOUCH_PAD_INTR_MASK_SCAN_DONE) {
 touch_hal_get_channel_mask(&ch_mask);
@@ -114,7 +114,7 @@ static int _i2s_adc_channel = -1;
 static i2s_dma_t *i2s_create_dma_queue(i2s_port_t i2s_num, int dma_buf_count, int dma_buf_len);
 static esp_err_t i2s_destroy_dma_queue(i2s_port_t i2s_num, i2s_dma_t *dma);
-static inline void gpio_matrix_out_check(uint32_t gpio, uint32_t signal_idx, bool out_inv, bool oen_inv)
+static inline void gpio_matrix_out_check(int gpio, uint32_t signal_idx, bool out_inv, bool oen_inv)
 {
 //if pin = -1, do not need to configure
 if (gpio != -1) {
@@ -124,7 +124,7 @@ static inline void gpio_matrix_out_check(uint32_t gpio, uint32_t signal_idx, boo
 }
 }
-static inline void gpio_matrix_in_check(uint32_t gpio, uint32_t signal_idx, bool inv)
+static inline void gpio_matrix_in_check(int gpio, uint32_t signal_idx, bool inv)
 {
 if (gpio != -1) {
 PIN_FUNC_SELECT(GPIO_PIN_MUX_REG[gpio], PIN_FUNC_GPIO);
@@ -352,11 +352,11 @@ esp_err_t i2s_set_clk(i2s_port_t i2s_num, uint32_t rate, i2s_bits_per_sample_t b
 #endif
 i2s_hal_set_tx_mode(&(p_i2s_obj[i2s_num]->hal), ch, bits);
-if (p_i2s_obj[i2s_num]->channel_num != ch) {
+if (p_i2s_obj[i2s_num]->channel_num != (int)ch) {
 p_i2s_obj[i2s_num]->channel_num = (ch == 2) ? 2 : 1;
 }
-if (bits != p_i2s_obj[i2s_num]->bits_per_sample) {
+if ((int)bits != p_i2s_obj[i2s_num]->bits_per_sample) {
 p_i2s_obj[i2s_num]->bits_per_sample = bits;
 // Round bytes_per_sample up to next multiple of 16 bits
@@ -1040,7 +1040,7 @@ esp_err_t i2s_driver_uninstall(i2s_port_t i2s_num)
 esp_err_t i2s_write(i2s_port_t i2s_num, const void *src, size_t size, size_t *bytes_written, TickType_t ticks_to_wait)
 {
 char *data_ptr, *src_byte;
-int bytes_can_write;
+size_t bytes_can_write;
 *bytes_written = 0;
 I2S_CHECK((i2s_num < I2S_NUM_MAX), "i2s_num error", ESP_ERR_INVALID_ARG);
 I2S_CHECK((size < SOC_I2S_MAX_BUFFER_SIZE), "size is too large", ESP_ERR_INVALID_ARG);
@@ -1148,7 +1148,7 @@ esp_err_t i2s_write_expand(i2s_port_t i2s_num, const void *src, size_t size, siz
 data_ptr = (char*)p_i2s_obj[i2s_num]->tx->curr_ptr;
 data_ptr += p_i2s_obj[i2s_num]->tx->rw_pos;
 bytes_can_write = p_i2s_obj[i2s_num]->tx->buf_size - p_i2s_obj[i2s_num]->tx->rw_pos;
-if (bytes_can_write > size) {
+if (bytes_can_write > (int)size) {
 bytes_can_write = size;
 }
 tail = bytes_can_write % aim_bytes;
@@ -1190,7 +1190,7 @@ esp_err_t i2s_read(i2s_port_t i2s_num, void *dest, size_t size, size_t *bytes_re
 data_ptr = (char*)p_i2s_obj[i2s_num]->rx->curr_ptr;
 data_ptr += p_i2s_obj[i2s_num]->rx->rw_pos;
 bytes_can_read = p_i2s_obj[i2s_num]->rx->buf_size - p_i2s_obj[i2s_num]->rx->rw_pos;
-if (bytes_can_read > size) {
+if (bytes_can_read > (int)size) {
 bytes_can_read = size;
 }
 memcpy(dest_byte, data_ptr, bytes_can_read);
@@ -166,7 +166,7 @@ static void _ledc_op_lock_release(ledc_mode_t mode, ledc_channel_t channel)
 }
 }
-static int ledc_get_max_duty(ledc_mode_t speed_mode, ledc_channel_t channel)
+static uint32_t ledc_get_max_duty(ledc_mode_t speed_mode, ledc_channel_t channel)
 {
 // The arguments are checked before internally calling this function.
 uint32_t max_duty;
@@ -731,7 +731,7 @@ static esp_err_t _ledc_set_fade_with_time(ledc_mode_t speed_mode, ledc_channel_t
 if (duty_delta == 0) {
 return _ledc_set_fade_with_step(speed_mode, channel, target_duty, 0, 0);
 }
-int total_cycles = max_fade_time_ms * freq / 1000;
+uint32_t total_cycles = max_fade_time_ms * freq / 1000;
 if (total_cycles == 0) {
 ESP_LOGW(LEDC_TAG, LEDC_FADE_TOO_FAST_STR);
 return _ledc_set_fade_with_step(speed_mode, channel, target_duty, 0, 0);
@@ -96,7 +96,7 @@ typedef struct {
 rmt_item32_t *rx_item_buf;
 uint32_t rx_item_buf_size;
 uint32_t rx_item_len;
-uint32_t rx_item_start_idx;
+int rx_item_start_idx;
 #endif
 sample_to_rmt_t sample_to_rmt;
 size_t sample_size_remain;
@@ -798,7 +798,7 @@ static void IRAM_ATTR rmt_driver_isr_default(void *arg)
 }
 }
 const rmt_item32_t *pdata = p_rmt->tx_data;
-int len_rem = p_rmt->tx_len_rem;
+size_t len_rem = p_rmt->tx_len_rem;
 if (len_rem >= p_rmt->tx_sub_len) {
 rmt_fill_memory(channel, pdata, p_rmt->tx_sub_len, p_rmt->tx_offset);
 p_rmt->tx_data += p_rmt->tx_sub_len;
@@ -548,7 +548,7 @@ static void sdio_intr_send(void* arg)
 assert(ret == pdTRUE);
 }
 //get_next_finished_arg returns the total amount of returned descs.
-for(int i = 0; i < returned_cnt; i++) {
+for(size_t i = 0; i < returned_cnt; i++) {
 portBASE_TYPE ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield);
 assert(ret == pdTRUE);
 }
@@ -611,7 +611,7 @@ static esp_err_t send_flush_data(void)
 if (err == ESP_OK) {
 portBASE_TYPE ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY);
 assert(ret == pdTRUE);
-for (int i = 0; i < return_cnt; i++) {
+for (size_t i = 0; i < return_cnt; i++) {
 portBASE_TYPE ret = xSemaphoreGive(context.remain_cnt);
 assert(ret == pdTRUE);
 }
@@ -541,7 +541,7 @@ static esp_err_t poll_busy(slot_info_t *slot, int timeout_ms, bool polling)
 };
 esp_err_t ret;
-uint64_t t_end = esp_timer_get_time() + timeout_ms * 1000;
+int64_t t_end = esp_timer_get_time() + timeout_ms * 1000;
 int nonzero_count = 0;
 do {
 t_rx = SDSPI_MOSI_IDLE_VAL;
@@ -576,7 +576,7 @@ static esp_err_t poll_data_token(slot_info_t *slot, uint8_t *extra_ptr, size_t *
 .length = sizeof(t_rx) * 8,
 };
 esp_err_t ret;
-uint64_t t_end = esp_timer_get_time() + timeout_ms * 1000;
+int64_t t_end = esp_timer_get_time() + timeout_ms * 1000;
 do {
 memset(t_rx, SDSPI_MOSI_IDLE_VAL, sizeof(t_rx));
 ret = spi_device_polling_transmit(slot->spi_handle, &t);
@@ -584,7 +584,7 @@ static esp_err_t poll_data_token(slot_info_t *slot, uint8_t *extra_ptr, size_t *
 return ret;
 }
 bool found = false;
-for (int byte_idx = 0; byte_idx < sizeof(t_rx); byte_idx++) {
+for (size_t byte_idx = 0; byte_idx < sizeof(t_rx); byte_idx++) {
 uint8_t rd_data = t_rx[byte_idx];
 if (rd_data == TOKEN_BLOCK_START) {
 found = true;
@@ -704,7 +704,7 @@ static esp_err_t start_command_read_blocks(slot_info_t *slot, sdspi_hw_cmd_t *cm
 const uint8_t* extra_data_ptr = NULL;
 bool need_poll = true;
-for (int i = 0; i < pre_scan_data_size; ++i) {
+for (size_t i = 0; i < pre_scan_data_size; ++i) {
 if (pre_scan_data_ptr[i] == TOKEN_BLOCK_START) {
 extra_data_size = pre_scan_data_size - i - 1;
 extra_data_ptr = pre_scan_data_ptr + i + 1;
@@ -993,7 +993,7 @@ esp_err_t sdspi_host_init_slot(int slot, const sdspi_slot_config_t* slot_config)
 if (ret != ESP_OK) {
 goto cleanup;
 }
-if (sdspi_handle != host_id) {
+if (sdspi_handle != (int)host_id) {
 ESP_LOGE(TAG, "The deprecated sdspi_host_init_slot should be called before all other devices on the specified bus.");
 sdspi_host_remove_device(sdspi_handle);
 ret = ESP_ERR_INVALID_STATE;
@@ -233,8 +233,8 @@ DRAM_ATTR static const char TAG[] = "bus_lock";
 return (ret_val); \
 }
-static inline uint32_t mask_get_id(uint32_t mask);
-static inline uint32_t dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
+static inline int mask_get_id(uint32_t mask);
+static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
 /*******************************************************************************
 * atomic operations to the status
@@ -631,12 +631,12 @@ void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
 free(dev_handle);
 }
-IRAM_ATTR static inline uint32_t mask_get_id(uint32_t mask)
+IRAM_ATTR static inline int mask_get_id(uint32_t mask)
 {
 return ID_DEV_MASK(mask);
 }
-IRAM_ATTR static inline uint32_t dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
+IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
 {
 return mask_get_id(dev_lock->mask);
 }
@@ -130,7 +130,7 @@ static inline void twai_handle_rx_buffer_frames(BaseType_t *task_woken, int *ale
 {
 uint32_t msg_count = twai_hal_get_rx_msg_count(&twai_context);
-for (int i = 0; i < msg_count; i++) {
+for (uint32_t i = 0; i < msg_count; i++) {
 twai_hal_frame_t frame;
 twai_hal_read_rx_buffer_and_clear(&twai_context, &frame);
 //Copy frame into RX Queue
@@ -121,7 +121,7 @@ typedef struct {
 int rx_buf_size; /*!< RX ring buffer size */
 RingbufHandle_t rx_ring_buf; /*!< RX ring buffer handler*/
 bool rx_buffer_full_flg; /*!< RX ring buffer full flag. */
-int rx_cur_remain; /*!< Data number that waiting to be read out in ring buffer item*/
+uint32_t rx_cur_remain; /*!< Data number that waiting to be read out in ring buffer item*/
 uint8_t* rx_ptr; /*!< pointer to the current data in ring buffer*/
 uint8_t* rx_head_ptr; /*!< pointer to the head of RX item*/
 uint8_t rx_data_buf[SOC_UART_FIFO_LEN]; /*!< Data buffer to stash FIFO data*/
@@ -756,7 +756,7 @@ static void UART_ISR_ATTR uart_rx_intr_handler_default(void *param)
 continue;
 }
 bool en_tx_flg = false;
-int tx_fifo_rem = uart_hal_get_txfifo_len(&(uart_context[uart_num].hal));
+uint32_t tx_fifo_rem = uart_hal_get_txfifo_len(&(uart_context[uart_num].hal));
 //We need to put a loop here, in case all the buffer items are very short.
 //That would cause a watch_dog reset because empty interrupt happens so often.
 //Although this is a loop in ISR, this loop will execute at most 128 turns.
@@ -1102,7 +1102,7 @@ static int uart_tx_all(uart_port_t uart_num, const char* src, size_t size, bool
 xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (portTickType)portMAX_DELAY);
 p_uart_obj[uart_num]->coll_det_flg = false;
 if(p_uart_obj[uart_num]->tx_buf_size > 0) {
-int max_size = xRingbufferGetMaxItemSize(p_uart_obj[uart_num]->tx_ring_buf);
+size_t max_size = xRingbufferGetMaxItemSize(p_uart_obj[uart_num]->tx_ring_buf);
 int offset = 0;
 uart_tx_data_t evt;
 evt.tx_data.size = size;
@@ -1114,7 +1114,7 @@ static int uart_tx_all(uart_port_t uart_num, const char* src, size_t size, bool
 }
 xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void*) &evt, sizeof(uart_tx_data_t), portMAX_DELAY);
 while(size > 0) {
-int send_size = size > max_size / 2 ? max_size / 2 : size;
+size_t send_size = size > max_size / 2 ? max_size / 2 : size;
 xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void*) (src + offset), send_size, portMAX_DELAY);
 size -= send_size;
 offset += send_size;
@@ -273,7 +273,7 @@ static void psram_reset_mode(int spi_num)
 esp_err_t psram_enable_wrap(uint32_t wrap_size)
 {
-static int current_wrap_size = 0;
+static uint32_t current_wrap_size = 0;
 if (current_wrap_size == wrap_size) {
 return ESP_OK;
 }
@@ -275,7 +275,7 @@ static void psram_reset_mode(int spi_num)
 esp_err_t psram_enable_wrap(uint32_t wrap_size)
 {
-static int current_wrap_size = 0;
+static uint32_t current_wrap_size = 0;
 if (current_wrap_size == wrap_size) {
 return ESP_OK;
 }
@@ -741,7 +741,7 @@ static const char esp_unknown_msg[] =
 const char *esp_err_to_name(esp_err_t code)
 {
 #ifdef CONFIG_ESP_ERR_TO_NAME_LOOKUP
-int i;
+size_t i;
 for (i = 0; i < sizeof(esp_err_msg_table)/sizeof(esp_err_msg_table[0]); ++i) {
 if (esp_err_msg_table[i].code == code) {
@@ -756,7 +756,7 @@ const char *esp_err_to_name(esp_err_t code)
 const char *esp_err_to_name_r(esp_err_t code, char *buf, size_t buflen)
 {
 #ifdef CONFIG_ESP_ERR_TO_NAME_LOOKUP
-int i;
+size_t i;
 for (i = 0; i < sizeof(esp_err_msg_table)/sizeof(esp_err_msg_table[0]); ++i) {
 if (esp_err_msg_table[i].code == code) {
@@ -30,7 +30,7 @@ static const char esp_unknown_msg[] =
 const char *esp_err_to_name(esp_err_t code)
 {
 #ifdef CONFIG_ESP_ERR_TO_NAME_LOOKUP
-int i;
+size_t i;
 for (i = 0; i < sizeof(esp_err_msg_table)/sizeof(esp_err_msg_table[0]); ++i) {
 if (esp_err_msg_table[i].code == code) {
@@ -45,7 +45,7 @@ const char *esp_err_to_name(esp_err_t code)
 const char *esp_err_to_name_r(esp_err_t code, char *buf, size_t buflen)
 {
 #ifdef CONFIG_ESP_ERR_TO_NAME_LOOKUP
-int i;
+size_t i;
 for (i = 0; i < sizeof(esp_err_msg_table)/sizeof(esp_err_msg_table[0]); ++i) {
 if (esp_err_msg_table[i].code == code) {
@@ -219,7 +219,7 @@ ESP_EVENT_DECLARE_BASE(ETH_EVENT);
 * - ESP_ERR_NOT_FOUND: can't detect any PHY device
 * - ESP_FAIL: detect phy address failed because some error occurred
 */
-esp_err_t esp_eth_detect_phy_addr(esp_eth_mediator_t *eth, uint32_t *detected_addr);
+esp_err_t esp_eth_detect_phy_addr(esp_eth_mediator_t *eth, int *detected_addr);
 #ifdef __cplusplus
 }
@@ -19,13 +19,13 @@
 static const char *TAG = "esp_eth.phy";
-esp_err_t esp_eth_detect_phy_addr(esp_eth_mediator_t *eth, uint32_t *detected_addr)
+esp_err_t esp_eth_detect_phy_addr(esp_eth_mediator_t *eth, int *detected_addr)
 {
 if (!eth || !detected_addr) {
 ESP_LOGE(TAG, "eth and detected_addr can't be null");
 return ESP_ERR_INVALID_ARG;
 }
-uint32_t addr_try = 0;
+int addr_try = 0;
 uint32_t reg_value = 0;
 for (; addr_try < 16; addr_try++) {
 eth->phy_reg_read(eth, addr_try, ETH_PHY_IDR1_REG_ADDR, &reg_value);
@@ -82,7 +82,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -88,7 +88,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -105,7 +105,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -63,7 +63,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -204,7 +204,7 @@ static esp_err_t ksz8041_negotiate(esp_eth_phy_t *phy)
 /* Wait for auto negotiation complete */
 bmsr_reg_t bmsr;
 pc2r_reg_t pc2r;
-int32_t to = 0;
+uint32_t to = 0;
 for (to = 0; to < ksz8041->autonego_timeout_ms / 10; to++) {
 vTaskDelay(pdMS_TO_TICKS(10));
 PHY_CHECK(eth->phy_reg_read(eth, ksz8041->addr, ETH_PHY_BMSR_REG_ADDR, &(bmsr.val)) == ESP_OK,
@@ -160,7 +160,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -301,7 +301,7 @@ static esp_err_t lan8720_negotiate(esp_eth_phy_t *phy)
 /* Wait for auto negotiation complete */
 bmsr_reg_t bmsr;
 pscsr_reg_t pscsr;
-int32_t to = 0;
+uint32_t to = 0;
 for (to = 0; to < lan8720->autonego_timeout_ms / 10; to++) {
 vTaskDelay(pdMS_TO_TICKS(10));
 PHY_CHECK(eth->phy_reg_read(eth, lan8720->addr, ETH_PHY_BMSR_REG_ADDR, &(bmsr.val)) == ESP_OK,
@@ -66,7 +66,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -55,7 +55,7 @@ typedef union {
 typedef struct {
 esp_eth_phy_t parent;
 esp_eth_mediator_t *eth;
-uint32_t addr;
+int addr;
 uint32_t reset_timeout_ms;
 uint32_t autonego_timeout_ms;
 eth_link_t link_status;
@@ -24,10 +24,10 @@
 #define mem_check(x) assert(x)
 #endif
-char *http_utils_join_string(const char *first_str, int len_first, const char *second_str, int len_second)
+char *http_utils_join_string(const char *first_str, size_t len_first, const char *second_str, size_t len_second)
 {
-int first_str_len = len_first > 0 ? len_first : strlen(first_str);
-int second_str_len = len_second > 0 ? len_second : strlen(second_str);
+size_t first_str_len = len_first > 0 ? len_first : strlen(first_str);
+size_t second_str_len = len_second > 0 ? len_second : strlen(second_str);
 char *ret = NULL;
 if (first_str_len + second_str_len > 0) {
 ret = calloc(1, first_str_len + second_str_len + 1);
@@ -64,7 +64,7 @@ char *http_utils_get_string_between(const char *str, const char *begin, const ch
 * - New string pointer
 * - NULL: Invalid input
 */
-char *http_utils_join_string(const char *first_str, int len_first, const char *second_str, int len_second);
+char *http_utils_join_string(const char *first_str, size_t len_first, const char *second_str, size_t len_second);
 /**
 * @brief Check if ``str`` is start with ``start``
@@ -19,6 +19,7 @@
 #include <esp_log.h>
 #include <esp_ota_ops.h>
 #include <errno.h>
+#include <sys/param.h>
 #define IMAGE_HEADER_SIZE sizeof(esp_image_header_t) + sizeof(esp_image_segment_header_t) + sizeof(esp_app_desc_t) + 1
 #define DEFAULT_OTA_BUF_SIZE IMAGE_HEADER_SIZE
@@ -199,8 +200,7 @@ esp_err_t esp_https_ota_begin(esp_https_ota_config_t *ota_config, esp_https_ota_
 ESP_LOGI(TAG, "Writing to partition subtype %d at offset 0x%x",
 https_ota_handle->update_partition->subtype, https_ota_handle->update_partition->address);
-const int alloc_size = (ota_config->http_config->buffer_size > DEFAULT_OTA_BUF_SIZE) ?
-ota_config->http_config->buffer_size : DEFAULT_OTA_BUF_SIZE;
+const int alloc_size = MAX(ota_config->http_config->buffer_size, DEFAULT_OTA_BUF_SIZE);
 https_ota_handle->ota_upgrade_buf = (char *)malloc(alloc_size);
 if (!https_ota_handle->ota_upgrade_buf) {
 ESP_LOGE(TAG, "Couldn't allocate memory to upgrade data buffer");
@@ -103,7 +103,7 @@ void esp_cpu_configure_region_protection(void)
 * Both chips have the address space divided into 8 regions, 512MB each.
 */
 const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
-for (int i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
+for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
 mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
 }
@@ -109,7 +109,7 @@ static void rtc_clk_bbpll_enable(void);
 static void rtc_clk_cpu_freq_to_pll_mhz(int cpu_freq_mhz);
 // Current PLL frequency, in MHZ (320 or 480). Zero if PLL is not enabled.
-static int s_cur_pll_freq;
+static uint32_t s_cur_pll_freq;
 static const char* TAG = "rtc_clk";
@@ -41,7 +41,7 @@ static const char *TAG = "rtc_clk";
 // Current PLL frequency, in MHZ (320 or 480). Zero if PLL is not enabled.
 // On the ESP32-S2, 480MHz PLL is enabled at reset.
-static int s_cur_pll_freq = RTC_PLL_FREQ_480M;
+static uint32_t s_cur_pll_freq = RTC_PLL_FREQ_480M;
 static void rtc_clk_cpu_freq_to_8m(void);
@@ -41,7 +41,7 @@ static const char *TAG = "rtc_clk";
 #define DELAY_RTC_CLK_SWITCH 5
 // Current PLL frequency, in MHZ (320 or 480). Zero if PLL is not enabled.
-static int s_cur_pll_freq = RTC_PLL_FREQ_480M;
+static uint32_t s_cur_pll_freq = RTC_PLL_FREQ_480M;
 static void rtc_clk_cpu_freq_to_8m(void);
@@ -42,7 +42,7 @@ static volatile esp_ipc_wait_t s_ipc_wait[portNUM_PROCESSORS];// This variable t
 static void IRAM_ATTR ipc_task(void* arg)
 {
-const uint32_t cpuid = (uint32_t) arg;
+const int cpuid = (int) arg;
 assert(cpuid == xPortGetCoreID());
 while (true) {
 // Wait for IPC to be initiated.
@@ -189,9 +189,7 @@ static esp_err_t cmd_set_prop_vals_handler(LocalCtrlMessage *req,
 static int lookup_cmd_handler(int cmd_id)
 {
-int i;
-for (i = 0; i < sizeof(cmd_table)/sizeof(esp_local_ctrl_cmd_t); i++) {
+for (size_t i = 0; i < sizeof(cmd_table)/sizeof(esp_local_ctrl_cmd_t); i++) {
 if (cmd_table[i].cmd_num == cmd_id) {
 return i;
 }
@@ -84,7 +84,7 @@ esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_me
 mcp_hdl->max_stream_num = config->backlog;
 // circle TX/RX descriptors
-for (int i = 0; i < mcp_hdl->max_stream_num; i++) {
+for (size_t i = 0; i < mcp_hdl->max_stream_num; i++) {
 mcp_hdl->out_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
 mcp_hdl->out_streams[i].desc.next = &mcp_hdl->out_streams[i + 1].desc;
 mcp_hdl->in_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
@@ -236,8 +236,8 @@ esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n,
 dma_descriptor_t *rx_end_desc = NULL;
 dma_descriptor_t *tx_start_desc = NULL;
 dma_descriptor_t *tx_end_desc = NULL;
-int rx_prepared_size = 0;
-int tx_prepared_size = 0;
+size_t rx_prepared_size = 0;
+size_t tx_prepared_size = 0;
 ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
 ASMCP_CHECK(async_memcpy_impl_is_buffer_address_valid(&asmcp->mcp_impl, src, dst), "buffer address not valid", err, ESP_ERR_INVALID_ARG);
 ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);
@@ -276,7 +276,7 @@ ssize_t esp_usb_console_flush_internal(size_t last_write_size)
 assert(s_usb_tx_buf_pos >= last_write_size);
 ssize_t ret;
 size_t tx_buf_pos_before = s_usb_tx_buf_pos - last_write_size;
-int sent = cdc_acm_fifo_fill(s_cdc_acm_device, (const uint8_t*) s_usb_tx_buf, s_usb_tx_buf_pos);
+size_t sent = cdc_acm_fifo_fill(s_cdc_acm_device, (const uint8_t*) s_usb_tx_buf, s_usb_tx_buf_pos);
 if (sent == last_write_size) {
 /* everything was sent */
 ret = last_write_size;
@@ -911,9 +911,8 @@ static int vfs_fat_truncate(void* ctx, const char *path, off_t length)
 goto out;
 }
-res = f_size(file);
-if (res < length) {
+long sz = f_size(file);
+if (sz < length) {
 _lock_release(&fat_ctx->lock);
 ESP_LOGD(TAG, "truncate does not support extending size");
 errno = EPERM;
@@ -93,7 +93,7 @@ typedef unsigned portBASE_TYPE UBaseType_t;
 /*-----------------------------------------------------------*/
 #include "portbenchmark.h"
-static inline uint32_t IRAM_ATTR xPortGetCoreID(void) {
+static inline BaseType_t IRAM_ATTR xPortGetCoreID(void) {
 return cpu_hal_get_core_id();
 }
@@ -445,7 +445,7 @@ BaseType_t xPortInterruptedFromISRContext(void);
 #endif
 /* Multi-core: get current core ID */
-static inline uint32_t IRAM_ATTR xPortGetCoreID(void) {
+static inline BaseType_t IRAM_ATTR xPortGetCoreID(void) {
 return cpu_hal_get_core_id();
 }
@@ -457,14 +457,14 @@ void __attribute__((optimize("-O3"))) vPortExitCritical(portMUX_TYPE *mux)
 {
 vPortCPUReleaseMutex( mux );
 BaseType_t coreID = xPortGetCoreID();
-BaseType_t nesting = port_uxCriticalNesting[coreID];
+BaseType_t nesting = port_uxCriticalNesting[coreID];
-if(nesting > 0U)
+if(nesting > 0)
 {
 nesting--;
 port_uxCriticalNesting[coreID] = nesting;
-if( nesting == 0U )
+if( nesting == 0 )
 {
 portEXIT_CRITICAL_NESTED(port_uxOldInterruptState[coreID]);
 }
@@ -480,7 +480,7 @@ void __attribute__((weak)) vApplicationStackOverflowHook( TaskHandle_t xTask, c
 char buf[sizeof(ERR_STR1) + CONFIG_FREERTOS_MAX_TASK_NAME_LEN + sizeof(ERR_STR2) + 1 /* null char */] = { 0 };
 char *dest = buf;
-for (int i = 0 ; i < sizeof(str)/ sizeof(str[0]); i++) {
+for (size_t i = 0 ; i < sizeof(str)/ sizeof(str[0]); i++) {
 dest = strcat(dest, str[i]);
 }
 esp_system_abort(buf);
@@ -4429,7 +4429,7 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
 static void prvDeleteTLS( TCB_t *pxTCB )
 {
 configASSERT( pxTCB );
-for( int x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
+for( int x = 0; x < configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
 {
 if (pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL) //If del cb is set
 {
@@ -36,7 +36,7 @@ void adc_hal_digi_controller_config(const adc_digi_config_t *cfg)
 if (cfg->adc1_pattern_len) {
 adc_ll_digi_clear_pattern_table(ADC_NUM_1);
 adc_ll_digi_set_pattern_table_len(ADC_NUM_1, cfg->adc1_pattern_len);
-for (int i = 0; i < cfg->adc1_pattern_len; i++) {
+for (uint32_t i = 0; i < cfg->adc1_pattern_len; i++) {
 adc_ll_digi_set_pattern_table(ADC_NUM_1, i, cfg->adc1_pattern[i]);
 }
 }
@@ -46,7 +46,7 @@ void adc_hal_digi_controller_config(const adc_digi_config_t *cfg)
 if (cfg->adc2_pattern_len) {
 adc_ll_digi_clear_pattern_table(ADC_NUM_2);
 adc_ll_digi_set_pattern_table_len(ADC_NUM_2, cfg->adc2_pattern_len);
-for (int i = 0; i < cfg->adc2_pattern_len; i++) {
+for (uint32_t i = 0; i < cfg->adc2_pattern_len; i++) {
 adc_ll_digi_set_pattern_table(ADC_NUM_2, i, cfg->adc2_pattern[i]);
 }
 }
@@ -473,7 +473,7 @@ uint32_t emac_hal_transmit_frame(emac_hal_context_t *hal, uint8_t *buf, uint32_t
 eth_dma_tx_descriptor_t *desc_iter = hal->tx_desc;
 /* A frame is transmitted in multiple descriptor */
-for (int i = 0; i < bufcount; i++) {
+for (size_t i = 0; i < bufcount; i++) {
 /* Check if the descriptor is owned by the Ethernet DMA (when 1) or CPU (when 0) */
 if (desc_iter->TDES0.Own != EMAC_DMADESC_OWNER_CPU) {
 goto err;
@@ -508,7 +508,7 @@ uint32_t emac_hal_transmit_frame(emac_hal_context_t *hal, uint8_t *buf, uint32_t
 }
 /* Set Own bit of the Tx descriptor Status: gives the buffer back to ETHERNET DMA */
-for (int i = 0; i < bufcount; i++) {
+for (size_t i = 0; i < bufcount; i++) {
 hal->tx_desc->TDES0.Own = EMAC_DMADESC_OWNER_DMA;
 hal->tx_desc = (eth_dma_tx_descriptor_t *)(hal->tx_desc->Buffer2NextDescAddr);
 }
@@ -563,7 +563,7 @@ uint32_t emac_hal_receive_frame(emac_hal_context_t *hal, uint8_t *buf, uint32_t
 desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
 }
 desc_iter = first_desc;
-for (int i = 0; i < seg_count - 1; i++) {
+for (size_t i = 0; i < seg_count - 1; i++) {
 used_descs--;
 write_len = copy_len < CONFIG_ETH_DMA_BUFFER_SIZE ? copy_len : CONFIG_ETH_DMA_BUFFER_SIZE;
 /* copy data to buffer */
@@ -116,7 +116,7 @@ static inline void cpu_ll_set_watchpoint(int id,
 //We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
 for (int x = 0; x < 7; x++) {
-if (size == (size_t)(1 << x)) {
+if (size == (size_t)(1U << x)) {
 break;
 }
 dbreakc <<= 1;
@@ -122,7 +122,7 @@ static inline void sha_ll_fill_text_block(const void *input_text, size_t block_w
 uint32_t *data_words = NULL;
 reg_addr_buf = (uint32_t *)(SHA_TEXT_BASE);
 data_words = (uint32_t *)input_text;
-for (int i = 0; i < block_word_len; i++) {
+for (size_t i = 0; i < block_word_len; i++) {
 reg_addr_buf[i] = __builtin_bswap32(data_words[i]);
 }
 }
@@ -141,7 +141,7 @@ static inline void sha_ll_read_digest(esp_sha_type sha_type, void *digest_state,
 if (sha_type == SHA2_384 || sha_type == SHA2_512) {
 /* for these ciphers using 64-bit states, swap each pair of words */
 DPORT_INTERRUPT_DISABLE(); // Disable interrupt only on current CPU.
-for (int i = 0; i < digest_word_len; i += 2) {
+for (size_t i = 0; i < digest_word_len; i += 2) {
 digest_state_words[i + 1] = DPORT_SEQUENCE_REG_READ((uint32_t)&reg_addr_buf[i]);
 digest_state_words[i] = DPORT_SEQUENCE_REG_READ((uint32_t)&reg_addr_buf[i + 1]);
 }
@@ -147,7 +147,7 @@ static inline void spi_flash_ll_get_buffer_data(spi_dev_t *dev, void *buffer, ui
 } else {
 // Otherwise, slow(er) path copies word by word
 int copy_len = read_len;
-for (int i = 0; i < (read_len + 3) / 4; i++) {
+for (size_t i = 0; i < (read_len + 3) / 4; i++) {
 int word_len = MIN(sizeof(uint32_t), copy_len);
 uint32_t word = dev->data_buf[i];
 memcpy(buffer, &word, word_len);
@@ -268,7 +268,7 @@ static inline void spi_ll_dma_set_rx_eof_generation(spi_dev_t *hw, bool enable)
 */
 static inline void spi_ll_write_buffer(spi_dev_t *hw, const uint8_t *buffer_to_send, size_t bitlen)
 {
-for (int x = 0; x < bitlen; x += 32) {
+for (size_t x = 0; x < bitlen; x += 32) {
 //Use memcpy to get around alignment issues for txdata
 uint32_t word;
 memcpy(&word, &buffer_to_send[x / 8], 4);
@@ -285,7 +285,7 @@ static inline void spi_ll_write_buffer(spi_dev_t *hw, const uint8_t *buffer_to_s
 */
 static inline void spi_ll_read_buffer(spi_dev_t *hw, uint8_t *buffer_to_rcv, size_t bitlen)
 {
-for (int x = 0; x < bitlen; x += 32) {
+for (size_t x = 0; x < bitlen; x += 32) {
 //Do a memcpy to get around possible alignment issues in rx_buffer
 uint32_t word = hw->data_buf[x / 32];
 int len = bitlen - x;
@@ -207,7 +207,7 @@ static inline void uart_ll_read_rxfifo(uart_dev_t *hw, uint8_t *buf, uint32_t rd
 {
 //Get the UART APB fifo addr. Read fifo, we use APB address
 uint32_t fifo_addr = (hw == &UART0) ? UART_FIFO_REG(0) : (hw == &UART1) ? UART_FIFO_REG(1) : UART_FIFO_REG(2);
-for(int i = 0; i < rd_len; i++) {
+for(uint32_t i = 0; i < rd_len; i++) {
 buf[i] = READ_PERI_REG(fifo_addr);
 #ifdef CONFIG_COMPILER_OPTIMIZATION_PERF
 __asm__ __volatile__("nop");
@@ -228,7 +228,7 @@ static inline void uart_ll_write_txfifo(uart_dev_t *hw, const uint8_t *buf, uint
 {
 //Get the UART AHB fifo addr, Write fifo, we use AHB address
 uint32_t fifo_addr = (hw == &UART0) ? UART_FIFO_AHB_REG(0) : (hw == &UART1) ? UART_FIFO_AHB_REG(1) : UART_FIFO_AHB_REG(2);
-for(int i = 0; i < wr_len; i++) {
+for(uint32_t i = 0; i < wr_len; i++) {
 WRITE_PERI_REG(fifo_addr, buf[i]);
 }
 }
@@ -46,7 +46,7 @@ void adc_hal_digi_controller_config(const adc_digi_config_t *cfg)
 if (cfg->adc1_pattern_len) {
 adc_ll_digi_clear_pattern_table(ADC_NUM_1);
 adc_ll_digi_set_pattern_table_len(ADC_NUM_1, cfg->adc1_pattern_len);
-for (int i = 0; i < cfg->adc1_pattern_len; i++) {
+for (uint32_t i = 0; i < cfg->adc1_pattern_len; i++) {
 adc_ll_digi_set_pattern_table(ADC_NUM_1, i, cfg->adc1_pattern[i]);
 }
 }
@@ -55,7 +55,7 @@ void adc_hal_digi_controller_config(const adc_digi_config_t *cfg)
 if (cfg->adc2_pattern_len) {
 adc_ll_digi_clear_pattern_table(ADC_NUM_2);
 adc_ll_digi_set_pattern_table_len(ADC_NUM_2, cfg->adc2_pattern_len);
-for (int i = 0; i < cfg->adc2_pattern_len; i++) {
+for (uint32_t i = 0; i < cfg->adc2_pattern_len; i++) {
 adc_ll_digi_set_pattern_table(ADC_NUM_2, i, cfg->adc2_pattern[i]);
 }
 }
@@ -110,7 +110,7 @@ static inline void cpu_ll_set_watchpoint(int id,
 //We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
 for (int x = 0; x < 7; x++) {
-if (size == (size_t)(1 << x)) {
+if (size == (size_t)(1U << x)) {
 break;
 }
 dbreakc <<= 1;
@@ -94,7 +94,7 @@ static inline void gpspi_flash_ll_get_buffer_data(spi_dev_t *dev, void *buffer,
 } else {
 // Otherwise, slow(er) path copies word by word
 int copy_len = read_len;
-for (int i = 0; i < (read_len + 3) / 4; i++) {
+for (size_t i = 0; i < (read_len + 3) / 4; i++) {
 int word_len = MIN(sizeof(uint32_t), copy_len);
 uint32_t word = dev->data_buf[i];
 memcpy(buffer, &word, word_len);
@@ -310,7 +310,7 @@ static inline void esp_memprot_iram0_sram_set_prot(uint32_t *split_addr, bool lw
 uint32_t write_bit, read_bit, exec_bit;
 uint32_t uni_block_perm = 0;
-for (size_t x = 0; x < IRAM0_SRAM_TOTAL_UNI_BLOCKS; x++) {
+for (int x = 0; x < IRAM0_SRAM_TOTAL_UNI_BLOCKS; x++) {
 esp_memprot_iram0_sram_get_uni_block_sgnf_bits(x, &write_bit, &read_bit, &exec_bit);
 if (x <= uni_blocks_low) {
 if (lw) {
@@ -734,7 +734,7 @@ static inline void esp_memprot_dram0_sram_set_prot(uint32_t *split_addr, bool lw
 //set unified mgmt region
 uint32_t write_bit, read_bit, uni_block_perm = 0;
-for (size_t x = 0; x < DRAM0_SRAM_TOTAL_UNI_BLOCKS; x++) {
+for (int x = 0; x < DRAM0_SRAM_TOTAL_UNI_BLOCKS; x++) {
 esp_memprot_dram0_sram_get_uni_block_sgnf_bits(x, &write_bit, &read_bit);
 if (x <= uni_blocks_low) {
 if (lw) {
@@ -112,7 +112,7 @@ static inline void sha_ll_fill_text_block(const void *input_text, size_t block_w
 uint32_t *data_words = (uint32_t *)input_text;
 uint32_t *reg_addr_buf = (uint32_t *)(SHA_TEXT_BASE);
-for (int i = 0; i < block_word_len; i++) {
+for (size_t i = 0; i < block_word_len; i++) {
 REG_WRITE(&reg_addr_buf[i], data_words[i]);
 }
 }
@@ -143,7 +143,7 @@ static inline void sha_ll_write_digest(esp_sha_type sha_type, void *digest_state
 uint32_t *digest_state_words = (uint32_t *)digest_state;
 uint32_t *reg_addr_buf = (uint32_t *)(SHA_H_BASE);
-for (int i = 0; i < digest_word_len; i++) {
+for (size_t i = 0; i < digest_word_len; i++) {
 REG_WRITE(&reg_addr_buf[i], digest_state_words[i]);
 }
 }
@@ -314,7 +314,7 @@ static inline void spi_ll_dma_set_rx_eof_generation(spi_dev_t *hw, bool enable)
 */
 static inline void spi_ll_write_buffer(spi_dev_t *hw, const uint8_t *buffer_to_send, size_t bitlen)
 {
-for (int x = 0; x < bitlen; x += 32) {
+for (size_t x = 0; x < bitlen; x += 32) {
 //Use memcpy to get around alignment issues for txdata
 uint32_t word;
 memcpy(&word, &buffer_to_send[x / 8], 4);
@ -331,7 +331,7 @@ static inline void spi_ll_write_buffer(spi_dev_t *hw, const uint8_t *buffer_to_s
|
||||
*/
|
||||
static inline void spi_ll_read_buffer(spi_dev_t *hw, uint8_t *buffer_to_rcv, size_t bitlen)
|
||||
{
|
||||
for (int x = 0; x < bitlen; x += 32) {
|
||||
for (size_t x = 0; x < bitlen; x += 32) {
|
||||
//Do a memcpy to get around possible alignment issues in rx_buffer
|
||||
uint32_t word = hw->data_buf[x / 32];
|
||||
int len = bitlen - x;
|
||||
|
@@ -135,7 +135,7 @@ static inline void spimem_flash_ll_get_buffer_data(spi_mem_dev_t *dev, void *buf
 } else {
 // Otherwise, slow(er) path copies word by word
 int copy_len = read_len;
-for (int i = 0; i < (read_len + 3) / 4; i++) {
+for (size_t i = 0; i < (read_len + 3) / 4; i++) {
 int word_len = MIN(sizeof(uint32_t), copy_len);
 uint32_t word = dev->data_buf[i];
 memcpy(buffer, &word, word_len);
@@ -204,7 +204,7 @@ static inline void uart_ll_read_rxfifo(uart_dev_t *hw, uint8_t *buf, uint32_t rd
 {
 //Get the UART fifo addr, ESP32-S2 have 2 UART
 uint32_t fifo_addr = (hw == &UART0) ? UART_FIFO_AHB_REG(0) : UART_FIFO_AHB_REG(1);
-for(int i = 0; i < rd_len; i++) {
+for(uint32_t i = 0; i < rd_len; i++) {
 buf[i] = READ_PERI_REG(fifo_addr);
 }
 }
@@ -222,7 +222,7 @@ static inline void uart_ll_write_txfifo(uart_dev_t *hw, const uint8_t *buf, uint
 {
 //Get the UART fifo addr, ESP32-S2 have 2 UART
 uint32_t fifo_addr = (hw == &UART0) ? UART_FIFO_AHB_REG(0) : UART_FIFO_AHB_REG(1);
-for(int i = 0; i < wr_len; i++) {
+for(uint32_t i = 0; i < wr_len; i++) {
 WRITE_PERI_REG(fifo_addr, buf[i]);
 }
 }
@@ -115,7 +115,7 @@ static inline void cpu_ll_set_watchpoint(int id,
 //We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
 for (int x = 0; x < 7; x++) {
-if (size == (1 << x)) {
+if (size == (size_t)(1U << x)) {
 break;
 }
 dbreakc <<= 1;
@@ -103,7 +103,7 @@ static inline void gpspi_flash_ll_get_buffer_data(spi_dev_t *dev, void *buffer,
 } else {
 // Otherwise, slow(er) path copies word by word
 int copy_len = read_len;
-for (int i = 0; i < (read_len + 3) / 4; i++) {
+for (uint32_t i = 0; i < (read_len + 3) / 4; i++) {
 int word_len = MIN(sizeof(uint32_t), copy_len);
 uint32_t word = dev->data_buf[i];
 memcpy(buffer, &word, word_len);
@@ -112,7 +112,7 @@ static inline void sha_ll_fill_text_block(const void *input_text, size_t block_w
 uint32_t *data_words = (uint32_t *)input_text;
 uint32_t *reg_addr_buf = (uint32_t *)(SHA_TEXT_BASE);
-for (int i = 0; i < block_word_len; i++) {
+for (size_t i = 0; i < block_word_len; i++) {
 REG_WRITE(&reg_addr_buf[i], data_words[i]);
 }
 }
@@ -143,7 +143,7 @@ static inline void sha_ll_write_digest(esp_sha_type sha_type, void *digest_state
 uint32_t *digest_state_words = (uint32_t *)digest_state;
 uint32_t *reg_addr_buf = (uint32_t *)(SHA_H_BASE);
-for (int i = 0; i < digest_word_len; i++) {
+for (size_t i = 0; i < digest_word_len; i++) {
 REG_WRITE(&reg_addr_buf[i], digest_state_words[i]);
 }
 }
@@ -333,7 +333,7 @@ static inline void spi_ll_dma_set_rx_eof_generation(spi_dev_t *hw, bool enable)
 */
 static inline void spi_ll_write_buffer(spi_dev_t *hw, const uint8_t *buffer_to_send, size_t bitlen)
 {
-for (int x = 0; x < bitlen; x += 32) {
+for (size_t x = 0; x < bitlen; x += 32) {
 //Use memcpy to get around alignment issues for txdata
 uint32_t word;
 memcpy(&word, &buffer_to_send[x / 8], 4);
@ -381,7 +381,7 @@ static inline void spi_ll_write_buffer_byte(spi_dev_t *hw, int byte_id, uint8_t
|
||||
*/
|
||||
static inline void spi_ll_read_buffer(spi_dev_t *hw, uint8_t *buffer_to_rcv, size_t bitlen)
|
||||
{
|
||||
for (int x = 0; x < bitlen; x += 32) {
|
||||
for (size_t x = 0; x < bitlen; x += 32) {
|
||||
//Do a memcpy to get around possible alignment issues in rx_buffer
|
||||
uint32_t word = hw->data_buf[x / 32];
|
||||
int len = bitlen - x;
|
||||
|
@ -135,7 +135,7 @@ static inline void spimem_flash_ll_get_buffer_data(spi_mem_dev_t *dev, void *buf
} else {
// Otherwise, slow(er) path copies word by word
int copy_len = read_len;
for (int i = 0; i < (read_len + 3) / 4; i++) {
for (uint32_t i = 0; i < (read_len + 3) / 4; i++) {
int word_len = MIN(sizeof(uint32_t), copy_len);
uint32_t word = dev->data_buf[i];
memcpy(buffer, &word, word_len);

@ -116,7 +116,7 @@ bool i2c_hal_is_bus_busy(i2c_hal_context_t *hal)

void i2c_hal_get_sda_timing(i2c_hal_context_t *hal, int *sample_time, int *hold_time)
{
i2c_ll_get_sda_timing(hal->dev, sample_time ,hold_time);
i2c_ll_get_sda_timing(hal->dev, sample_time, hold_time);
}

void i2c_hal_get_tout(i2c_hal_context_t *hal, int *tout_val)

@ -174,12 +174,12 @@ void i2c_hal_disable_slave_rx_it(i2c_hal_context_t *hal)
i2c_ll_slave_disable_rx_it(hal->dev);
}

void i2c_hal_set_bus_timing(i2c_hal_context_t *hal, uint32_t scl_freq, i2c_sclk_t src_clk)
void i2c_hal_set_bus_timing(i2c_hal_context_t *hal, int scl_freq, i2c_sclk_t src_clk)
{
i2c_ll_set_source_clk(hal->dev, src_clk);
uint32_t sclk = I2C_LL_CLK_SRC_FREQ(src_clk);
i2c_clk_cal_t clk_cal = {0};
uint32_t scl_hw_freq = (scl_freq == I2C_CLK_FREQ_MAX) ? (src_clk / 20) : scl_freq; // FREQ_MAX use the highest freq of the chosen clk.
uint32_t scl_hw_freq = (scl_freq == I2C_CLK_FREQ_MAX) ? (sclk / 20) : (uint32_t)scl_freq; // FREQ_MAX use the highest freq of the chosen clk.
i2c_ll_cal_bus_clk(sclk, scl_hw_freq, &clk_cal);
i2c_ll_set_bus_timing(hal->dev, &clk_cal);
}

@ -416,7 +416,7 @@ void i2c_hal_get_rxfifo_cnt(i2c_hal_context_t *hal, uint32_t *len);
*
* @return None
*/
void i2c_hal_set_bus_timing(i2c_hal_context_t *hal, uint32_t scl_freq, i2c_sclk_t src_clk);
void i2c_hal_set_bus_timing(i2c_hal_context_t *hal, int scl_freq, i2c_sclk_t src_clk);

/**
* @brief Get I2C txFIFO writeable length

@ -127,8 +127,8 @@ static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start,
static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
{
assert(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr);
int size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
int count = size / SDIO_SLAVE_SEND_DESC_SIZE;
size_t size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
size_t count = size / SDIO_SLAVE_SEND_DESC_SIZE;
assert(count * SDIO_SLAVE_SEND_DESC_SIZE==size);
buf->free_ptr = buf->read_ptr;
return count;

@ -177,7 +177,7 @@ void sha_hal_read_digest(esp_sha_type sha_type, void *digest_state)
/* Fault injection check: verify SHA engine actually ran,
state is not all zeroes.
*/
for (int i = 0; i < word_len; i++) {
for (size_t i = 0; i < word_len; i++) {
if (digest_state_words[i] != 0) {
return;
}

@ -33,7 +33,7 @@ void uart_hal_tx_break(uart_hal_context_t *hal, uint32_t break_num)
void uart_hal_write_txfifo(uart_hal_context_t *hal, const uint8_t *buf, uint32_t data_size, uint32_t *write_size)
{
uint16_t fill_len = uart_ll_get_txfifo_len(hal->dev);
if(fill_len > data_size) {
if (fill_len > data_size) {
fill_len = data_size;
}
*write_size = fill_len;

@ -42,7 +42,8 @@ void uart_hal_write_txfifo(uart_hal_context_t *hal, const uint8_t *buf, uint32_t

void uart_hal_read_rxfifo(uart_hal_context_t *hal, uint8_t *buf, int *inout_rd_len)
{
uint16_t read_len = (*inout_rd_len > 0) ? *inout_rd_len : uart_ll_get_rxfifo_len(hal->dev);
*inout_rd_len = read_len;
uart_ll_read_rxfifo(hal->dev, buf, read_len);
if (*inout_rd_len <= 0) {
*inout_rd_len = uart_ll_get_rxfifo_len(hal->dev);
}
uart_ll_read_rxfifo(hal->dev, buf, *inout_rd_len);
}

@ -177,7 +177,7 @@ IRAM_ATTR void *heap_caps_malloc_default( size_t size )
return heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
} else {
void *r;
if (size <= malloc_alwaysinternal_limit) {
if (size <= (size_t)malloc_alwaysinternal_limit) {
r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
} else {
r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );

@ -200,7 +200,7 @@ IRAM_ATTR void *heap_caps_realloc_default( void *ptr, size_t size )
return heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
} else {
void *r;
if (size <= malloc_alwaysinternal_limit) {
if (size <= (size_t)malloc_alwaysinternal_limit) {
r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
} else {
r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );

@ -305,7 +305,7 @@ IRAM_ATTR void heap_caps_free( void *ptr)
multi_heap_free(heap->heap, ptr);
}

IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, int caps)
IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps)
{
bool ptr_in_diram_case = false;
heap_t *heap = NULL;

@ -540,7 +540,7 @@ size_t heap_caps_get_allocated_size( void *ptr )
return size;
}

IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, int caps)
IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
{
void *ret = NULL;

@ -66,10 +66,10 @@ void heap_caps_init(void)

//The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
//it's useful to coalesce adjacent regions that have the same type.
for (int i = 1; i < num_regions; i++) {
for (size_t i = 1; i < num_regions; i++) {
soc_memory_region_t *a = &regions[i - 1];
soc_memory_region_t *b = &regions[i];
if (b->start == a->start + a->size && b->type == a->type ) {
if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type ) {
a->type = -1;
b->start = a->start;
b->size += a->size;

@ -78,7 +78,7 @@ void heap_caps_init(void)

/* Count the heaps left after merging */
size_t num_heaps = 0;
for (int i = 0; i < num_regions; i++) {
for (size_t i = 0; i < num_regions; i++) {
if (regions[i].type != -1) {
num_heaps++;
}

@ -92,7 +92,7 @@ void heap_caps_init(void)
size_t heap_idx = 0;

ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
for (int i = 0; i < num_regions; i++) {
for (size_t i = 0; i < num_regions; i++) {
soc_memory_region_t *region = &regions[i];
const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
heap_t *heap = &temp_heaps[heap_idx];

@ -126,7 +126,7 @@ void heap_caps_init(void)
assert(SLIST_EMPTY(&registered_heaps));

heap_t *heaps_array = NULL;
for (int i = 0; i < num_heaps; i++) {
for (size_t i = 0; i < num_heaps; i++) {
if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
/* use the first DRAM heap which can fit the data */
heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);

@ -140,7 +140,7 @@ void heap_caps_init(void)
memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);

/* Iterate the heaps and set their locks, also add them to the linked list. */
for (int i = 0; i < num_heaps; i++) {
for (size_t i = 0; i < num_heaps; i++) {
if (heaps_array[i].heap != NULL) {
multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
}

@ -158,10 +158,10 @@ esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
return ESP_ERR_INVALID_ARG;
}

for (int i = 0; i < soc_memory_region_count; i++) {
for (size_t i = 0; i < soc_memory_region_count; i++) {
const soc_memory_region_t *region = &soc_memory_regions[i];
// Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
if (region->start <= start && (region->start + region->size) > start) {
if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
const uint32_t *caps = soc_memory_types[region->type].caps;
return heap_caps_add_region_with_caps(caps, start, end);
}

@ -103,7 +103,7 @@ void heap_caps_free( void *ptr);
*
* @return Pointer to a new buffer of size 'size' with capabilities 'caps', or NULL if allocation failed.
*/
void *heap_caps_realloc( void *ptr, size_t size, int caps);
void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps);

/**
* @brief Allocate a aligned chunk of memory which has the given capabilities

@ -119,7 +119,7 @@ void *heap_caps_realloc( void *ptr, size_t size, int caps);
*
*
*/
void *heap_caps_aligned_alloc(size_t alignment, size_t size, int caps);
void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps);

/**
* @brief Used to deallocate memory previously allocated with heap_caps_aligned_alloc

@ -167,7 +167,7 @@ static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool
}

uint8_t *p = data;
for (int i = 0; i < size; i++) {
for (size_t i = 0; i < size; i++) {
if (p[i] != (uint8_t)EXPECT_WORD) {
if (print_errors) {
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%02x got 0x%02x\n", p, (uint8_t)EXPECT_WORD, *p);

@ -136,7 +136,7 @@ void esp_log_level_set(const char *tag, esp_log_level_t level)
}

// search in the cache and update the entry it if exists
for (int i = 0; i < s_log_cache_entry_count; ++i) {
for (uint32_t i = 0; i < s_log_cache_entry_count; ++i) {
#ifdef LOG_BUILTIN_CHECKS
assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation);
#endif

@ -203,7 +203,7 @@ void esp_log_write(esp_log_level_t level,
static inline bool get_cached_log_level(const char *tag, esp_log_level_t *level)
{
// Look for `tag` in cache
int i;
uint32_t i;
for (i = 0; i < s_log_cache_entry_count; ++i) {
#ifdef LOG_BUILTIN_CHECKS
assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation);

@ -84,10 +84,10 @@ static esp_err_t esp_ping_send(esp_ping_t *ep)
ep->packet_hdr->chksum = inet_chksum(ep->packet_hdr, ep->icmp_pkt_size);
}

int sent = sendto(ep->sock, ep->packet_hdr, ep->icmp_pkt_size, 0,
ssize_t sent = sendto(ep->sock, ep->packet_hdr, ep->icmp_pkt_size, 0,
(struct sockaddr *)&ep->target_addr, sizeof(ep->target_addr));

if (sent != ep->icmp_pkt_size) {
if (sent != (ssize_t)ep->icmp_pkt_size) {
int opt_val;
socklen_t opt_len = sizeof(opt_val);
getsockopt(ep->sock, SOL_SOCKET, SO_ERROR, &opt_val, &opt_len);

@ -76,12 +76,12 @@ static inline void mpi_to_mem_block(uint32_t mem_base, const mbedtls_mpi *mpi, s
uint32_t copy_words = MIN(hw_words, mpi->n);

/* Copy MPI data to memory block registers */
for (int i = 0; i < copy_words; i++) {
for (uint32_t i = 0; i < copy_words; i++) {
pbase[i] = mpi->p[i];
}

/* Zero any remaining memory block data */
for (int i = copy_words; i < hw_words; i++) {
for (uint32_t i = copy_words; i < hw_words; i++) {
pbase[i] = 0;
}
}

@ -93,7 +93,7 @@ static inline void mpi_to_mem_block(uint32_t mem_base, const mbedtls_mpi *mpi, s
Bignum 'x' should already be grown to at least num_words by caller (can be done while
calculation is in progress, to save some cycles)
*/
static inline void mem_block_to_mpi(mbedtls_mpi *x, uint32_t mem_base, int num_words)
static inline void mem_block_to_mpi(mbedtls_mpi *x, uint32_t mem_base, size_t num_words)
{
assert(x->n >= num_words);

@ -246,7 +246,7 @@ void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtl
size_t hw_words = num_words;

/* M = 2^num_words - 1, so block is entirely FF */
for (int i = 0; i < hw_words; i++) {
for (size_t i = 0; i < hw_words; i++) {
DPORT_REG_WRITE(RSA_MEM_M_BLOCK_BASE + i * 4, UINT32_MAX);
}
/* Mprime = 1 */

@ -262,7 +262,7 @@ void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtl
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE, 1);

/* Zero out rest of the Rinv words */
for (int i = 1; i < hw_words; i++) {
for (size_t i = 1; i < hw_words; i++) {
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE + i * 4, 0);
}

@ -70,12 +70,12 @@ static inline void mpi_to_mem_block(uint32_t mem_base, const mbedtls_mpi *mpi, s
uint32_t copy_words = MIN(num_words, mpi->n);

/* Copy MPI data to memory block registers */
for (int i = 0; i < copy_words; i++) {
for (uint32_t i = 0; i < copy_words; i++) {
pbase[i] = mpi->p[i];
}

/* Zero any remaining memory block data */
for (int i = copy_words; i < num_words; i++) {
for (uint32_t i = copy_words; i < num_words; i++) {
pbase[i] = 0;
}
}

@ -201,7 +201,7 @@ void esp_mpi_mul_mpi_hw_op(const mbedtls_mpi *X, const mbedtls_mpi *Y, size_t nu
void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtls_mpi *Y, size_t num_words)
{
/* M = 2^num_words - 1, so block is entirely FF */
for (int i = 0; i < num_words; i++) {
for (size_t i = 0; i < num_words; i++) {
DPORT_REG_WRITE(RSA_MEM_M_BLOCK_BASE + i * 4, UINT32_MAX);
}

@ -217,7 +217,7 @@ void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtl
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE, 1);

/* Zero out rest of the Rinv words */
for (int i = 1; i < num_words; i++) {
for (size_t i = 1; i < num_words; i++) {
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE + i * 4, 0);
}

@ -36,7 +36,7 @@ static const char *TAG = "ESP_RSA_SIGN_ALT";
static hmac_key_id_t s_esp_ds_hmac_key_id;
static esp_ds_data_t *s_ds_data;
static SemaphoreHandle_t s_ds_lock;
static uint32_t s_timeout_ms = 0;
static int s_timeout_ms = 0;

/* key length in bytes = (esp_digital_signature_length_t key + 1 ) * FACTOR_KEYLEN_IN_BYTES */
#define FACTOR_KEYLEN_IN_BYTES 4

@ -221,7 +221,7 @@ int esp_ds_rsa_sign( void *ctx,
return -1;
}

for ( int i = 0; i < (s_ds_data->rsa_length + 1); i++) {
for (unsigned int i = 0; i < (s_ds_data->rsa_length + 1); i++) {
signature[i] = SWAP_INT32(((uint32_t *)sig)[(s_ds_data->rsa_length + 1) - (i + 1)]);
}

@ -242,7 +242,7 @@ int esp_ds_rsa_sign( void *ctx,
return -1;
}

for ( int i = 0; i < (s_ds_data->rsa_length + 1); i++) {
for (unsigned int i = 0; i < (s_ds_data->rsa_length + 1); i++) {
((uint32_t *)sig)[i] = SWAP_INT32(((uint32_t *)signature)[(s_ds_data->rsa_length + 1) - (i + 1)]);
}
heap_caps_free(signature);

@ -68,12 +68,12 @@ static inline void mpi_to_mem_block(uint32_t mem_base, const mbedtls_mpi *mpi, s
uint32_t copy_words = MIN(num_words, mpi->n);

/* Copy MPI data to memory block registers */
for (int i = 0; i < copy_words; i++) {
for (uint32_t i = 0; i < copy_words; i++) {
pbase[i] = mpi->p[i];
}

/* Zero any remaining memory block data */
for (int i = copy_words; i < num_words; i++) {
for (uint32_t i = copy_words; i < num_words; i++) {
pbase[i] = 0;
}
}

@ -199,7 +199,7 @@ void esp_mpi_mul_mpi_hw_op(const mbedtls_mpi *X, const mbedtls_mpi *Y, size_t nu
void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtls_mpi *Y, size_t num_words)
{
/* M = 2^num_words - 1, so block is entirely FF */
for (int i = 0; i < num_words; i++) {
for (size_t i = 0; i < num_words; i++) {
DPORT_REG_WRITE(RSA_MEM_M_BLOCK_BASE + i * 4, UINT32_MAX);
}

@ -215,7 +215,7 @@ void esp_mpi_mult_mpi_failover_mod_mult_hw_op(const mbedtls_mpi *X, const mbedtl
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE, 1);

/* Zero out rest of the Rinv words */
for (int i = 1; i < num_words; i++) {
for (size_t i = 1; i < num_words; i++) {
DPORT_REG_WRITE(RSA_MEM_RB_BLOCK_BASE + i * 4, 0);
}

@ -1605,7 +1605,7 @@ static void _mdns_init_pcb_probe(mdns_if_t tcpip_if, mdns_ip_protocol_t ip_proto
mdns_srv_item_t * new_probe_services[len];
int new_probe_service_len = 0;
bool found;
for (int j=0; j < len; ++j) {
for (size_t j=0; j < len; ++j) {
found = false;
for (int i=0; i < pcb->probe_services_len; ++i) {
if (pcb->probe_services[i] == services[j]) {

@ -2508,11 +2508,10 @@ static int _mdns_txt_items_count_get(const uint8_t * data, size_t len)
*/
static int _mdns_txt_item_name_get_len(const uint8_t * data, size_t len)
{
int i;
if (*data == '=') {
return -1;
}
for (i = 0; i < len; i++) {
for (size_t i = 0; i < len; i++) {
if (data[i] == '=') {
return i;
}

@ -3483,7 +3482,6 @@ static void _mdns_search_result_add_srv(mdns_search_once_t * search, const char
*/
static void _mdns_search_result_add_txt(mdns_search_once_t * search, mdns_txt_item_t * txt, size_t txt_count, mdns_if_t tcpip_if, mdns_ip_protocol_t ip_protocol)
{
int i;
mdns_result_t * r = search->result;
while (r) {
if (r->tcpip_if == tcpip_if && r->ip_protocol == ip_protocol) {

@ -3515,7 +3513,7 @@ static void _mdns_search_result_add_txt(mdns_search_once_t * search, mdns_txt_it
return;

free_txt:
for (i=0; i<txt_count; i++) {
for (size_t i=0; i<txt_count; i++) {
free((char *)(txt[i].key));
free((char *)(txt[i].value));
}

@ -4702,7 +4700,6 @@ void mdns_query_results_free(mdns_result_t * results)
{
mdns_result_t * r;
mdns_ip_addr_t * a;
int i;

while (results) {
r = results;

@ -4710,7 +4707,7 @@ void mdns_query_results_free(mdns_result_t * results)
free((char *)(r->hostname));
free((char *)(r->instance_name));

for (i=0; i<r->txt_count; i++) {
for (size_t i=0; i<r->txt_count; i++) {
free((char *)(r->txt[i].key));
free((char *)(r->txt[i].value));
}

@ -24,7 +24,7 @@ static void mdns_print_results(mdns_result_t * results)
{
mdns_result_t * r = results;
mdns_ip_addr_t * a = NULL;
int i = 1, t;
int i = 1;
while (r) {
printf("%d: Interface: %s, Type: %s\n", i++, if_str[r->tcpip_if], ip_protocol_str[r->ip_protocol]);
if (r->instance_name) {

@ -35,7 +35,7 @@ static void mdns_print_results(mdns_result_t * results)
}
if (r->txt_count) {
printf(" TXT : [%u] ", r->txt_count);
for (t=0; t<r->txt_count; t++) {
for (size_t t=0; t<r->txt_count; t++) {
printf("%s=%s; ", r->txt[t].key, r->txt[t].value);
}
printf("\n");

@ -39,7 +39,7 @@ void __attribute__((noreturn)) abort(void)

char *dest = buf;

for (int i = 0; i < sizeof(str) / sizeof(str[0]); i++) {
for (size_t i = 0; i < sizeof(str) / sizeof(str[0]); i++) {
strcat(dest, str[i]);
}

@ -41,7 +41,7 @@ int poll(struct pollfd *fds, nfds_t nfds, int timeout)
FD_ZERO(&writefds);
FD_ZERO(&errorfds);

for (int i = 0; i < nfds; ++i) {
for (unsigned int i = 0; i < nfds; ++i) {
fds[i].revents = 0;

if (fds[i].fd < 0) {

@ -73,7 +73,7 @@ int poll(struct pollfd *fds, nfds_t nfds, int timeout)
if (select_ret > 0) {
ret += select_ret;

for (int i = 0; i < nfds; ++i) {
for (unsigned int i = 0; i < nfds; ++i) {
if (FD_ISSET(fds[i].fd, &readfds)) {
fds[i].revents |= POLLIN;
}

@ -50,7 +50,7 @@ void esp_reent_cleanup(void)
/* Clean up storage used by mprec functions */
if (r->_mp) {
if (_REENT_MP_FREELIST(r)) {
for (int i = 0; i < _Kmax; ++i) {
for (unsigned int i = 0; i < _Kmax; ++i) {
struct _Bigint *cur, *next;
next = _REENT_MP_FREELIST(r)[i];
while (next) {

@ -129,7 +129,7 @@ void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu(void)

spi_flash_op_lock();

const uint32_t cpuid = xPortGetCoreID();
const int cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG
// For sanity check later: record the CPU which has started doing flash operation

@ -178,7 +178,7 @@ void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu(void)

void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void)
{
const uint32_t cpuid = xPortGetCoreID();
const int cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG
// Sanity check: flash operation ends on the same CPU as it has started

@ -41,7 +41,7 @@ TEST_CASE("Can use access() for UART", "[vfs]")
uart_driver_install(UART_NUM_2, 256, 0, 0, NULL, 0);
#endif

for (int i = 0; i < sizeof(uarts)/sizeof(uarts[0]); ++i) {
for (size_t i = 0; i < sizeof(uarts)/sizeof(uarts[0]); ++i) {
TEST_ASSERT_EQUAL_MESSAGE(access(uarts[i], F_OK), 0, uarts[i]);

TEST_ASSERT_EQUAL_MESSAGE(access(uarts[i], R_OK), 0, uarts[i]);

@ -111,7 +111,7 @@ static concurrent_test_path_to_fd_t concurrent_test_path_to_fd[] = {

static int concurrent_test_vfs_open(const char * path, int flags, int mode)
{
for (int i = 0; i < sizeof(concurrent_test_path_to_fd)/sizeof(concurrent_test_path_to_fd[0]); ++i) {
for (size_t i = 0; i < sizeof(concurrent_test_path_to_fd)/sizeof(concurrent_test_path_to_fd[0]); ++i) {
if (strcmp(concurrent_test_path_to_fd[i].path, path) == 0) {
// This behaves like UART: opening the same file gives always the
// same local FD (even when opening at the same time multiple FDs)

@ -125,7 +125,7 @@ static int concurrent_test_vfs_open(const char * path, int flags, int mode)

static int concurrent_test_vfs_close(int fd)
{
for (int i = 0; i < sizeof(concurrent_test_path_to_fd)/sizeof(concurrent_test_path_to_fd[0]); ++i) {
for (size_t i = 0; i < sizeof(concurrent_test_path_to_fd)/sizeof(concurrent_test_path_to_fd[0]); ++i) {
if (concurrent_test_path_to_fd[i].local_fd == fd) {
return 0;
}

@ -950,7 +950,7 @@ int esp_vfs_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *errorfds
return -1;
}

for (int i = 0; i < vfs_count; ++i) {
for (size_t i = 0; i < vfs_count; ++i) {
const vfs_entry_t *vfs = get_vfs_for_index(i);
fds_triple_t *item = &vfs_fds_triple[i];

@ -1212,7 +1212,7 @@ esp_err_t wifi_prov_mgr_init(wifi_prov_mgr_config_t config)
};

/* All function pointers in the scheme structure must be non-null */
for (int i = 0; i < sizeof(fn_ptrs)/sizeof(fn_ptrs[0]); i++) {
for (size_t i = 0; i < sizeof(fn_ptrs)/sizeof(fn_ptrs[0]); i++) {
if (!fn_ptrs[i]) {
return ESP_ERR_INVALID_ARG;
}

Some files were not shown because too many files have changed in this diff.
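For readers unfamiliar with the warning class these hunks address, here is a minimal, self-contained sketch (not taken from the ESP-IDF tree; the helper names are made up) of the -Wsign-compare diagnostic and the matching loop-index fix pattern seen throughout the diff above.

/* sign_compare_example.c — hedged illustration only, not part of this commit.
   Compile with: gcc -Wall -Wextra -c sign_compare_example.c */
#include <stddef.h>
#include <stdint.h>

/* Signed index compared against an unsigned bound: GCC warns under
   -Wsign-compare ("comparison of integer expressions of different
   signedness") on the loop condition. */
void fill_pattern_signed(uint8_t *buf, size_t len)
{
    for (int i = 0; i < len; i++) {
        buf[i] = (uint8_t)i;
    }
}

/* Index type matched to the bound, mirroring the int -> size_t /
   uint32_t / unsigned changes in the hunks above: no warning. */
void fill_pattern_unsigned(uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        buf[i] = (uint8_t)i;
    }
}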