Merge branch 'bugfix/bootloader_map_size_v3.1' into 'release/v3.1'

bootloader: fix incorrect mapping size (backport v3.1)

See merge request idf/esp-idf!4139
Jiang Jiang Jian 2019-01-24 14:09:58 +08:00
commit c6750d271e
4 changed files with 52 additions and 21 deletions


@@ -100,4 +100,21 @@ esp_err_t bootloader_flash_write(size_t dest_addr, void *src, size_t size, bool
  */
 esp_err_t bootloader_flash_erase_sector(size_t sector);
 
+/* Cache MMU block size */
+#define MMU_BLOCK_SIZE 0x00010000
+
+/* Cache MMU address mask (MMU tables ignore bits which are zero) */
+#define MMU_FLASH_MASK (~(MMU_BLOCK_SIZE - 1))
+
+/**
+ * @brief Calculate the number of cache pages to map
+ * @param size size of data to map
+ * @param vaddr virtual address where data will be mapped
+ * @return number of cache MMU pages required to do the mapping
+ */
+static inline uint32_t bootloader_cache_pages_to_map(uint32_t size, uint32_t vaddr)
+{
+    return (size + (vaddr - (vaddr & MMU_FLASH_MASK)) + MMU_BLOCK_SIZE - 1) / MMU_BLOCK_SIZE;
+}
+
 #endif
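
For reference, the helper's rounding can be checked in isolation. A minimal standalone sketch (not part of the commit; the macros mirror the two added above):

/* Sketch: bootloader_cache_pages_to_map() counts every 64 KB MMU page the
 * mapping touches, including a partial first page when vaddr is unaligned. */
#include <assert.h>
#include <stdint.h>

#define MMU_BLOCK_SIZE 0x00010000
#define MMU_FLASH_MASK (~(MMU_BLOCK_SIZE - 1))

static inline uint32_t bootloader_cache_pages_to_map(uint32_t size, uint32_t vaddr)
{
    return (size + (vaddr - (vaddr & MMU_FLASH_MASK)) + MMU_BLOCK_SIZE - 1) / MMU_BLOCK_SIZE;
}

int main(void)
{
    /* 0x19000 bytes at a 64 KB aligned address fit in two pages... */
    assert(bootloader_cache_pages_to_map(0x19000, 0x3f400000) == 2);
    /* ...but the same size starting 0x8000 into a page spans three. */
    assert(bootloader_cache_pages_to_map(0x19000, 0x3f408000) == 3);
    return 0;
}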


@@ -86,8 +86,6 @@ static const char *TAG = "bootloader_flash";
  */
 #define MMU_BLOCK0_VADDR 0x3f400000
 #define MMU_BLOCK50_VADDR 0x3f720000
-#define MMU_FLASH_MASK 0xffff0000
-#define MMU_BLOCK_SIZE 0x00010000
 
 static bool mapped;
@@ -107,10 +105,11 @@ const void *bootloader_mmap(uint32_t src_addr, uint32_t size)
     }
     uint32_t src_addr_aligned = src_addr & MMU_FLASH_MASK;
-    uint32_t count = (size + (src_addr - src_addr_aligned) + 0xffff) / MMU_BLOCK_SIZE;
+    uint32_t count = bootloader_cache_pages_to_map(size, src_addr);
     Cache_Read_Disable(0);
     Cache_Flush(0);
-    ESP_LOGD(TAG, "mmu set paddr=%08x count=%d", src_addr_aligned, count );
+    ESP_LOGD(TAG, "mmu set paddr=%08x count=%d size=%x src_addr=%x src_addr_aligned=%x",
+             src_addr & MMU_FLASH_MASK, count, size, src_addr, src_addr_aligned );
     int e = cache_flash_mmu_set(0, 0, MMU_BLOCK0_VADDR, src_addr_aligned, 64, count);
     if (e != 0) {
         ESP_LOGE(TAG, "cache_flash_mmu_set failed: %d\n", e);


@@ -387,7 +387,7 @@ static void unpack_load_app(const esp_image_metadata_t* data)
     // Find DROM & IROM addresses, to configure cache mappings
     for (int i = 0; i < data->image.segment_count; i++) {
         const esp_image_segment_header_t *header = &data->segments[i];
-        if (header->load_addr >= SOC_IROM_LOW && header->load_addr < SOC_IROM_HIGH) {
+        if (header->load_addr >= SOC_DROM_LOW && header->load_addr < SOC_DROM_HIGH) {
             if (drom_addr != 0) {
                 ESP_LOGE(TAG, MAP_ERR_MSG, "DROM");
             } else {
@@ -397,7 +397,7 @@ static void unpack_load_app(const esp_image_metadata_t* data)
             drom_load_addr = header->load_addr;
             drom_size = header->data_len;
         }
-        if (header->load_addr >= SOC_DROM_LOW && header->load_addr < SOC_DROM_HIGH) {
+        if (header->load_addr >= SOC_IROM_LOW && header->load_addr < SOC_IROM_HIGH) {
             if (irom_addr != 0) {
                 ESP_LOGE(TAG, MAP_ERR_MSG, "IROM");
             } else {
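
For context, the corrected range checks match the ESP32 cache windows defined in soc/soc.h (values quoted here for illustration only):

/* ESP32 external-flash cache windows (from soc/soc.h):
 *   SOC_DROM_LOW 0x3F400000 .. SOC_DROM_HIGH 0x3F800000  (data bus, DROM)
 *   SOC_IROM_LOW 0x400D0000 .. SOC_IROM_HIGH 0x40400000  (instruction bus, IROM)
 * After the fix, each branch tests the window it actually configures: a
 * load_addr such as 0x3f400020 falls in the DROM window, while addresses
 * at or above 0x400d0000 and below 0x40400000 fall in the IROM window. */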
@@ -428,6 +428,7 @@ static void set_cache_and_start_app(
     uint32_t irom_size,
     uint32_t entry_addr)
 {
+    int rc;
     ESP_LOGD(TAG, "configure drom and irom and start");
     Cache_Read_Disable( 0 );
     Cache_Flush( 0 );
@@ -439,20 +440,34 @@ static void set_cache_and_start_app(
         DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL;
     }
-    uint32_t drom_page_count = (drom_size + 64*1024 - 1) / (64*1024); // round up to 64k
-    ESP_LOGV(TAG, "d mmu set paddr=%08x vaddr=%08x size=%d n=%d", drom_addr & 0xffff0000, drom_load_addr & 0xffff0000, drom_size, drom_page_count );
-    int rc = cache_flash_mmu_set( 0, 0, drom_load_addr & 0xffff0000, drom_addr & 0xffff0000, 64, drom_page_count );
-    ESP_LOGV(TAG, "rc=%d", rc );
-    rc = cache_flash_mmu_set( 1, 0, drom_load_addr & 0xffff0000, drom_addr & 0xffff0000, 64, drom_page_count );
-    ESP_LOGV(TAG, "rc=%d", rc );
-    uint32_t irom_page_count = (irom_size + 64*1024 - 1) / (64*1024); // round up to 64k
-    ESP_LOGV(TAG, "i mmu set paddr=%08x vaddr=%08x size=%d n=%d", irom_addr & 0xffff0000, irom_load_addr & 0xffff0000, irom_size, irom_page_count );
-    rc = cache_flash_mmu_set( 0, 0, irom_load_addr & 0xffff0000, irom_addr & 0xffff0000, 64, irom_page_count );
-    ESP_LOGV(TAG, "rc=%d", rc );
-    rc = cache_flash_mmu_set( 1, 0, irom_load_addr & 0xffff0000, irom_addr & 0xffff0000, 64, irom_page_count );
-    ESP_LOGV(TAG, "rc=%d", rc );
-    DPORT_REG_CLR_BIT( DPORT_PRO_CACHE_CTRL1_REG, (DPORT_PRO_CACHE_MASK_IRAM0) | (DPORT_PRO_CACHE_MASK_IRAM1 & 0) | (DPORT_PRO_CACHE_MASK_IROM0 & 0) | DPORT_PRO_CACHE_MASK_DROM0 | DPORT_PRO_CACHE_MASK_DRAM1 );
-    DPORT_REG_CLR_BIT( DPORT_APP_CACHE_CTRL1_REG, (DPORT_APP_CACHE_MASK_IRAM0) | (DPORT_APP_CACHE_MASK_IRAM1 & 0) | (DPORT_APP_CACHE_MASK_IROM0 & 0) | DPORT_APP_CACHE_MASK_DROM0 | DPORT_APP_CACHE_MASK_DRAM1 );
+    uint32_t drom_load_addr_aligned = drom_load_addr & MMU_FLASH_MASK;
+    uint32_t drom_page_count = bootloader_cache_pages_to_map(drom_size, drom_load_addr);
+    ESP_LOGV(TAG, "d mmu set paddr=%08x vaddr=%08x size=%d n=%d",
+             drom_addr & MMU_FLASH_MASK, drom_load_addr_aligned, drom_size, drom_page_count);
+    rc = cache_flash_mmu_set(0, 0, drom_load_addr_aligned, drom_addr & MMU_FLASH_MASK, 64, drom_page_count);
+    ESP_LOGV(TAG, "rc=%d", rc);
+    rc = cache_flash_mmu_set(1, 0, drom_load_addr_aligned, drom_addr & MMU_FLASH_MASK, 64, drom_page_count);
+    ESP_LOGV(TAG, "rc=%d", rc);
+    uint32_t irom_load_addr_aligned = irom_load_addr & MMU_FLASH_MASK;
+    uint32_t irom_page_count = bootloader_cache_pages_to_map(irom_size, irom_load_addr);
+    ESP_LOGV(TAG, "i mmu set paddr=%08x vaddr=%08x size=%d n=%d",
+             irom_addr & MMU_FLASH_MASK, irom_load_addr_aligned, irom_size, irom_page_count);
+    rc = cache_flash_mmu_set(0, 0, irom_load_addr_aligned, irom_addr & MMU_FLASH_MASK, 64, irom_page_count);
+    ESP_LOGV(TAG, "rc=%d", rc);
+    rc = cache_flash_mmu_set(1, 0, irom_load_addr_aligned, irom_addr & MMU_FLASH_MASK, 64, irom_page_count);
+    ESP_LOGV(TAG, "rc=%d", rc);
+    DPORT_REG_CLR_BIT( DPORT_PRO_CACHE_CTRL1_REG,
+                       (DPORT_PRO_CACHE_MASK_IRAM0) | (DPORT_PRO_CACHE_MASK_IRAM1 & 0) |
+                       (DPORT_PRO_CACHE_MASK_IROM0 & 0) | DPORT_PRO_CACHE_MASK_DROM0 |
+                       DPORT_PRO_CACHE_MASK_DRAM1 );
+    DPORT_REG_CLR_BIT( DPORT_APP_CACHE_CTRL1_REG,
+                       (DPORT_APP_CACHE_MASK_IRAM0) | (DPORT_APP_CACHE_MASK_IRAM1 & 0) |
+                       (DPORT_APP_CACHE_MASK_IROM0 & 0) | DPORT_APP_CACHE_MASK_DROM0 |
+                       DPORT_APP_CACHE_MASK_DRAM1 );
     Cache_Read_Enable( 0 );
     // Application will need to do Cache_Flush(1) and Cache_Read_Enable(1)
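
The replaced page-count math above is where the incorrect mapping size came from: rounding up from the segment size alone under-counts whenever the load address is not 64 KB aligned. A hypothetical worked example (not part of the commit), using a typical DROM load address such as 0x3f400020:

/* Hypothetical: a 64 KB DROM segment loaded just past the image headers. */
uint32_t old_pages = (0x10000 + 64*1024 - 1) / (64*1024);                /* == 1 */
uint32_t new_pages = bootloader_cache_pages_to_map(0x10000, 0x3f400020); /* == 2 */
/* The old formula ignores the 0x20-byte offset into the first 64 KB page,
 * so the final 0x20 bytes of the segment were left unmapped. */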

@@ -1 +1 @@
-Subproject commit 9fbe1eec656dc69942f5163cc6cc79c33d5aad64
+Subproject commit 9ad444a6e06e58833d5e6044c1d5f3eb3dd56023