/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <string.h>
#include <sys/param.h> // For MIN/MAX
#include "spi_flash_chip_generic.h"
#include "spi_flash_defs.h"
#include "hal/spi_flash_encrypt_hal.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_private/spi_flash_os.h"

typedef struct flash_chip_dummy {
    uint8_t dio_dummy_bitlen;
    uint8_t qio_dummy_bitlen;
    uint8_t qout_dummy_bitlen;
    uint8_t dout_dummy_bitlen;
    uint8_t fastrd_dummy_bitlen;
    uint8_t slowrd_dummy_bitlen;
} flash_chip_dummy_t;

// These parameters can be placed in the ROM. For now we use the code in IDF.
DRAM_ATTR const static flash_chip_dummy_t default_flash_chip_dummy = {
    .dio_dummy_bitlen = SPI_FLASH_DIO_DUMMY_BITLEN,
    .qio_dummy_bitlen = SPI_FLASH_QIO_DUMMY_BITLEN,
    .qout_dummy_bitlen = SPI_FLASH_QOUT_DUMMY_BITLEN,
    .dout_dummy_bitlen = SPI_FLASH_DOUT_DUMMY_BITLEN,
    .fastrd_dummy_bitlen = SPI_FLASH_FASTRD_DUMMY_BITLEN,
    .slowrd_dummy_bitlen = SPI_FLASH_SLOWRD_DUMMY_BITLEN,
};

DRAM_ATTR const static flash_chip_dummy_t hpm_flash_chip_dummy = {
    .dio_dummy_bitlen = SPI_FLASH_DIO_HPM_DUMMY_BITLEN,
    .qio_dummy_bitlen = SPI_FLASH_QIO_HPM_DUMMY_BITLEN,
    .qout_dummy_bitlen = SPI_FLASH_QOUT_DUMMY_BITLEN,
    .dout_dummy_bitlen = SPI_FLASH_DOUT_DUMMY_BITLEN,
    .fastrd_dummy_bitlen = SPI_FLASH_FASTRD_DUMMY_BITLEN,
    .slowrd_dummy_bitlen = SPI_FLASH_SLOWRD_DUMMY_BITLEN,
};

DRAM_ATTR flash_chip_dummy_t *rom_flash_chip_dummy = (flash_chip_dummy_t *)&default_flash_chip_dummy;

DRAM_ATTR flash_chip_dummy_t *rom_flash_chip_dummy_hpm = (flash_chip_dummy_t *)&hpm_flash_chip_dummy;

// Pointer table for HW flash encryption. By default the hardware encryption routines are used.
DRAM_ATTR static spi_flash_encryption_t esp_flash_encryption_default __attribute__((__unused__)) = {
    .flash_encryption_enable = spi_flash_encryption_hal_enable,
    .flash_encryption_disable = spi_flash_encryption_hal_disable,
    .flash_encryption_data_prepare = spi_flash_encryption_hal_prepare,
    .flash_encryption_done = spi_flash_encryption_hal_done,
    .flash_encryption_destroy = spi_flash_encryption_hal_destroy,
    .flash_encryption_check = spi_flash_encryption_hal_check,
};

#define SPI_FLASH_DEFAULT_IDLE_TIMEOUT_MS 200
#define SPI_FLASH_GENERIC_CHIP_ERASE_TIMEOUT_MS 4000
#define SPI_FLASH_GENERIC_SECTOR_ERASE_TIMEOUT_MS 600  // according to GD25Q127 (at 125°C), plus 100 ms margin
#define SPI_FLASH_GENERIC_BLOCK_ERASE_TIMEOUT_MS 4100  // according to GD25Q127 (at 125°C), plus 100 ms margin
#define SPI_FLASH_GENERIC_PAGE_PROGRAM_TIMEOUT_MS 500

#define HOST_DELAY_INTERVAL_US 1
#define CHIP_WAIT_IDLE_INTERVAL_US 20

const DRAM_ATTR flash_chip_op_timeout_t spi_flash_chip_generic_timeout = {
    .idle_timeout = SPI_FLASH_DEFAULT_IDLE_TIMEOUT_MS * 1000,
    .chip_erase_timeout = SPI_FLASH_GENERIC_CHIP_ERASE_TIMEOUT_MS * 1000,
    .block_erase_timeout = SPI_FLASH_GENERIC_BLOCK_ERASE_TIMEOUT_MS * 1000,
    .sector_erase_timeout = SPI_FLASH_GENERIC_SECTOR_ERASE_TIMEOUT_MS * 1000,
    .page_program_timeout = SPI_FLASH_GENERIC_PAGE_PROGRAM_TIMEOUT_MS * 1000,
};

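// Report the erase-in-progress status to the OS layer through the optional
// set_flash_op_status hook, so other components can tell whether an erase is ongoing.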
#define SET_FLASH_ERASE_STATUS(CHIP, status) do { \
    if (CHIP->os_func->set_flash_op_status) { \
        CHIP->os_func->set_flash_op_status(status); \
    } \
} while(0)

static const char TAG[] = "chip_generic";

#ifndef CONFIG_SPI_FLASH_ROM_IMPL

esp_err_t spi_flash_chip_generic_probe(esp_flash_t *chip, uint32_t flash_id)
{
    // This is the catch-all probe function; it always claims the chip if nothing
    // else has claimed it yet.
    return ESP_OK;
}

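// Software-reset the chip: send the Reset-Enable command followed by the Reset command,
// then wait for the chip to become idle again.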
esp_err_t spi_flash_chip_generic_reset(esp_flash_t *chip)
{
    // This sequence is written following the Winbond spec.
    spi_flash_trans_t t;
    t = (spi_flash_trans_t) {
        .command = CMD_RST_EN,
    };
    esp_err_t err = chip->host->driver->common_command(chip->host, &t);
    if (err != ESP_OK) {
        return err;
    }

    t = (spi_flash_trans_t) {
        .command = CMD_RST_DEV,
    };
    err = chip->host->driver->common_command(chip->host, &t);
    if (err != ESP_OK) {
        return err;
    }

    err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    return err;
}

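// Derive the chip size from the lowest byte of the JEDEC ID, which encodes the capacity as a
// power of two on chips following the common convention.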
esp_err_t spi_flash_chip_generic_detect_size(esp_flash_t *chip, uint32_t *size)
{
    uint32_t id = chip->chip_id;
    *size = 0;

    /* Can't detect size unless the high byte of the product ID matches the same convention, which is usually 0x40 or
     * 0xC0 or similar. */
    if (((id & 0xFFFF) == 0x0000) || ((id & 0xFFFF) == 0xFFFF)) {
        return ESP_ERR_FLASH_UNSUPPORTED_CHIP;
    }

    *size = 1 << (id & 0xFF);
    return ESP_OK;
}

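// Whole-chip erase: disable write protection, issue the erase, and wait for completion
// (with no timeout if CONFIG_SPI_FLASH_CHECK_ERASE_TIMEOUT_DISABLED is set).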
esp_err_t spi_flash_chip_generic_erase_chip(esp_flash_t *chip)
{
    esp_err_t err;

    err = chip->chip_drv->set_chip_write_protect(chip, false);
    if (err == ESP_OK) {
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    }
    // The chip didn't accept the previous write command. Ignore this in preparation stage.
    if (err == ESP_OK || err == ESP_ERR_NOT_SUPPORTED) {
        SET_FLASH_ERASE_STATUS(chip, SPI_FLASH_OS_IS_ERASING_STATUS_FLAG);
        chip->host->driver->erase_chip(chip->host);
        chip->busy = 1;
#ifdef CONFIG_SPI_FLASH_CHECK_ERASE_TIMEOUT_DISABLED
        err = chip->chip_drv->wait_idle(chip, ESP_FLASH_CHIP_GENERIC_NO_TIMEOUT);
#else
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->chip_erase_timeout);
#endif
        SET_FLASH_ERASE_STATUS(chip, 0);
    }
    // Ensure WEL is 0, even if the erase failed.
    if (err == ESP_ERR_NOT_SUPPORTED) {
        err = chip->chip_drv->set_chip_write_protect(chip, true);
    }

    return err;
}

esp_err_t spi_flash_chip_generic_erase_sector(esp_flash_t *chip, uint32_t start_address)
{
    esp_err_t err = chip->chip_drv->set_chip_write_protect(chip, false);
    if (err == ESP_OK) {
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    }
    // The chip didn't accept the previous write command. Ignore this in preparation stage.
    if (err == ESP_OK || err == ESP_ERR_NOT_SUPPORTED) {
        SET_FLASH_ERASE_STATUS(chip, SPI_FLASH_OS_IS_ERASING_STATUS_FLAG);
        chip->host->driver->erase_sector(chip->host, start_address);
        chip->busy = 1;
#ifdef CONFIG_SPI_FLASH_CHECK_ERASE_TIMEOUT_DISABLED
        err = chip->chip_drv->wait_idle(chip, ESP_FLASH_CHIP_GENERIC_NO_TIMEOUT);
#else
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->sector_erase_timeout);
#endif
        SET_FLASH_ERASE_STATUS(chip, 0);
    }
    // Ensure WEL is 0, even if the erase failed.
    if (err == ESP_ERR_NOT_SUPPORTED) {
        err = chip->chip_drv->set_chip_write_protect(chip, true);
    }

    return err;
}

esp_err_t spi_flash_chip_generic_erase_block(esp_flash_t *chip, uint32_t start_address)
{
    esp_err_t err = chip->chip_drv->set_chip_write_protect(chip, false);
    if (err == ESP_OK) {
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    }
    // The chip didn't accept the previous write command. Ignore this in preparation stage.
    if (err == ESP_OK || err == ESP_ERR_NOT_SUPPORTED) {
        SET_FLASH_ERASE_STATUS(chip, SPI_FLASH_OS_IS_ERASING_STATUS_FLAG);
        chip->host->driver->erase_block(chip->host, start_address);
        chip->busy = 1;
#ifdef CONFIG_SPI_FLASH_CHECK_ERASE_TIMEOUT_DISABLED
        err = chip->chip_drv->wait_idle(chip, ESP_FLASH_CHIP_GENERIC_NO_TIMEOUT);
#else
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->block_erase_timeout);
#endif
        SET_FLASH_ERASE_STATUS(chip, 0);
    }
    // Ensure WEL is 0, even if the erase failed.
    if (err == ESP_ERR_NOT_SUPPORTED) {
        err = chip->chip_drv->set_chip_write_protect(chip, true);
    }

    return err;
}

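// Read in chunks sized by the host's read_data_slicer(), staging each chunk in a small
// temporary buffer before copying the requested bytes into the caller's buffer.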
esp_err_t spi_flash_chip_generic_read(esp_flash_t *chip, void *buffer, uint32_t address, uint32_t length)
{
    esp_err_t err = ESP_OK;
    const uint32_t page_size = chip->chip_drv->page_size;
    uint32_t align_address;
    uint8_t temp_buffer[64]; // the SPI flash HAL reads no more than 64 bytes at a time
    uint32_t config_io_flags = 0;

    // Configure the host; return early if the requested IO mode is not supported.
    err = chip->chip_drv->config_host_io_mode(chip, config_io_flags);

    if (err == ESP_ERR_NOT_SUPPORTED) {
        ESP_LOGE(TAG, "configure host io mode failed - unsupported");
        return err;
    }

    while (err == ESP_OK && length > 0) {
        memset(temp_buffer, 0xFF, sizeof(temp_buffer));
        uint32_t read_len = chip->host->driver->read_data_slicer(chip->host, address, length, &align_address, page_size);
        uint32_t left_off = address - align_address;
        uint32_t data_len = MIN(align_address + read_len, address + length) - address;
        err = chip->host->driver->read(chip->host, temp_buffer, align_address, read_len);

        memcpy(buffer, temp_buffer + left_off, data_len);

        address += data_len;
        buffer = (void *)((intptr_t)buffer + data_len);
        length = length - data_len;
    }

    return err;
}

esp_err_t spi_flash_chip_generic_page_program(esp_flash_t *chip, const void *buffer, uint32_t address, uint32_t length)
{
    esp_err_t err;

    err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    // The chip didn't accept the previous write command. Ignore this in preparation stage.
    if (err == ESP_OK || err == ESP_ERR_NOT_SUPPORTED) {
        // Perform the actual Page Program command
        chip->host->driver->program_page(chip->host, buffer, address, length);
        chip->busy = 1;

        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->page_program_timeout);
    }
    // Ensure WEL is 0, even if the page program failed.
    if (err == ESP_ERR_NOT_SUPPORTED) {
        err = chip->chip_drv->set_chip_write_protect(chip, true);
    }
    return err;
}

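// Write in page-aligned chunks sized by the host's write_data_slicer(), disabling write
// protection and programming one chunk per loop iteration.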
esp_err_t spi_flash_chip_generic_write(esp_flash_t *chip, const void *buffer, uint32_t address, uint32_t length)
{
    esp_err_t err = ESP_OK;
    const uint32_t page_size = chip->chip_drv->page_size;
    uint32_t align_address;
    uint8_t temp_buffer[64]; // the SPI flash HAL writes no more than 64 bytes at a time

    while (err == ESP_OK && length > 0) {
        memset(temp_buffer, 0xFF, sizeof(temp_buffer));
        uint32_t page_len = chip->host->driver->write_data_slicer(chip->host, address, length, &align_address, page_size);
        uint32_t left_off = address - align_address;
        uint32_t write_len = MIN(align_address + page_len, address + length) - address;
        memcpy(temp_buffer + left_off, buffer, write_len);

        err = chip->chip_drv->set_chip_write_protect(chip, false);
        if (err == ESP_OK && length > 0) {
            err = chip->chip_drv->program_page(chip, temp_buffer, align_address, page_len);

            address += write_len;
            buffer = (void *)((intptr_t)buffer + write_len);
            length -= write_len;
        }
    }
    // The caller is responsible for calling host->driver->flush_cache, because this function may be
    // called in small pieces. Calling flush_cache too frequently would hurt performance.
    return err;
}

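// Encrypted write: only supported on the main flash chip. Data is encrypted by the flash
// encryption peripheral in 64/32/16-byte blocks and then programmed through the normal
// write path.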
esp_err_t spi_flash_chip_generic_write_encrypted(esp_flash_t *chip, const void *buffer, uint32_t address, uint32_t length)
{
    spi_flash_encryption_t *esp_flash_encryption = &esp_flash_encryption_default;
    esp_err_t err = ESP_OK;
    // Encryption must happen on main flash.
    if (chip != esp_flash_default_chip) {
        return ESP_ERR_NOT_SUPPORTED;
    }

    /* Check if the buffer and length meet the requirements */
    if (esp_flash_encryption->flash_encryption_check(address, length) != true) {
        return ESP_ERR_NOT_SUPPORTED;
    }

    const uint8_t *data_bytes = (const uint8_t *)buffer;
    esp_flash_encryption->flash_encryption_enable();
    while (length > 0) {
        int block_size;
        /* Write the largest block if possible */
        if (address % 64 == 0 && length >= 64) {
            block_size = 64;
        } else if (address % 32 == 0 && length >= 32) {
            block_size = 32;
        } else {
            block_size = 16;
        }
        // Prepare the flash chip (at the same time as the AES operation, for performance)
        esp_flash_encryption->flash_encryption_data_prepare(address, (uint32_t *)data_bytes, block_size);
        err = chip->chip_drv->set_chip_write_protect(chip, false);
        if (err != ESP_OK) {
            return err;
        }
        // Wait for the buffer encryption to finish and make the result visible to SPI1
        esp_flash_encryption->flash_encryption_done();

        // Note: after the write flash command is sent, the hardware writes the encrypted buffer
        // prepared in the XTS_FLASH_ENCRYPTION register by `flash_encryption_data_prepare`, instead of the
        // original buffer named `data_bytes`.

        err = chip->chip_drv->write(chip, (uint32_t *)data_bytes, address, length);
        if (err != ESP_OK) {
            return err;
        }
        err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->page_program_timeout);
        if (err != ESP_OK) {
            return err;
        }

        // Note: we don't wait for idle status here, because this way
        // the AES peripheral can start encrypting the next
        // block while the SPI flash chip is busy completing the write

        esp_flash_encryption->flash_encryption_destroy();

        length -= block_size;
        data_bytes += block_size;
        address += block_size;
    }

    esp_flash_encryption->flash_encryption_disable();
    return err;
}

esp_err_t spi_flash_chip_generic_set_write_protect(esp_flash_t *chip, bool write_protect)
{
    esp_err_t err = ESP_OK;

    err = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
    // The chip didn't accept the previous write command. Ignore this in preparation stage.
    if (err == ESP_OK || err == ESP_ERR_NOT_SUPPORTED) {
        chip->host->driver->set_write_protect(chip->host, write_protect);
    }

    bool wp_read;
    err = chip->chip_drv->get_chip_write_protect(chip, &wp_read);
    if (err == ESP_OK && wp_read != write_protect) {
        // WREN flag has not been set!
        err = ESP_ERR_NOT_FOUND;
    }
    return err;
}

esp_err_t spi_flash_chip_generic_get_write_protect(esp_flash_t *chip, bool *out_write_protect)
{
    esp_err_t err = ESP_OK;
    uint32_t status;
    assert(out_write_protect!=NULL);
    err = chip->chip_drv->read_reg(chip, SPI_FLASH_REG_STATUS, &status);
    if (err != ESP_OK) {
        return err;
    }

    *out_write_protect = ((status & SR_WREN) == 0);
    return err;
}

esp_err_t spi_flash_chip_generic_read_reg(esp_flash_t* chip, spi_flash_register_t reg_id, uint32_t* out_reg)
{
    return chip->host->driver->read_status(chip->host, (uint8_t*)out_reg);
}

esp_err_t spi_flash_chip_generic_yield(esp_flash_t* chip, uint32_t wip)
{
    esp_err_t err = ESP_OK;
    uint32_t flags = wip ? 1 : 0; // check_yield() and yield() impls should not issue suspend/resume if this flag is zero

    if (chip->os_func->check_yield) {
        uint32_t request;
        // Depending on the implementation, check_yield() may block, poll, delay or do nothing but return
        err = chip->os_func->check_yield(chip->os_func_data, flags, &request);
        if (err == ESP_OK) {
            if (err == ESP_OK && (request & SPI_FLASH_YIELD_REQ_YIELD) != 0) {
                uint32_t status;
                // Depending on the implementation, yield() may block until something happens
                err = chip->os_func->yield(chip->os_func_data, &status);
            }
        } else if (err == ESP_ERR_TIMEOUT) {
            err = ESP_OK;
        } else {
            abort();
        }
    }
    return err;
}

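// Poll the status register until the WIP (write in progress) bit clears or timeout_us
// expires. Also detects a rejected write command (WEL still set after the chip goes idle).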
esp_err_t spi_flash_chip_generic_wait_idle(esp_flash_t *chip, uint32_t timeout_us)
{
    bool timeout_en = (timeout_us != ESP_FLASH_CHIP_GENERIC_NO_TIMEOUT);
    if (timeout_us == ESP_FLASH_CHIP_GENERIC_NO_TIMEOUT) {
        timeout_us = 0; // in order to enter the while loop below
    }
    timeout_us++; // allow at least one pass before timeout, last one has no sleep cycle

    uint8_t status = 0;
    const int interval = CHIP_WAIT_IDLE_INTERVAL_US;
    while (timeout_us > 0) {
        while (!chip->host->driver->host_status(chip->host) && timeout_us > 0) {

#if HOST_DELAY_INTERVAL_US > 0
            if (timeout_us > 1) {
                int delay = MIN(HOST_DELAY_INTERVAL_US, timeout_us);
                chip->os_func->delay_us(chip->os_func_data, delay);
                timeout_us -= delay;
            }
#endif
        }

        uint32_t read;
        esp_err_t err = chip->chip_drv->read_reg(chip, SPI_FLASH_REG_STATUS, &read);
        if (err != ESP_OK) {
            return err;
        }
        status = read;

        if ((status & SR_WIP) == 0) { // Verify write in progress is complete
            if (chip->busy == 1) {
                chip->busy = 0;
                if ((status & SR_WREN) != 0) { // The previous command was not accepted, leaving WEL still set.
                    return ESP_ERR_NOT_SUPPORTED;
                }
            }
            break;
        }
        if (timeout_us > 0 && interval > 0) {
            int delay = MIN(interval, timeout_us);
            chip->os_func->delay_us(chip->os_func_data, delay);
            if (timeout_en) {
                timeout_us -= delay;
            }
        }
    }
    return (timeout_us > 0) ? ESP_OK : ESP_ERR_TIMEOUT;
}

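// Pick the read command, address bit length and dummy cycle count that match the chip's
// configured read mode (and HPM state), then program them into the host driver.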
esp_err_t spi_flash_chip_generic_config_host_io_mode(esp_flash_t *chip, uint32_t flags)
{
    uint32_t dummy_cyclelen_base;
    uint32_t addr_bitlen;
    uint32_t read_command;
    bool conf_required = false;
    esp_flash_io_mode_t read_mode = chip->read_mode;
    bool addr_32bit = (flags & SPI_FLASH_CONFIG_IO_MODE_32B_ADDR);

    switch (read_mode & 0xFFFF) {
    case SPI_FLASH_QIO:
        // for QIO mode, the 4 bits right after the address are used for continuous mode, and should be set to 0 to avoid entering it.
        addr_bitlen = SPI_FLASH_QIO_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->qio_dummy_bitlen : rom_flash_chip_dummy->qio_dummy_bitlen);
        read_command = (addr_32bit? CMD_FASTRD_QIO_4B: CMD_FASTRD_QIO);
        conf_required = true;
        break;
    case SPI_FLASH_QOUT:
        addr_bitlen = SPI_FLASH_QOUT_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->qout_dummy_bitlen : rom_flash_chip_dummy->qout_dummy_bitlen);
        read_command = (addr_32bit? CMD_FASTRD_QUAD_4B: CMD_FASTRD_QUAD);
        break;
    case SPI_FLASH_DIO:
        // for DIO mode, the 4 bits right after the address are used for continuous mode, and should be set to 0 to avoid entering it.
        addr_bitlen = SPI_FLASH_DIO_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->dio_dummy_bitlen : rom_flash_chip_dummy->dio_dummy_bitlen);
        read_command = (addr_32bit? CMD_FASTRD_DIO_4B: CMD_FASTRD_DIO);
        conf_required = true;
        break;
    case SPI_FLASH_DOUT:
        addr_bitlen = SPI_FLASH_DOUT_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->dout_dummy_bitlen : rom_flash_chip_dummy->dout_dummy_bitlen);
        read_command = (addr_32bit? CMD_FASTRD_DUAL_4B: CMD_FASTRD_DUAL);
        break;
    case SPI_FLASH_FASTRD:
        addr_bitlen = SPI_FLASH_FASTRD_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->fastrd_dummy_bitlen : rom_flash_chip_dummy->fastrd_dummy_bitlen);
        read_command = (addr_32bit? CMD_FASTRD_4B: CMD_FASTRD);
        break;
    case SPI_FLASH_SLOWRD:
        addr_bitlen = SPI_FLASH_SLOWRD_ADDR_BITLEN;
        dummy_cyclelen_base = (chip->hpm_dummy_ena ? rom_flash_chip_dummy_hpm->slowrd_dummy_bitlen : rom_flash_chip_dummy->slowrd_dummy_bitlen);
        read_command = (addr_32bit? CMD_READ_4B: CMD_READ);
        break;
    default:
        return ESP_ERR_FLASH_NOT_INITIALISED;
    }
    // For the W25Q256 chip, the only difference between the 4-byte address command and the 3-byte version is the command value and the address bit length.
    if (addr_32bit) {
        addr_bitlen += 8;
    }

    if (conf_required) {
        read_mode |= SPI_FLASH_CONFIG_CONF_BITS;
    }

    return chip->host->driver->configure_host_io_mode(chip->host, read_command, addr_bitlen, dummy_cyclelen_base, read_mode);
}

esp_err_t spi_flash_chip_generic_get_io_mode(esp_flash_t *chip, esp_flash_io_mode_t* out_io_mode)
{
    // On "generic" chips, this involves checking
    // bit 1 (QE) of RDSR2 (35h) result
    // (it works this way on GigaDevice & Fudan Micro chips, probably others...)
    const uint8_t BIT_QE = 1 << 1;
    uint32_t sr;
    esp_err_t ret = spi_flash_common_read_status_8b_rdsr2(chip, &sr);
    if (ret == ESP_OK) {
        *out_io_mode = ((sr & BIT_QE)? SPI_FLASH_QOUT: 0);
    }
    return ret;
}

esp_err_t spi_flash_chip_generic_set_io_mode(esp_flash_t *chip)
{
    // On "generic" chips, this involves checking
    // bit 9 (QE) of RDSR (05h) result
    const uint32_t BIT_QE = 1 << 9;
    return spi_flash_common_set_io_mode(chip,
                                        spi_flash_common_write_status_16b_wrsr,
                                        spi_flash_common_read_status_16b_rdsr_rdsr2,
                                        BIT_QE);
}

#endif // CONFIG_SPI_FLASH_ROM_IMPL

esp_err_t spi_flash_chip_generic_read_unique_id(esp_flash_t *chip, uint64_t* flash_unique_id)
{
    uint64_t unique_id_buf = 0;
    spi_flash_trans_t transfer = {
        .command = CMD_RDUID,
        .miso_len = 8,
        .miso_data = ((uint8_t *)&unique_id_buf),
        .dummy_bitlen = 32, // the RDUID command is followed by 4 bytes (32 bits) of dummy clocks.
    };
    esp_err_t err = chip->host->driver->common_command(chip->host, &transfer);

    if (unique_id_buf == 0 || unique_id_buf == UINT64_MAX) {
        ESP_EARLY_LOGE(TAG, "No response from device when trying to retrieve Unique ID\n");
        *flash_unique_id = unique_id_buf;
        return ESP_ERR_NOT_SUPPORTED;
    }

    *flash_unique_id = __builtin_bswap64(unique_id_buf);
    return err;
}

esp_err_t spi_flash_chip_generic_read_unique_id_none(esp_flash_t *chip, uint64_t* flash_unique_id)
{
    // For flash chips that don't support reading a unique id.
    return ESP_ERR_NOT_SUPPORTED;
}

spi_flash_caps_t spi_flash_chip_generic_get_caps(esp_flash_t *chip)
{
    // For the generic chip's capabilities, take the XMC chip as reference.
    spi_flash_caps_t caps_flags = 0;
    // 32M-bits address support

    // flash suspend support
    // Only `XMC` chips support suspend for now.
    if (chip->chip_id >> 16 == 0x20) {
        caps_flags |= SPI_FLASH_CHIP_CAP_SUSPEND;
    }
    // flash read unique id.
    caps_flags |= SPI_FLASH_CHIP_CAP_UNIQUE_ID;
    return caps_flags;
}

static const char chip_name[] = "generic";

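// The "generic" chip driver: the fallback used when no vendor-specific driver claims the
// chip. Vendor-specific drivers usually define their own spi_flash_chip_t that reuses most
// of these functions and overrides only a few entries, roughly like the sketch below
// (illustrative only; the "example" names are placeholders, not real IDF symbols):
//
//     const spi_flash_chip_t esp_flash_chip_example = {
//         .name = "example",
//         .timeout = &spi_flash_chip_generic_timeout,
//         .probe = spi_flash_chip_example_probe,              // vendor-specific probe
//         .set_io_mode = spi_flash_chip_example_set_io_mode,  // vendor-specific QE handling
//         .read = spi_flash_chip_generic_read,                // generic implementation reused
//         // ... remaining fields as in esp_flash_chip_generic below ...
//     };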
const spi_flash_chip_t esp_flash_chip_generic = {
    .name = chip_name,
    .timeout = &spi_flash_chip_generic_timeout,
    .probe = spi_flash_chip_generic_probe,
    .reset = spi_flash_chip_generic_reset,
    .detect_size = spi_flash_chip_generic_detect_size,
    .erase_chip = spi_flash_chip_generic_erase_chip,
    .erase_sector = spi_flash_chip_generic_erase_sector,
    .erase_block = spi_flash_chip_generic_erase_block,
    .sector_size = 4 * 1024,
    .block_erase_size = 64 * 1024,

    // TODO: figure out if generic chip-wide protection bits exist across some manufacturers
    .get_chip_write_protect = spi_flash_chip_generic_get_write_protect,
    .set_chip_write_protect = spi_flash_chip_generic_set_write_protect,

    // Chip write protection regions do not appear to be standardised
    // at all, this is implemented in chip-specific drivers only.
    .num_protectable_regions = 0,
    .protectable_regions = NULL,
    .get_protected_regions = NULL,
    .set_protected_regions = NULL,

    .read = spi_flash_chip_generic_read,
    .write = spi_flash_chip_generic_write,
    .program_page = spi_flash_chip_generic_page_program,
    .page_size = 256,
    .write_encrypted = spi_flash_chip_generic_write_encrypted,

    .wait_idle = spi_flash_chip_generic_wait_idle,
    .set_io_mode = spi_flash_chip_generic_set_io_mode,
    .get_io_mode = spi_flash_chip_generic_get_io_mode,

    .read_reg = spi_flash_chip_generic_read_reg,
    .yield = spi_flash_chip_generic_yield,
    .sus_setup = spi_flash_chip_generic_suspend_cmd_conf,
    .read_unique_id = spi_flash_chip_generic_read_unique_id,
    .get_chip_caps = spi_flash_chip_generic_get_caps,
    .config_host_io_mode = spi_flash_chip_generic_config_host_io_mode,
};

#ifndef CONFIG_SPI_FLASH_ROM_IMPL
/*******************************************************************************
 * Utility functions
 ******************************************************************************/

static esp_err_t spi_flash_common_read_qe_sr(esp_flash_t *chip, uint8_t qe_rdsr_command, uint8_t qe_sr_bitwidth, uint32_t *sr)
{
    uint32_t sr_buf = 0;
    spi_flash_trans_t t = {
        .command = qe_rdsr_command,
        .miso_data = (uint8_t*) &sr_buf,
        .miso_len = qe_sr_bitwidth / 8,
    };
    esp_err_t ret = chip->host->driver->common_command(chip->host, &t);
    *sr = sr_buf;
    return ret;
}

static esp_err_t spi_flash_common_write_qe_sr(esp_flash_t *chip, uint8_t qe_wrsr_command, uint8_t qe_sr_bitwidth, uint32_t qe)
{
    spi_flash_trans_t t = {
        .command = qe_wrsr_command,
        .mosi_data = ((uint8_t*) &qe),
        .mosi_len = qe_sr_bitwidth / 8,
        .miso_len = 0,
    };
    return chip->host->driver->common_command(chip->host, &t);
}

esp_err_t spi_flash_common_read_status_16b_rdsr_rdsr2(esp_flash_t* chip, uint32_t* out_sr)
{
    uint32_t sr, sr2;
    esp_err_t ret = spi_flash_common_read_qe_sr(chip, CMD_RDSR2, 8, &sr2);
    if (ret == ESP_OK) {
        ret = spi_flash_common_read_qe_sr(chip, CMD_RDSR, 8, &sr);
    }
    if (ret == ESP_OK) {
        *out_sr = (sr & 0xff) | ((sr2 & 0xff) << 8);
    }
    return ret;
}

esp_err_t spi_flash_common_read_status_8b_rdsr2(esp_flash_t* chip, uint32_t* out_sr)
{
    return spi_flash_common_read_qe_sr(chip, CMD_RDSR2, 8, out_sr);
}

esp_err_t spi_flash_common_read_status_8b_rdsr(esp_flash_t* chip, uint32_t* out_sr)
{
    return spi_flash_common_read_qe_sr(chip, CMD_RDSR, 8, out_sr);
}

esp_err_t spi_flash_common_write_status_16b_wrsr(esp_flash_t* chip, uint32_t sr)
{
    return spi_flash_common_write_qe_sr(chip, CMD_WRSR, 16, sr);
}

esp_err_t spi_flash_common_write_status_8b_wrsr(esp_flash_t* chip, uint32_t sr)
{
    return spi_flash_common_write_qe_sr(chip, CMD_WRSR, 8, sr);
}

esp_err_t spi_flash_common_write_status_8b_wrsr2(esp_flash_t* chip, uint32_t sr)
{
    return spi_flash_common_write_qe_sr(chip, CMD_WRSR2, 8, sr);
}

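// Generic QE (Quad Enable) handling: read the status register via rdsr_func, set or clear
// qe_sr_bit according to the configured IO mode, write it back via wrsr_func, and read it
// again to verify the bit actually stuck.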
esp_err_t spi_flash_common_set_io_mode(esp_flash_t *chip, esp_flash_wrsr_func_t wrsr_func, esp_flash_rdsr_func_t rdsr_func, uint32_t qe_sr_bit)
{
    esp_err_t ret = ESP_OK;
    const bool is_quad_mode = esp_flash_is_quad_mode(chip);
    bool update_config = false;
    /*
     * By default, we don't clear the QE bit even if the flash mode is not QIO or QOUT. Force-clearing the
     * QE bit via the generic chip driver (command 01H with 2 bytes) may leave the output of some
     * chips (MXIC) no longer valid.
     * Enable this option when testing a new flash chip for clearing of QE.
     */
    const bool force_check = false;

    bool need_check = is_quad_mode || force_check;

    uint32_t sr_update;
    if (need_check) {
        // Ensure quad modes are enabled, using the Quad Enable parameters supplied.
        uint32_t sr;
        ret = (*rdsr_func)(chip, &sr);
        if (ret != ESP_OK) {
            return ret;
        }
        ESP_EARLY_LOGD(TAG, "set_io_mode: status before 0x%x", sr);
        if (is_quad_mode) {
            sr_update = sr | qe_sr_bit;
        } else {
            sr_update = sr & (~qe_sr_bit);
        }
        ESP_EARLY_LOGV(TAG, "set_io_mode: status update 0x%x", sr_update);
        if (sr != sr_update) {
            update_config = true;
        }
    }

    if (update_config) {
        // some chips need the write protect to be disabled before writing to the Status Register
        chip->chip_drv->set_chip_write_protect(chip, false);

        ret = (*wrsr_func)(chip, sr_update);
        if (ret != ESP_OK) {
            chip->chip_drv->set_chip_write_protect(chip, true);
            return ret;
        }

        ret = chip->chip_drv->wait_idle(chip, chip->chip_drv->timeout->idle_timeout);
        if (ret == ESP_ERR_NOT_SUPPORTED) {
            chip->chip_drv->set_chip_write_protect(chip, true);
        }
        /* This function is the fallback approach, so we give it higher tolerance.
         * When the previous WRSR is rejected by the flash, the result of this function is
         * determined by whether the value later read back by RDSR meets the expectation.
         */
        if (ret != ESP_OK && ret != ESP_ERR_NOT_SUPPORTED) {
            return ret;
        }

        /* Check the new QE bit has stayed set */
        uint32_t sr;
        ret = (*rdsr_func)(chip, &sr);
        if (ret != ESP_OK) {
            return ret;
        }
        ESP_EARLY_LOGD(TAG, "set_io_mode: status after 0x%x", sr);
        if (sr != sr_update) {
            ret = ESP_ERR_FLASH_NO_RESPONSE;
        }
    }
    return ret;
}

#endif // !CONFIG_SPI_FLASH_ROM_IMPL

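// Configure the host's auto-suspend support with the suspend/resume commands and status
// mask used by XMC chips, currently the only vendor this driver enables auto-suspend for.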
esp_err_t spi_flash_chip_generic_suspend_cmd_conf(esp_flash_t *chip)
{
    // Only XMC chips support auto-suspend
    if (chip->chip_id >> 16 != 0x20) {
        ESP_EARLY_LOGE(TAG, "The flash you use doesn't support auto suspend, only \'XMC\' is supported");
        return ESP_ERR_NOT_SUPPORTED;
    }
    spi_flash_sus_cmd_conf sus_conf = {
        .sus_mask = 0x80,
        .cmd_rdsr = CMD_RDSR2,
        .sus_cmd = CMD_SUSPEND,
        .res_cmd = CMD_RESUME,
    };

    return chip->host->driver->sus_setup(chip->host, &sus_conf);
}