fatfs: enabled reading sector size and sectors count from boot sector in rawflash
This commit is contained in:
parent
f33ac0a037
commit
41742c2369
components/fatfs/diskio/diskio_rawflash.c

@@ -1,16 +1,8 @@
-// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
 
 #include <string.h>
 #include "diskio_impl.h"
@@ -22,11 +14,48 @@
 
 static const char* TAG = "diskio_rawflash";
 
-const esp_partition_t* ff_raw_handles[FF_VOLUMES];
+static const esp_partition_t* s_ff_raw_handles[FF_VOLUMES];
+// Determine the sector size and sector count by parsing the boot sector
+static size_t s_sector_size[FF_VOLUMES];
+static size_t s_sectors_count[FF_VOLUMES];
+
+#define BPB_BytsPerSec 11
+#define BPB_TotSec16 19
+#define BPB_TotSec32 32
+
 
 DSTATUS ff_raw_initialize (BYTE pdrv)
 {
+    uint16_t sector_size_tmp;
+    uint16_t sectors_count_tmp_16;
+    uint32_t sectors_count_tmp_32;
+
+    const esp_partition_t* part = s_ff_raw_handles[pdrv];
+    assert(part);
+    esp_err_t err = esp_partition_read(part, BPB_BytsPerSec, &sector_size_tmp, sizeof(sector_size_tmp));
+    if (unlikely(err != ESP_OK)) {
+        ESP_LOGE(TAG, "esp_partition_read failed (0x%x)", err);
+        return RES_ERROR;
+    }
+    s_sector_size[pdrv] = sector_size_tmp;
+
+    err = esp_partition_read(part, BPB_TotSec16, &sectors_count_tmp_16, sizeof(sectors_count_tmp_16));
+    if (unlikely(err != ESP_OK)) {
+        ESP_LOGE(TAG, "esp_partition_read failed (0x%x)", err);
+        return RES_ERROR;
+    }
+    s_sectors_count[pdrv] = sectors_count_tmp_16;
+    // For FAT32, the number of sectors is stored in a different field
+    if (sectors_count_tmp_16 == 0) {
+        err = esp_partition_read(part, BPB_TotSec32, &sectors_count_tmp_32, sizeof(sectors_count_tmp_32));
+        if (unlikely(err != ESP_OK)) {
+            ESP_LOGE(TAG, "esp_partition_read failed (0x%x)", err);
+            return RES_ERROR;
+        }
+        s_sectors_count[pdrv] = sectors_count_tmp_32;
+    }
+
     return 0;
 }
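For reference, the three BPB offsets used in ff_raw_initialize come straight from the FAT boot sector layout: bytes 11-12 hold the bytes-per-sector value, bytes 19-20 the 16-bit total sector count, and bytes 32-35 the 32-bit count that applies when the 16-bit field is zero. A minimal host-side sketch of the same parsing, assuming the boot sector is already in a RAM buffer (read_le16/read_le32 are illustrative helpers, not part of the driver):

    #include <stdint.h>
    #include <stddef.h>

    // FAT stores multi-byte BPB fields little-endian and unaligned,
    // so read them byte by byte.
    static uint16_t read_le16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }
    static uint32_t read_le32(const uint8_t *p) {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    // Mirrors the logic of ff_raw_initialize above, minus the flash reads.
    static void parse_boot_sector(const uint8_t *bs, size_t *sector_size, size_t *sectors_count)
    {
        *sector_size   = read_le16(bs + 11);      // BPB_BytsPerSec
        *sectors_count = read_le16(bs + 19);      // BPB_TotSec16
        if (*sectors_count == 0) {                // FAT32 keeps the count in the 32-bit field
            *sectors_count = read_le32(bs + 32);  // BPB_TotSec32
        }
    }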
@@ -38,9 +67,9 @@ DSTATUS ff_raw_status (BYTE pdrv)
 DRESULT ff_raw_read (BYTE pdrv, BYTE *buff, DWORD sector, UINT count)
 {
     ESP_LOGV(TAG, "ff_raw_read - pdrv=%i, sector=%i, count=%i\n", (unsigned int)pdrv, (unsigned int)sector, (unsigned int)count);
-    const esp_partition_t* part = ff_raw_handles[pdrv];
+    const esp_partition_t* part = s_ff_raw_handles[pdrv];
     assert(part);
-    esp_err_t err = esp_partition_read(part, sector * SPI_FLASH_SEC_SIZE, buff, count * SPI_FLASH_SEC_SIZE);
+    esp_err_t err = esp_partition_read(part, sector * s_sector_size[pdrv], buff, count * s_sector_size[pdrv]);
     if (unlikely(err != ESP_OK)) {
         ESP_LOGE(TAG, "esp_partition_read failed (0x%x)", err);
         return RES_ERROR;
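The practical effect of the change above: byte offsets passed to esp_partition_read now scale with the sector size parsed from the boot sector instead of the fixed SPI_FLASH_SEC_SIZE (4096). A small worked sketch of the arithmetic (raw_byte_offset is an illustrative helper, not in the driver):

    #include <assert.h>
    #include <stddef.h>

    // Map a FatFs sector number to a byte offset inside the raw partition,
    // as ff_raw_read now does per volume.
    static size_t raw_byte_offset(size_t sector, size_t sector_size)
    {
        return sector * sector_size;
    }

    int main(void)
    {
        assert(raw_byte_offset(4, 512)  == 2048);   // image formatted with 512 B sectors
        assert(raw_byte_offset(4, 4096) == 16384);  // old fixed-size behaviour
        return 0;
    }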
@@ -56,17 +85,17 @@ DRESULT ff_raw_write (BYTE pdrv, const BYTE *buff, DWORD sector, UINT count)
 
 DRESULT ff_raw_ioctl (BYTE pdrv, BYTE cmd, void *buff)
 {
-    const esp_partition_t* part = ff_raw_handles[pdrv];
+    const esp_partition_t* part = s_ff_raw_handles[pdrv];
     ESP_LOGV(TAG, "ff_raw_ioctl: cmd=%i\n", cmd);
     assert(part);
     switch (cmd) {
     case CTRL_SYNC:
         return RES_OK;
     case GET_SECTOR_COUNT:
-        *((DWORD *) buff) = part->size / SPI_FLASH_SEC_SIZE;
+        *((DWORD *) buff) = s_sectors_count[pdrv];
         return RES_OK;
     case GET_SECTOR_SIZE:
-        *((WORD *) buff) = SPI_FLASH_SEC_SIZE;
+        *((WORD *) buff) = s_sector_size[pdrv];
         return RES_OK;
     case GET_BLOCK_SIZE:
         return RES_ERROR;
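Callers normally reach ff_raw_ioctl through FatFs's disk layer rather than directly. A sketch of querying the values cached during initialization, assuming the volume is registered as drive pdrv; disk_ioctl and the GET_SECTOR_COUNT/GET_SECTOR_SIZE commands are standard FatFs, and error handling is trimmed for brevity:

    #include <stdio.h>
    #include "diskio.h"  // FatFs disk I/O layer: disk_ioctl() and command codes

    void print_volume_geometry(BYTE pdrv)
    {
        DWORD sectors = 0;
        WORD sector_size = 0;
        // Both calls land in ff_raw_ioctl above and return the values
        // parsed from the boot sector by ff_raw_initialize.
        if (disk_ioctl(pdrv, GET_SECTOR_COUNT, &sectors) == RES_OK &&
            disk_ioctl(pdrv, GET_SECTOR_SIZE, &sector_size) == RES_OK) {
            printf("volume: %lu sectors of %u bytes\n",
                   (unsigned long)sectors, (unsigned)sector_size);
        }
    }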
@@ -88,7 +117,7 @@ esp_err_t ff_diskio_register_raw_partition(BYTE pdrv, const esp_partition_t* part_handle)
         .ioctl = &ff_raw_ioctl
     };
     ff_diskio_register(pdrv, &raw_impl);
-    ff_raw_handles[pdrv] = part_handle;
+    s_ff_raw_handles[pdrv] = part_handle;
     return ESP_OK;
 
 }
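To put the registration function in context, a typical read-only mount sequence looks roughly like the sketch below. esp_partition_find_first, ff_diskio_get_drive, and f_mount are existing ESP-IDF/FatFs APIs, but the partition label "storage" and the overall flow are assumptions for illustration:

    #include "esp_partition.h"
    #include "diskio_impl.h"      // ff_diskio_get_drive
    #include "diskio_rawflash.h"  // ff_diskio_register_raw_partition
    #include "ff.h"               // FATFS, f_mount

    esp_err_t mount_raw_fatfs(FATFS *fs)
    {
        // "storage" is an assumed partition label; adjust to your partition table.
        const esp_partition_t *part = esp_partition_find_first(
                ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_DATA_FAT, "storage");
        if (part == NULL) {
            return ESP_ERR_NOT_FOUND;
        }
        BYTE pdrv = 0xFF;
        esp_err_t err = ff_diskio_get_drive(&pdrv);  // pick a free FatFs drive slot
        if (err != ESP_OK) {
            return err;
        }
        err = ff_diskio_register_raw_partition(pdrv, part);
        if (err != ESP_OK) {
            return err;
        }
        char drv[3] = {(char)('0' + pdrv), ':', '\0'};
        // Mounting triggers ff_raw_initialize, which parses the boot sector.
        return (f_mount(fs, drv, 1) == FR_OK) ? ESP_OK : ESP_FAIL;
    }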
@@ -97,7 +126,7 @@ esp_err_t ff_diskio_register_raw_partition(BYTE pdrv, const esp_partition_t* part_handle)
 BYTE ff_diskio_get_pdrv_raw(const esp_partition_t* part_handle)
 {
     for (int i = 0; i < FF_VOLUMES; i++) {
-        if (part_handle == ff_raw_handles[i]) {
+        if (part_handle == s_ff_raw_handles[i]) {
             return i;
         }
     }
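The lookup above is what higher layers use to recover the FatFs drive number from a partition handle, for example when unmounting. A short sketch; ff_diskio_unregister exists in diskio_impl.h, while the 0xFF "not found" sentinel is an assumption, since that return path lies outside the shown hunk:

    // Tear down a raw-flash volume given its partition handle.
    void unregister_raw_partition(const esp_partition_t *part)
    {
        BYTE pdrv = ff_diskio_get_pdrv_raw(part);
        if (pdrv != 0xFF) {            // assumed "not registered" sentinel
            ff_diskio_unregister(pdrv);
        }
    }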
components/fatfs/fatfs_utils/utils.py

@@ -48,8 +48,9 @@ LONG_NAMES_ENCODING: str = 'utf-16'
 SHORT_NAMES_ENCODING: str = 'utf-8'
 
 # compatible with WL_SECTOR_SIZE
-# choices are WL_SECTOR_SIZE_512 and WL_SECTOR_SIZE_4096
-ALLOWED_SECTOR_SIZES: List[int] = [512, 4096]
+# choices for WL are WL_SECTOR_SIZE_512 and WL_SECTOR_SIZE_4096
+ALLOWED_WL_SECTOR_SIZES: List[int] = [512, 4096]
+ALLOWED_SECTOR_SIZES: List[int] = [512, 1024, 2048, 4096]
 
 ALLOWED_SECTORS_PER_CLUSTER: List[int] = [1, 2, 4, 8, 16, 32, 64, 128]
@@ -164,7 +165,7 @@ def split_content_into_sectors(content: bytes, sector_size: int) -> List[bytes]:
     return result
 
 
-def get_args_for_partition_generator(desc: str) -> argparse.Namespace:
+def get_args_for_partition_generator(desc: str, wl: bool) -> argparse.Namespace:
     parser: argparse.ArgumentParser = argparse.ArgumentParser(description=desc)
     parser.add_argument('input_directory',
                         help='Path to the directory that will be encoded into fatfs image')
@@ -177,7 +178,7 @@ def get_args_for_partition_generator(desc: str) -> argparse.Namespace:
     parser.add_argument('--sector_size',
                         default=FATDefaults.SECTOR_SIZE,
                         type=int,
-                        choices=ALLOWED_SECTOR_SIZES,
+                        choices=ALLOWED_WL_SECTOR_SIZES if wl else ALLOWED_SECTOR_SIZES,
                         help='Size of the partition in bytes')
     parser.add_argument('--sectors_per_cluster',
                         default=1,
components/fatfs/fatfsgen.py

@@ -181,9 +181,7 @@ class FATFS:
 
 
 def main() -> None:
-    args = get_args_for_partition_generator('Create a FAT filesystem and populate it with directory content')
-    if args.sector_size != 0x1000:
-        raise NotImplementedError('The sector size not equal to 4096 is currently not supported for read-only mode!')
+    args = get_args_for_partition_generator('Create a FAT filesystem and populate it with directory content', wl=False)
     fatfs = FATFS(sector_size=args.sector_size,
                   sectors_per_cluster=args.sectors_per_cluster,
                   size=args.partition_size,
components/fatfs/wl_fatfsgen.py

@@ -201,8 +201,7 @@ class WLFATFS:
 
 if __name__ == '__main__':
     desc = 'Create a FAT filesystem with support for wear levelling and populate it with directory content'
-    args = get_args_for_partition_generator(desc)
-
+    args = get_args_for_partition_generator(desc, wl=True)
     wl_fatfs = WLFATFS(sectors_per_cluster=args.sectors_per_cluster,
                        size=args.partition_size,
                        sector_size=args.sector_size,
tools/ci/check_copyright_ignore.txt

@@ -649,7 +649,6 @@ components/esp_wifi/src/smartconfig.c
 components/esp_wifi/test/test_wifi_init.c
 components/fatfs/diskio/diskio.c
 components/fatfs/diskio/diskio_impl.h
-components/fatfs/diskio/diskio_rawflash.c
 components/fatfs/diskio/diskio_rawflash.h
 components/fatfs/diskio/diskio_wl.h
 components/fatfs/port/freertos/ffsystem.c