commit 9454ed4f90
init esp

270 changed files with 135555 additions and 0 deletions

3  .gitignore  vendored  Normal file
@@ -0,0 +1,3 @@
build/
sdkconfig
sdkconfig.old

6  CMakeLists.txt  Normal file
@@ -0,0 +1,6 @@
# The following five lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.16)

include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(controlplane)

1  components/ctx.graphics  Submodule
@@ -0,0 +1 @@
Subproject commit cbdf96f6666341fb667defbfd2189876e0084412

38  components/joltwallet__littlefs/.bumpversion.cfg  Normal file
@@ -0,0 +1,38 @@
[bumpversion]
current_version = 1.20.3
commit = True
tag = True

[bumpversion:file:README.md]
search = littlefs=={current_version}
replace = littlefs=={new_version}

[bumpversion:file:idf_component.yml]
search = "{current_version}"
replace = "{new_version}"

[bumpversion:file:library.json]
search = "{current_version}"
replace = "{new_version}"

[bumpversion:file(number):include/esp_littlefs.h]
search = ESP_LITTLEFS_VERSION_NUMBER "{current_version}"
replace = ESP_LITTLEFS_VERSION_NUMBER "{new_version}"

[bumpversion:file(major):include/esp_littlefs.h]
parse = (?P<major>\d+)
serialize = {major}
search = ESP_LITTLEFS_VERSION_MAJOR {current_version}
replace = ESP_LITTLEFS_VERSION_MAJOR {new_version}

[bumpversion:file(minor):include/esp_littlefs.h]
parse = (?P<minor>\d+)
serialize = {minor}
search = ESP_LITTLEFS_VERSION_MINOR {current_version}
replace = ESP_LITTLEFS_VERSION_MINOR {new_version}

[bumpversion:file(patch):include/esp_littlefs.h]
parse = (?P<patch>\d+)
serialize = {patch}
search = ESP_LITTLEFS_VERSION_PATCH {current_version}
replace = ESP_LITTLEFS_VERSION_PATCH {new_version}

1  components/joltwallet__littlefs/.component_hash  Normal file
@@ -0,0 +1 @@
1808d73e99168f6f3c26dd31799a248484762b3a320ec4962dec11a145f4277f

11  components/joltwallet__littlefs/.gitignore  vendored  Normal file
@@ -0,0 +1,11 @@
build/
sdkconfig
sdkconfig.old

example/build/
example/sdkconfig
example/sdkconfig.old
example/dependencies.lock

*.DS_Store
*/.cache

3  components/joltwallet__littlefs/.gitmodules  vendored  Normal file
@@ -0,0 +1,3 @@
[submodule "main/littlefs"]
	path = src/littlefs
	url = https://github.com/littlefs-project/littlefs.git

1  components/joltwallet__littlefs/CHECKSUMS.json  Normal file
File diff suppressed because one or more lines are too long

47  components/joltwallet__littlefs/CMakeLists.txt  Normal file
@@ -0,0 +1,47 @@
cmake_minimum_required(VERSION 3.10)

file(GLOB SOURCES src/littlefs/*.c)
list(APPEND SOURCES src/esp_littlefs.c src/littlefs_esp_part.c src/lfs_config.c)

if(IDF_TARGET STREQUAL "esp8266")
    # ESP8266 configuration here
else()
    # non-ESP8266 configuration
    list(APPEND pub_requires sdmmc)

    if(CONFIG_LITTLEFS_SDMMC_SUPPORT)
        list(APPEND SOURCES src/littlefs_sdmmc.c)
    endif()
endif()

list(APPEND pub_requires esp_partition)
list(APPEND priv_requires esptool_py spi_flash vfs)

idf_component_register(
    SRCS ${SOURCES}
    INCLUDE_DIRS include
    PRIV_INCLUDE_DIRS src
    REQUIRES ${pub_requires}
    PRIV_REQUIRES ${priv_requires}
)

set_source_files_properties(
    ${SOURCES}
    PROPERTIES COMPILE_FLAGS "-DLFS_CONFIG=lfs_config.h"
)

if(CONFIG_LITTLEFS_FCNTL_GET_PATH)
    target_compile_definitions(${COMPONENT_LIB} PUBLIC -DF_GETPATH=${CONFIG_LITTLEFS_FCNTL_F_GETPATH_VALUE})
endif()

if(CONFIG_LITTLEFS_MULTIVERSION)
    target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_MULTIVERSION)
endif()

if(CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE)
    target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_NO_MALLOC)
endif()

if(NOT CONFIG_LITTLEFS_ASSERTS)
    target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_NO_ASSERT)
endif()

273  components/joltwallet__littlefs/Kconfig  Normal file
@@ -0,0 +1,273 @@
menu "LittleFS"

    config LITTLEFS_SDMMC_SUPPORT
        bool "SDMMC support (requires ESP-IDF v5+)"
        default n
        help
            Toggle SD card support.
            This requires IDF v5+ as older ESP-IDF versions do not support SD card erase.

    config LITTLEFS_MAX_PARTITIONS
        int "Maximum Number of Partitions"
        default 3
        range 1 10
        help
            Define maximum number of partitions that can be mounted.

    config LITTLEFS_PAGE_SIZE
        int "LITTLEFS logical page size"
        default 256
        range 256 1024
        help
            Logical page size of LITTLEFS partition, in bytes. Must be a multiple
            of the flash page size (which is usually 256 bytes).
            Larger page sizes reduce overhead when storing large files, and
            improve filesystem performance when reading large files.
            Smaller page sizes reduce overhead when storing small (< page size)
            files.

    config LITTLEFS_OBJ_NAME_LEN
        int "Maximum object name length including NULL terminator."
        default 64
        range 16 1022
        help
            Includes NULL-terminator. If flashing a prebuilt filesystem image,
            rebuild the filesystem image if this value changes.
            mklittlefs, the tool that generates the image, will automatically be rebuilt.
            If downloading a pre-built release of mklittlefs, it was most likely
            built with LFS_NAME_MAX=32 and should not be used.

    config LITTLEFS_READ_SIZE
        int "Minimum size of a block read."
        default 128
        help
            Minimum size of a block read. All read operations will be a
            multiple of this value.

    config LITTLEFS_WRITE_SIZE
        int "Minimum size of a block write."
        default 128
        help
            Minimum size of a block program. All write operations will be a
            multiple of this value.

    config LITTLEFS_LOOKAHEAD_SIZE
        int "Look ahead size."
        default 128
        help
            Look ahead size. Must be a multiple of 8.

    config LITTLEFS_CACHE_SIZE
        int "Cache Size"
        default 512
        help
            Size of block caches. Each cache buffers a portion of a block in RAM.
            The littlefs needs a read cache, a program cache, and one additional
            cache per file. Larger caches can improve performance by storing more
            data and reducing the number of disk accesses. Must be a multiple of
            the read and program sizes, and a factor of the block size (4096).

    config LITTLEFS_BLOCK_CYCLES
        int "LittleFS wear-leveling block cycles"
        default 512
        range -1 1024
        help
            Number of erase cycles before littlefs evicts metadata logs and moves
            the metadata to another block. Suggested values are in the
            range 100-1000, with large values having better performance at the cost
            of less consistent wear distribution.
            Set to -1 to disable block-level wear-leveling.

    config LITTLEFS_USE_MTIME
        bool "Save file modification time"
        default "y"
        help
            Saves a timestamp on modification. Uses an additional 4 bytes.

    config LITTLEFS_USE_ONLY_HASH
        bool "Don't store filepath in the file descriptor"
        default "n"
        help
            Records the filepath only as a 32-bit hash in the file descriptor instead
            of the entire filepath. Saves approximately `sizeof(filepath)` bytes
            per file descriptor.
            If enabled, functionality (like fstat) that requires the file path
            from the file descriptor will not work.
            In rare cases, may cause unlinking or renaming issues (unlikely) if
            there's a hash collision between an open filepath and a filepath
            to be modified.

    config LITTLEFS_HUMAN_READABLE
        bool "Make errno human-readable"
        default "n"
        help
            Converts LittleFS error codes into human-readable strings.
            May increase binary size depending on logging level.

    choice LITTLEFS_MTIME
        prompt "mtime attribute options"
        depends on LITTLEFS_USE_MTIME
        default LITTLEFS_MTIME_USE_SECONDS
        help
            Save an additional 4-byte attribute. Options listed below.

    config LITTLEFS_MTIME_USE_SECONDS
        bool "Use Seconds"
        help
            Saves timestamp on modification.

    config LITTLEFS_MTIME_USE_NONCE
        bool "Use Nonce"
        help
            Saves a nonce on modification; intended for detecting file changes
            on systems without access to an RTC.

            A file whose nonce is the same as it was at a previous time has a
            high probability of not having been modified.

            Upon file modification, the nonce is incremented by one. Upon file
            creation, a random nonce is assigned.

            There is a very slim chance that a file will have the same nonce if
            it is deleted and created again (approx 1 in 4 billion).

    endchoice

    config LITTLEFS_SPIFFS_COMPAT
        bool "Improve SPIFFS drop-in compatibility"
        default "n"
        help
            Enabling this feature allows for greater drop-in compatibility
            when replacing SPIFFS. Since SPIFFS doesn't have folders, and
            folders are just considered as part of a file name, enabling this
            will automatically create folders as necessary to create a file
            instead of throwing an error. Similarly, upon the deletion of the
            last file in a folder, the folder will be deleted. It is recommended
            to only enable this flag as a stop-gap solution.

    config LITTLEFS_FLUSH_FILE_EVERY_WRITE
        bool "Flush file to flash after each write operation"
        default "n"
        help
            Enabling this feature extends SPIFFS capability.
            In SPIFFS, data is written immediately to the flash storage when the fflush() function is called.
            In LittleFS, flush() does not write data to the flash, and an fsync() call is needed afterwards.
            With this feature, fflush() will write data to the storage.

    config LITTLEFS_OPEN_DIR
        bool "Support opening directory"
        default "n"
        depends on !LITTLEFS_USE_ONLY_HASH && LITTLEFS_SPIFFS_COMPAT
        help
            Support opening a directory via the following API:

                int fd = open("my_directory", O_DIRECTORY);

    config LITTLEFS_FCNTL_GET_PATH
        bool "Support getting file or directory path"
        default "n"
        depends on !LITTLEFS_USE_ONLY_HASH
        help
            Support getting a file or directory path via the following APIs:

                char buffer[MAXPATHLEN];

                int fd = open("my_file", flags);
                fcntl(fd, F_GETPATH, buffer);

    config LITTLEFS_FCNTL_F_GETPATH_VALUE
        int "Value of command F_GETPATH"
        default 20
        depends on LITTLEFS_FCNTL_GET_PATH
        help
            ESP-IDF's header file "fcntl.h" doesn't provide the macro "F_GETPATH",
            so we define this macro here.

    config LITTLEFS_MULTIVERSION
        bool "Support selecting the LittleFS minor version to write to disk"
        default "n"
        help
            LittleFS 2.6 bumps the on-disk minor version of littlefs from lfs2.0 -> lfs2.1.

            This change is backwards-compatible, but after the first write with the new version,
            the image on disk will no longer be mountable by older versions of littlefs.

            Enabling LITTLEFS_MULTIVERSION allows selecting the on-disk version
            to use when writing, in the form of a 16-bit major version
            + 16-bit minor version. This limits metadata to what is supported by
            older minor versions. Note that some features will be lost. Defaults
            to the most recent minor version when zero.

    choice LITTLEFS_DISK_VERSION
        prompt "LITTLEFS_DISK_VERSION"
        depends on LITTLEFS_MULTIVERSION
        default LITTLEFS_DISK_VERSION_MOST_RECENT
        help
            See LITTLEFS_MULTIVERSION for details.

    config LITTLEFS_DISK_VERSION_MOST_RECENT
        bool "Write the most recent LittleFS version"

    config LITTLEFS_DISK_VERSION_2_1
        bool "Write LittleFS 2.1"

    config LITTLEFS_DISK_VERSION_2_0
        bool "Write LittleFS 2.0 (no forward-looking erase-state CRCs)"

    endchoice

    choice LITTLEFS_MALLOC_STRATEGY
        prompt "Buffer allocation strategy"
        default LITTLEFS_MALLOC_STRATEGY_DEFAULT
        help
            Maps lfs_malloc to the ESP-IDF capabilities-based memory allocator or
            disables dynamic allocation in favour of user-provided static buffers.

    config LITTLEFS_MALLOC_STRATEGY_DISABLE
        bool "Static buffers only"
        help
            Disallow dynamic allocation; static buffers must be provided by the calling application.

    config LITTLEFS_MALLOC_STRATEGY_DEFAULT
        bool "Default heap selection"
        help
            Uses an automatic allocation strategy. On systems with heap in SPIRAM, if
            the allocation size does not exceed SPIRAM_MALLOC_ALWAYSINTERNAL then internal
            heap allocation is preferred, otherwise allocation will be attempted from the
            SPIRAM heap.

    config LITTLEFS_MALLOC_STRATEGY_INTERNAL
        bool "Internal heap"
        help
            Uses ESP-IDF heap_caps_malloc to allocate from the internal heap.

    config LITTLEFS_MALLOC_STRATEGY_SPIRAM
        bool "SPIRAM heap"
        depends on SPIRAM_USE_MALLOC || SPIRAM_USE_CAPS_ALLOC
        help
            Uses ESP-IDF heap_caps_malloc to allocate from the SPIRAM heap.

    endchoice

    config LITTLEFS_ASSERTS
        bool "Enable asserts"
        default "y"
        help
            Selects whether littlefs performs runtime assert checks.

    config LITTLEFS_MMAP_PARTITION
        bool "Memory map LITTLEFS partitions"
        default "n"
        help
            Use esp_partition_mmap to map the partitions to memory, which can provide a significant
            performance boost in some cases. Make sure the chip you're using has enough available address
            space to map the partition (for the ESP32 there is 4MB available).

    config LITTLEFS_WDT_RESET
        bool "Reset task watchdog during flash operations"
        default "n"
        help
            Enable calling esp_task_wdt_reset() during flash read/write/erase operations
            to prevent task watchdog timeouts during long-running filesystem operations.

endmenu

7  components/joltwallet__littlefs/LICENSE  Normal file
@@ -0,0 +1,7 @@
Copyright 2020 Brian Pugh

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

28  components/joltwallet__littlefs/Makefile  Normal file
@@ -0,0 +1,28 @@
PROJECT_NAME := littlefs

EXTRA_COMPONENT_DIRS := \
	$(abspath .) \
	$(abspath unit_tester) \
	$(IDF_PATH)/tools/unit-test-app/components/

CFLAGS += \
	-Werror

include $(IDF_PATH)/make/project.mk

.PHONY: tests

tests-build:
	$(MAKE) \
		TEST_COMPONENTS='src'

tests:
	$(MAKE) \
		TEST_COMPONENTS='src' \
		flash monitor;

tests-enc:
	$(MAKE) \
		TEST_COMPONENTS='src' \
		encrypted-flash monitor;

267  components/joltwallet__littlefs/README.md  Normal file
@@ -0,0 +1,267 @@
LittleFS for ESP-IDF.

# What is LittleFS?

[LittleFS](https://github.com/ARMmbed/littlefs) is a small fail-safe filesystem
for microcontrollers. We ported LittleFS to esp-idf (specifically, the ESP32)
because SPIFFS was too slow, and FAT was too fragile.

# How to Use

## ESP-IDF

There are two ways to add this component to your project:

1. As an ESP-IDF managed component: In your project directory run

```
idf.py add-dependency joltwallet/littlefs==1.20.3
```

2. As a submodule: In your project, add this as a submodule to your `components/` directory.

```
git submodule add https://github.com/joltwallet/esp_littlefs.git
git submodule update --init --recursive
```

The library can be configured via `idf.py menuconfig` under `Component config->LittleFS`.

#### Example

User @wreyford has kindly provided a [demo repo](https://github.com/wreyford/demo_esp_littlefs) showing the use of `esp_littlefs`. A modified copy exists in the `example/` directory.

## PlatformIO

Add the following line to your project's `platformio.ini` file:

```
lib_deps = https://github.com/joltwallet/esp_littlefs.git
```

Example `platformio.ini` file:

```
[env]
platform = espressif32
framework = espidf
monitor_speed = 115200

[common]
lib_deps = https://github.com/joltwallet/esp_littlefs.git

[env:nodemcu-32s]
board = nodemcu-32s
board_build.filesystem = littlefs
board_build.partitions = min_littlefs.csv
lib_deps = ${common.lib_deps}
```

Example `min_littlefs.csv` flash partition layout:

```
# Name, Type, SubType, Offset, Size, Flags
nvs, data, nvs, 0x9000, 0x5000,
otadata, data, ota, 0xe000, 0x2000,
app0, app, ota_0, 0x10000, 0x1E0000,
app1, app, ota_1, 0x1F0000, 0x1E0000,
littlefs, data, littlefs, 0x3D0000, 0x20000,
coredump, data, coredump, 0x3F0000, 0x10000,
```

[Currently, it is required](https://github.com/platformio/platform-espressif32/issues/479) to modify `CMakeLists.txt`. Add the following 2 lines to your project's `CMakeLists.txt`:

```
get_filename_component(configName "${CMAKE_BINARY_DIR}" NAME)
list(APPEND EXTRA_COMPONENT_DIRS "${CMAKE_SOURCE_DIR}/.pio/libdeps/${configName}/esp_littlefs")
```

Example `CMakeLists.txt`:

```
cmake_minimum_required(VERSION 3.16.0)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)

get_filename_component(configName "${CMAKE_BINARY_DIR}" NAME)
list(APPEND EXTRA_COMPONENT_DIRS "${CMAKE_SOURCE_DIR}/.pio/libdeps/${configName}/esp_littlefs")

project(my_project_name_here)
```

To configure LittleFS from PlatformIO, run the following command:

```console
$ pio run -t menuconfig
```

An entry `Component config->LittleFS` should be available for configuration. If not, check your `CMakeLists.txt` configuration.

# Documentation

See the official [ESP-IDF SPIFFS documentation](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/storage/spiffs.html); basically all the functionality is the
same, just replace `spiffs` with `littlefs` in all function calls.

Also see the comments in `include/esp_littlefs.h`.

The slight differences between this configuration and SPIFFS's configuration are in the `esp_vfs_littlefs_conf_t`:

1. The `max_files` field doesn't exist since we removed the file limit, thanks to @X-Ryl669.
2. `grow_on_mount` will expand an existing filesystem to fill the partition. Defaults to `false`.
   * LittleFS filesystems can only grow, they cannot shrink.

### Filesystem Image Creation

At compile time, a filesystem image can be created and flashed to the device by adding the following to your project's `CMakeLists.txt` file:

```
littlefs_create_partition_image(partition_name path_to_folder_containing_files FLASH_IN_PROJECT)
```

If `FLASH_IN_PROJECT` is not specified, the image will still be generated, but you will have to flash it manually using `esptool.py`, `parttool.py`, or a custom build system target.

For example, if your partition table looks like:

```
# Name, Type, SubType, Offset, Size, Flags
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,
graphics, data, spiffs, , 0xF0000,
```

change it to:

```
# Name, Type, SubType, Offset, Size, Flags
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,
graphics, data, littlefs, , 0xF0000,
```

and your project has a folder called `device_graphics/`, your call should be:

```
littlefs_create_partition_image(graphics device_graphics FLASH_IN_PROJECT)
```

# Performance

Here are some naive benchmarks to give a vague indicator of performance.
Tests were performed with the following configuration:

* ESP-IDF: v4.4
* Target: ESP32
* CPU Clock: 160MHz
* Flash SPI Freq: 80MHz
* Flash SPI Mode: QIO

In these tests, FAT has a cache size of 4096 bytes, and SPIFFS has a cache size of 256 bytes.

#### Formatting a 512KB partition

```
FAT: 549,494 us
SPIFFS: 10,715,425 us
LittleFS: 110,997 us
```

#### Writing 5 88KB files

```
FAT: 7,124,812 us
SPIFFS*: 99,138,905 us
LittleFS (cache=128): 8,261,920 us
LittleFS (cache=512 default): 6,356,247 us
LittleFS (cache=4096): 6,026,592 us
*Only wrote 374,784 bytes instead of the benchmark 440,000, so this value is extrapolated
```

In the above test, SPIFFS drastically slows down as the filesystem fills up. Below
is the specific breakdown of file write times for SPIFFS. Not sure what happens
on the last file write.

```
SPIFFS:

88000 bytes written in 2190635 us
88000 bytes written in 2190321 us
88000 bytes written in 5133605 us
88000 bytes written in 16570667 us
22784 bytes written in 73053677 us
```

#### Reading 5 88KB files

```
FAT: 5,685,230 us
SPIFFS*: 5,162,289 us
LittleFS (cache=128): 6,284,142 us
LittleFS (cache=512 default): 5,874,931 us
LittleFS (cache=4096): 5,731,385 us
*Only read 374,784 bytes instead of the benchmark 440,000, so this value is extrapolated
```

#### Deleting 5 88KB files

```
FAT: 680,358 us
SPIFFS*: 1,653,500 us
LittleFS (cache=128): 86,090 us
LittleFS (cache=512 default): 53,705 us
LittleFS (cache=4096): 27,709 us
*The 5th file was smaller, did not extrapolate value.
```

# Tips, Tricks, and Gotchas

* LittleFS operates on blocks, and blocks have a size of 4096 bytes on the ESP32.

* A freshly formatted LittleFS will have 2 blocks in use, making it seem like 8KB are in use.

* The ESP32 has [flash concurrency constraints](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/peripherals/spi_flash/spi_flash_concurrency.html#concurrency-constraints-for-flash-on-spi1).
  When using UART (either for data transfer or generic logging) at the same time, you *MUST* enable the following option in Kconfig:
  `menuconfig > Component config > Driver config > UART > UART ISR in IRAM`.

# Running Unit Tests

To flash the unit-tester app and the unit tests, clone or symbolically link this
component to `$IDF_PATH/tools/unit-test-app/components/littlefs`. Make sure the
folder name is `littlefs`, not `esp_littlefs`. Then, run the following:

```
cd $IDF_PATH/tools/unit-test-app
idf.py menuconfig  # See notes
idf.py -T littlefs -p YOUR_PORT_HERE flash monitor
```

In `menuconfig`:

* Set the partition table to `components/littlefs/partition_table_unit_test_app.csv`

* Double check your crystal frequency `ESP32_XTAL_FREQ_SEL`; my board doesn't work with autodetect.

To test on an encrypted partition, add the `encrypted` flag to the `flash_test` partition
in `partition_table_unit_test_app.csv`, i.e.

```
flash_test, data, spiffs, , 512K, encrypted
```

Also make sure that `CONFIG_SECURE_FLASH_ENC_ENABLED=y` in `menuconfig`.

The unit tester can then be flashed via the command:

```
idf.py -T littlefs -p YOUR_PORT_HERE encrypted-flash monitor
```

# Breaking Changes

* July 22, 2020 - Changed attribute type for file timestamp from `0` to `0x74` ('t' ASCII value).
* May 3, 2023 - All logging tags have been changed to a unified `esp_littlefs`.

# Acknowledgement

This code base was heavily modeled after the SPIFFS esp-idf component.

24  components/joltwallet__littlefs/component.mk  Normal file
@@ -0,0 +1,24 @@
#
# Component Makefile
#

COMPONENT_SRCDIRS := src src/littlefs

COMPONENT_ADD_INCLUDEDIRS := include

COMPONENT_PRIV_INCLUDEDIRS := src

COMPONENT_SUBMODULES := src/littlefs

CFLAGS += \
	-DLFS_CONFIG=lfs_config.h

ifdef CONFIG_LITTLEFS_FCNTL_GET_PATH
CFLAGS += \
	-DF_GETPATH=$(CONFIG_LITTLEFS_FCNTL_F_GETPATH_VALUE)
endif

ifdef CONFIG_LITTLEFS_MULTIVERSION
CFLAGS += \
	-DLFS_MULTIVERSION
endif

9  components/joltwallet__littlefs/example/CMakeLists.txt  Normal file
@@ -0,0 +1,9 @@
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.5)

# Add the root of this git repo to the component search path.
set(EXTRA_COMPONENT_DIRS "../")

include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(demo_esp_littlefs)

11  components/joltwallet__littlefs/example/Makefile  Normal file
@@ -0,0 +1,11 @@
#
# This is a project Makefile. It is assumed the directory this Makefile resides in is a
# project subdirectory.
#

PROJECT_NAME := demo_esp_littlefs

EXTRA_COMPONENT_DIRS := $(realpath ..)

include $(IDF_PATH)/make/project.mk

3  components/joltwallet__littlefs/example/README.md  Normal file
@@ -0,0 +1,3 @@
This example is based on [wreyford's](https://github.com/wreyford/demo_esp_littlefs) demo project.

Modifications were made so that this example project could be built as a part of CI.

@@ -0,0 +1 @@
Example text to compile into a LittleFS disk image to be flashed to the ESP device.

@@ -0,0 +1,7 @@
idf_component_register(SRCS "demo_esp_littlefs.c"
                       INCLUDE_DIRS "."
                       )

# Note: you must have a partition named the first argument (here it's "littlefs")
# in your partition table csv file.
littlefs_create_partition_image(littlefs ../flash_data FLASH_IN_PROJECT)

@@ -0,0 +1,5 @@
#
# "main" pseudo-component makefile.
#
# (Uses default behaviour of compiling all source files in directory, adding 'include' to include path.)

166  components/joltwallet__littlefs/example/main/demo_esp_littlefs.c  Normal file
@@ -0,0 +1,166 @@
/* Demo ESP LittleFS Example

   This example code is in the Public Domain (or CC0 licensed, at your option.)

   Unless required by applicable law or agreed to in writing, this
   software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, either express or implied.
*/
#include "esp_err.h"
#include "esp_log.h"
#include "esp_system.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "sdkconfig.h"
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "esp_idf_version.h"
#include "esp_flash.h"
#include "esp_chip_info.h"
#include "spi_flash_mmap.h"

#include "esp_littlefs.h"

static const char *TAG = "demo_esp_littlefs";

void app_main(void)
{
    printf("Demo LittleFs implementation by esp_littlefs!\n");
    printf(" https://github.com/joltwallet/esp_littlefs\n");

    /* Print chip information */
    esp_chip_info_t chip_info;
    esp_chip_info(&chip_info);
    printf("This is %s chip with %d CPU cores, WiFi%s%s, ",
           CONFIG_IDF_TARGET,
           chip_info.cores,
           (chip_info.features & CHIP_FEATURE_BT) ? "/BT" : "",
           (chip_info.features & CHIP_FEATURE_BLE) ? "/BLE" : "");

    printf("silicon revision %d, ", chip_info.revision);

    uint32_t size_flash_chip = 0;
    esp_flash_get_size(NULL, &size_flash_chip);
    printf("%uMB %s flash\n", (unsigned int)size_flash_chip >> 20,
           (chip_info.features & CHIP_FEATURE_EMB_FLASH) ? "embedded" : "external");

    printf("Free heap: %u\n", (unsigned int) esp_get_free_heap_size());

    printf("Now we are starting the LittleFs Demo ...\n");

    ESP_LOGI(TAG, "Initializing LittleFS");

    esp_vfs_littlefs_conf_t conf = {
        .base_path = "/littlefs",
        .partition_label = "littlefs",
        .format_if_mount_failed = true,
        .dont_mount = false,
    };

    // Use settings defined above to initialize and mount LittleFS filesystem.
    // Note: esp_vfs_littlefs_register is an all-in-one convenience function.
    esp_err_t ret = esp_vfs_littlefs_register(&conf);

    if (ret != ESP_OK)
    {
        if (ret == ESP_FAIL)
        {
            ESP_LOGE(TAG, "Failed to mount or format filesystem");
        }
        else if (ret == ESP_ERR_NOT_FOUND)
        {
            ESP_LOGE(TAG, "Failed to find LittleFS partition");
        }
        else
        {
            ESP_LOGE(TAG, "Failed to initialize LittleFS (%s)", esp_err_to_name(ret));
        }
        return;
    }

    size_t total = 0, used = 0;
    ret = esp_littlefs_info(conf.partition_label, &total, &used);
    if (ret != ESP_OK)
    {
        ESP_LOGE(TAG, "Failed to get LittleFS partition information (%s)", esp_err_to_name(ret));
    }
    else
    {
        ESP_LOGI(TAG, "Partition size: total: %d, used: %d", total, used);
    }

    // Use POSIX and C standard library functions to work with files.
    // First create a file.
    ESP_LOGI(TAG, "Opening file");
    FILE *f = fopen("/littlefs/hello.txt", "w");
    if (f == NULL)
    {
        ESP_LOGE(TAG, "Failed to open file for writing");
        return;
    }
    fprintf(f, "LittleFS Rocks!\n");
    fclose(f);
    ESP_LOGI(TAG, "File written");

    // Check if destination file exists before renaming
    struct stat st;
    if (stat("/littlefs/foo.txt", &st) == 0)
    {
        // Delete it if it exists
        unlink("/littlefs/foo.txt");
    }

    // Rename original file
    ESP_LOGI(TAG, "Renaming file");
    if (rename("/littlefs/hello.txt", "/littlefs/foo.txt") != 0)
    {
        ESP_LOGE(TAG, "Rename failed");
        return;
    }

    // Open renamed file for reading
    ESP_LOGI(TAG, "Reading file");
    f = fopen("/littlefs/foo.txt", "r");
    if (f == NULL)
    {
        ESP_LOGE(TAG, "Failed to open file for reading");
        return;
    }

    char line[128];
    char *pos;

    fgets(line, sizeof(line), f);
    fclose(f);
    // strip newline
    pos = strchr(line, '\n');
    if (pos)
    {
        *pos = '\0';
    }
    ESP_LOGI(TAG, "Read from file: '%s'", line);

    ESP_LOGI(TAG, "Reading from flashed filesystem example.txt");
    f = fopen("/littlefs/example.txt", "r");
    if (f == NULL)
    {
        ESP_LOGE(TAG, "Failed to open file for reading");
        return;
    }
    fgets(line, sizeof(line), f);
    fclose(f);
    // strip newline
    pos = strchr(line, '\n');
    if (pos)
    {
        *pos = '\0';
    }
    ESP_LOGI(TAG, "Read from file: '%s'", line);

    // All done, unmount partition and disable LittleFS
    esp_vfs_littlefs_unregister(conf.partition_label);
    ESP_LOGI(TAG, "LittleFS unmounted");
}

@@ -0,0 +1,6 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you have increased the bootloader size, make sure to update the offsets to avoid overlap
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,
littlefs, data, littlefs, , 0xF0000,

12  components/joltwallet__littlefs/example/sdkconfig.defaults  Normal file
@@ -0,0 +1,12 @@
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions_demo_esp_littlefs.csv"

#
# Serial flasher config
#
CONFIG_ESPTOOLPY_BAUD_921600B=y
CONFIG_ESPTOOLPY_COMPRESSED=y
CONFIG_ESPTOOLPY_MONITOR_BAUD_CONSOLE=y

# BOOTLOADER
CONFIG_BOOTLOADER_LOG_LEVEL_WARN=y

9  components/joltwallet__littlefs/idf_component.yml  Normal file
@@ -0,0 +1,9 @@
dependencies:
  idf: '>=5.0'
description: LittleFS is a small fail-safe filesystem for micro-controllers.
repository: git://github.com/joltwallet/esp_littlefs.git
repository_info:
  commit_sha: 8274371dc5912196f66ac3e71dbb6291760cb8b0
  path: .
  url: https://github.com/joltwallet/esp_littlefs
version: 1.20.3

@@ -0,0 +1 @@
littlefs-python==0.15.0

212  components/joltwallet__littlefs/include/esp_littlefs.h  Normal file
@@ -0,0 +1,212 @@
#ifndef ESP_LITTLEFS_H__
#define ESP_LITTLEFS_H__

#include "sdkconfig.h"
#include "esp_err.h"
#include "esp_idf_version.h"
#include <stdbool.h>
#include "esp_partition.h"

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
#include <sdmmc_cmd.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define ESP_LITTLEFS_VERSION_NUMBER "1.20.3"
#define ESP_LITTLEFS_VERSION_MAJOR 1
#define ESP_LITTLEFS_VERSION_MINOR 20
#define ESP_LITTLEFS_VERSION_PATCH 3

#ifdef ESP8266
// The ESP8266 RTOS SDK enables VFS DIR support by default
#define CONFIG_VFS_SUPPORT_DIR 1
#endif

#if CONFIG_VFS_SUPPORT_DIR
#define ESP_LITTLEFS_ENABLE_FTRUNCATE
#endif // CONFIG_VFS_SUPPORT_DIR

/**
 * Configuration structure for esp_vfs_littlefs_register.
 */
typedef struct {
    const char *base_path;            /**< Mounting point. */
    const char *partition_label;      /**< Label of partition to use. If partition_label, partition, and sdcard are all NULL,
                                           then the first partition with data subtype 'littlefs' will be used. */
    const esp_partition_t* partition; /**< partition to use if partition_label is NULL */

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
    sdmmc_card_t *sdcard;             /**< SD card handle to use if both esp_partition handle & partition label is NULL */
#endif

    uint8_t format_if_mount_failed:1; /**< Format the file system if it fails to mount. */
    uint8_t read_only:1;              /**< Mount the partition as read-only. */
    uint8_t dont_mount:1;             /**< Don't attempt to mount. */
    uint8_t grow_on_mount:1;          /**< Grow filesystem to match partition size on mount. */
} esp_vfs_littlefs_conf_t;

/**
 * Register and mount (if configured to) littlefs to VFS with given path prefix.
 *
 * @param conf Pointer to esp_vfs_littlefs_conf_t configuration structure
 *
 * @return
 *          - ESP_OK                  if success
 *          - ESP_ERR_NO_MEM          if objects could not be allocated
 *          - ESP_ERR_INVALID_STATE   if already mounted or partition is encrypted
 *          - ESP_ERR_NOT_FOUND       if partition for littlefs was not found
 *          - ESP_FAIL                if mount or format fails
 */
esp_err_t esp_vfs_littlefs_register(const esp_vfs_littlefs_conf_t * conf);

/**
 * Unregister and unmount littlefs from VFS
 *
 * @param partition_label Label of the partition to unregister.
 *
 * @return
 *          - ESP_OK if successful
 *          - ESP_ERR_INVALID_STATE already unregistered
 */
esp_err_t esp_vfs_littlefs_unregister(const char* partition_label);

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
/**
 * Unregister and unmount LittleFS from VFS for SD card
 *
 * @param sdcard SD card to unregister.
 *
 * @return
 *          - ESP_OK if successful
 *          - ESP_ERR_INVALID_STATE already unregistered
 */
esp_err_t esp_vfs_littlefs_unregister_sdmmc(sdmmc_card_t *sdcard);
#endif

/**
 * Unregister and unmount littlefs from VFS
 *
 * @param partition partition to unregister.
 *
 * @return
 *          - ESP_OK if successful
 *          - ESP_ERR_INVALID_STATE already unregistered
 */
esp_err_t esp_vfs_littlefs_unregister_partition(const esp_partition_t* partition);

/**
 * Check if littlefs is mounted
 *
 * @param partition_label Label of the partition to check.
 *
 * @return
 *          - true if mounted
 *          - false if not mounted
 */
bool esp_littlefs_mounted(const char* partition_label);

/**
 * Check if littlefs is mounted
 *
 * @param partition partition to check.
 *
 * @return
 *          - true if mounted
 *          - false if not mounted
 */
bool esp_littlefs_partition_mounted(const esp_partition_t* partition);

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
/**
 * Check if littlefs is mounted
 *
 * @param sdcard SD card to check.
 *
 * @return
 *          - true if mounted
 *          - false if not mounted
 */
bool esp_littlefs_sdmmc_mounted(sdmmc_card_t *sdcard);
#endif

/**
 * Format the littlefs partition
 *
 * @param partition_label Label of the partition to format.
 * @return
 *          - ESP_OK if successful
 *          - ESP_FAIL on error
 */
esp_err_t esp_littlefs_format(const char* partition_label);

/**
 * Format the littlefs partition
 *
 * @param partition partition to format.
 * @return
 *          - ESP_OK if successful
 *          - ESP_FAIL on error
 */
esp_err_t esp_littlefs_format_partition(const esp_partition_t* partition);

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
/**
 * Format the LittleFS on a SD card
 *
 * @param sdcard SD card to format
 * @return
 *          - ESP_OK if successful
 *          - ESP_FAIL on error
 */
esp_err_t esp_littlefs_format_sdmmc(sdmmc_card_t *sdcard);
#endif

/**
 * Get information for littlefs
 *
 * @param partition_label           Optional, label of the partition to get info for.
 * @param[out] total_bytes          Size of the file system
 * @param[out] used_bytes           Current used bytes in the file system
 *
 * @return
 *          - ESP_OK                  if success
 *          - ESP_ERR_INVALID_STATE   if not mounted
 */
esp_err_t esp_littlefs_info(const char* partition_label, size_t* total_bytes, size_t* used_bytes);

/**
 * Get information for littlefs
 *
 * @param partition                 the partition to get info for.
 * @param[out] total_bytes          Size of the file system
 * @param[out] used_bytes           Current used bytes in the file system
 *
 * @return
 *          - ESP_OK                  if success
 *          - ESP_ERR_INVALID_STATE   if not mounted
 */
esp_err_t esp_littlefs_partition_info(const esp_partition_t* partition, size_t *total_bytes, size_t *used_bytes);

#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
/**
 * Get information for littlefs on SD card
 *
 * @param[in] sdcard                the SD card to get info for.
 * @param[out] total_bytes          Size of the file system
 * @param[out] used_bytes           Current used bytes in the file system
 *
 * @return
 *          - ESP_OK                  if success
 *          - ESP_ERR_INVALID_STATE   if not mounted
 */
esp_err_t esp_littlefs_sdmmc_info(sdmmc_card_t *sdcard, size_t *total_bytes, size_t *used_bytes);
#endif

#ifdef __cplusplus
} // extern "C"
#endif

#endif

14  components/joltwallet__littlefs/library.json  Normal file
@@ -0,0 +1,14 @@
{
    "name": "esp_littlefs",
    "version": "1.20.3",
    "description": "LittleFS is a small fail-safe filesystem for micro-controllers.",
    "frameworks": "espidf",
    "platforms": "*",
    "build": {
        "srcFilter": "+<*> -<littlefs/runners> -<littlefs/benches> -<littlefs/tests>",
        "flags": [
            "-I ./src/littlefs/",
            "-DLFS_CONFIG=lfs_config.h"
        ]
    }
}

@@ -0,0 +1,17 @@
# Special partition table for unit test app
#
# Name, Type, SubType, Offset, Size, Flags
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, 0, 0, 0x10000, 2M
# these OTA partitions are used for tests, but can't fit real OTA apps in them
# (done this way so tests can run in 2MB of flash.)
ota_0, 0, ota_0, , 64K
ota_1, 0, ota_1, , 64K
# flash_test partition used for SPI flash tests, WL FAT tests, and SPIFFS tests
fat_store, data, fat, , 528K
spiffs_store, data, spiffs, , 512K
flash_test, data, spiffs, , 512K
named_part, data, littlefs, , 64K

83  components/joltwallet__littlefs/project_include.cmake  Normal file
@@ -0,0 +1,83 @@

# littlefs_create_partition_image
#
# Create a littlefs image of the specified directory on the host during build and optionally
# have the created image flashed using `idf.py flash`

set(littlefs_py_venv "${CMAKE_CURRENT_BINARY_DIR}/littlefs_py_venv")
set(littlefs_py_requirements "${CMAKE_CURRENT_LIST_DIR}/image-building-requirements.txt")

set_directory_properties(PROPERTIES
    ADDITIONAL_CLEAN_FILES "${littlefs_py_venv}"
)

function(littlefs_create_partition_image partition base_dir)
    set(options FLASH_IN_PROJECT)
    set(multi DEPENDS)
    cmake_parse_arguments(arg "${options}" "" "${multi}" "${ARGN}")

    idf_build_get_property(idf_path IDF_PATH)

    get_filename_component(base_dir_full_path ${base_dir} ABSOLUTE)

    partition_table_get_partition_info(size "--partition-name ${partition}" "size")
    partition_table_get_partition_info(offset "--partition-name ${partition}" "offset")

    if("${size}" AND "${offset}")
        set(image_file ${CMAKE_BINARY_DIR}/${partition}.bin)

        if(CMAKE_HOST_WIN32)
            set(littlefs_py "${littlefs_py_venv}/Scripts/littlefs-python.exe")
            add_custom_command(
                OUTPUT ${littlefs_py_venv}
                COMMAND ${PYTHON} -m venv ${littlefs_py_venv} && ${littlefs_py_venv}/Scripts/pip.exe install -r ${littlefs_py_requirements}
                WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
                DEPENDS ${littlefs_py_requirements}
            )
        else()
            set(littlefs_py "${littlefs_py_venv}/bin/littlefs-python")
            add_custom_command(
                OUTPUT ${littlefs_py_venv}
                COMMAND ${PYTHON} -m venv ${littlefs_py_venv} && ${littlefs_py_venv}/bin/pip install -r ${littlefs_py_requirements}
                WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
                DEPENDS ${littlefs_py_requirements}
            )
        endif()

        # Execute LittleFS image generation; this always executes as there is no way to specify for CMake to watch for
        # contents of the base dir changing.

        add_custom_target(littlefs_${partition}_bin ALL
            COMMAND ${littlefs_py} create ${base_dir_full_path} ${image_file} -v --fs-size=${size} --name-max=${CONFIG_LITTLEFS_OBJ_NAME_LEN} --block-size=4096
            DEPENDS ${arg_DEPENDS} ${littlefs_py_venv}
        )

        set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" APPEND PROPERTY
            ADDITIONAL_MAKE_CLEAN_FILES
            ${image_file})

        set(IDF_VER_NO_V "${IDF_VERSION_MAJOR}.${IDF_VERSION_MINOR}")

        if(${IDF_VER_NO_V} VERSION_LESS 5.0)
            message(WARNING "Unsupported/unmaintained/deprecated ESP-IDF version ${IDF_VER}")
        endif()

        idf_component_get_property(main_args esptool_py FLASH_ARGS)
        idf_component_get_property(sub_args esptool_py FLASH_SUB_ARGS)
        esptool_py_flash_target(${partition}-flash "${main_args}" "${sub_args}")
        esptool_py_flash_target_image(${partition}-flash "${partition}" "${offset}" "${image_file}")

        add_dependencies(${partition}-flash littlefs_${partition}_bin)

        if(arg_FLASH_IN_PROJECT)
            esptool_py_flash_target_image(flash "${partition}" "${offset}" "${image_file}")
            add_dependencies(flash littlefs_${partition}_bin)
        endif()

    else()
        set(message "Failed to create littlefs image for partition '${partition}'. "
                    "Check project configuration if using the correct partition table file."
        )
        fail_at_build_time(littlefs_${partition}_bin "${message}")
    endif()
endfunction()

130  components/joltwallet__littlefs/sdkconfig.defaults  Normal file
@@ -0,0 +1,130 @@
#
# Partition Table
#
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partition_table_unit_test_app.csv"
CONFIG_PARTITION_TABLE_FILENAME="partition_table_unit_test_app.csv"
CONFIG_PARTITION_TABLE_OFFSET=0x8000
CONFIG_PARTITION_TABLE_MD5=y

#
# Heap
#
CONFIG_HEAP_POISONING_COMPREHENSIVE=y

#
# Watchdog
#
CONFIG_ESP_TASK_WDT=y
CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0=n
CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1=n

#
# ESP32-specific
#
CONFIG_IDF_TARGET_ESP32=y
CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y
CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ=240

CONFIG_ESP32_XTAL_FREQ_AUTO=y

#
# Serial flasher config
#
CONFIG_ESPTOOLPY_BAUD_921600B=y
CONFIG_ESPTOOLPY_COMPRESSED=y
CONFIG_ESPTOOLPY_FLASHMODE_QIO=y
CONFIG_ESPTOOLPY_FLASHFREQ_80M=y
CONFIG_ESPTOOLPY_FLASHFREQ="80m"
CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y
CONFIG_ESPTOOLPY_FLASHSIZE="4MB"
CONFIG_ESPTOOLPY_BEFORE_RESET=y
CONFIG_ESPTOOLPY_BEFORE="default_reset"
CONFIG_ESPTOOLPY_AFTER_RESET=y
CONFIG_ESPTOOLPY_AFTER_NORESET=n
CONFIG_ESPTOOLPY_AFTER="hard_reset"
CONFIG_ESPTOOLPY_MONITOR_BAUD_CONSOLE=y
CONFIG_ESPTOOLPY_FLASHSIZE="4MB"
CONFIG_ESPTOOLPY_FLASHSIZE_DETECT=n

CONFIG_ESP_CONSOLE_UART_NUM=0

#
# SPI Flash driver
#
CONFIG_SPI_FLASH_VERIFY_WRITE=n
CONFIG_SPI_FLASH_ENABLE_COUNTERS=n
CONFIG_SPI_FLASH_ROM_DRIVER_PATCH=y
CONFIG_SPI_FLASH_DANGEROUS_WRITE_ABORTS=y
CONFIG_SPI_FLASH_DANGEROUS_WRITE_FAILS=n
CONFIG_SPI_FLASH_DANGEROUS_WRITE_ALLOWED=n

#
# SPIFFS Configuration
#
CONFIG_SPIFFS_MAX_PARTITIONS=3

#
# SPIFFS Cache Configuration
#
CONFIG_SPIFFS_CACHE=y
CONFIG_SPIFFS_CACHE_WR=y
CONFIG_SPIFFS_CACHE_STATS=n
CONFIG_SPIFFS_PAGE_CHECK=y
CONFIG_SPIFFS_GC_MAX_RUNS=10
CONFIG_SPIFFS_GC_STATS=n
CONFIG_SPIFFS_PAGE_SIZE=256
CONFIG_SPIFFS_OBJ_NAME_LEN=32
CONFIG_SPIFFS_USE_MAGIC=y
CONFIG_SPIFFS_USE_MAGIC_LENGTH=y
CONFIG_SPIFFS_META_LENGTH=4
CONFIG_SPIFFS_USE_MTIME=n

#
# FAT Filesystem support
#
CONFIG_FATFS_CODEPAGE_DYNAMIC=n
CONFIG_FATFS_CODEPAGE_437=y
CONFIG_FATFS_CODEPAGE_720=n
CONFIG_FATFS_CODEPAGE_737=n
CONFIG_FATFS_CODEPAGE_771=n
CONFIG_FATFS_CODEPAGE_775=n
CONFIG_FATFS_CODEPAGE_850=n
CONFIG_FATFS_CODEPAGE_852=n
CONFIG_FATFS_CODEPAGE_855=n
CONFIG_FATFS_CODEPAGE_857=n
CONFIG_FATFS_CODEPAGE_860=n
CONFIG_FATFS_CODEPAGE_861=n
CONFIG_FATFS_CODEPAGE_862=n
CONFIG_FATFS_CODEPAGE_863=n
CONFIG_FATFS_CODEPAGE_864=n
CONFIG_FATFS_CODEPAGE_865=n
CONFIG_FATFS_CODEPAGE_866=n
CONFIG_FATFS_CODEPAGE_869=n
CONFIG_FATFS_CODEPAGE_932=n
CONFIG_FATFS_CODEPAGE_936=n
CONFIG_FATFS_CODEPAGE_949=n
CONFIG_FATFS_CODEPAGE_950=n
CONFIG_FATFS_CODEPAGE=437
CONFIG_FATFS_LFN_NONE=y
CONFIG_FATFS_LFN_HEAP=n
CONFIG_FATFS_LFN_STACK=n
CONFIG_FATFS_FS_LOCK=0
CONFIG_FATFS_TIMEOUT_MS=10000
CONFIG_FATFS_PER_FILE_CACHE=y

CONFIG_UNITY_FREERTOS_PRIORITY=5
CONFIG_UNITY_FREERTOS_CPU=0
CONFIG_UNITY_FREERTOS_STACK_SIZE=12000
CONFIG_UNITY_WARN_LEAK_LEVEL_GENERAL=255
CONFIG_UNITY_CRITICAL_LEAK_LEVEL_GENERAL=1024
CONFIG_UNITY_CRITICAL_LEAK_LEVEL_LWIP=4095
CONFIG_UNITY_ENABLE_FLOAT=y
CONFIG_UNITY_ENABLE_DOUBLE=y
CONFIG_UNITY_ENABLE_COLOR=y
CONFIG_UNITY_ENABLE_IDF_TEST_RUNNER=y
CONFIG_UNITY_ENABLE_FIXTURE=y
CONFIG_UNITY_ENABLE_BACKTRACE_ON_FAIL=y

# BOOTLOADER
CONFIG_BOOTLOADER_LOG_LEVEL_WARN=y

2573  components/joltwallet__littlefs/src/esp_littlefs.c  Normal file
File diff suppressed because it is too large

28  components/joltwallet__littlefs/src/lfs_config.c  Normal file
@@ -0,0 +1,28 @@
/*
 * lfs util functions
 *
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include "lfs_config.h"

const char ESP_LITTLEFS_TAG[] = "esp_littlefs";

// Software CRC implementation with small lookup table
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
    static const uint32_t rtable[16] = {
        0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
        0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
        0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
        0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
    };

    const uint8_t *data = buffer;

    for (size_t i = 0; i < size; i++) {
        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
    }

    return crc;
}

244
components/joltwallet__littlefs/src/lfs_config.h
Normal file
@@ -0,0 +1,244 @@
/*
 * lfs utility functions
 *
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef LFS_CFG_H
#define LFS_CFG_H

// System includes
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_log.h"


#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT) || \
    defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) || \
    defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM)
#include <stdlib.h>
#include "esp_heap_caps.h"
#endif

#ifdef CONFIG_LITTLEFS_ASSERTS
#include <assert.h>
#endif

#if !defined(LFS_NO_DEBUG) || \
    !defined(LFS_NO_WARN) || \
    !defined(LFS_NO_ERROR) || \
    defined(LFS_YES_TRACE)
#include <stdio.h>
#endif

#ifdef __cplusplus
extern "C"
{
#endif


// Macros, may be replaced by system specific wrappers. Arguments to these
// macros must not have side-effects as the macros can be removed for a smaller
// code footprint
extern const char ESP_LITTLEFS_TAG[];

// Logging functions
#ifndef LFS_TRACE
#ifdef LFS_YES_TRACE
#define LFS_TRACE_(fmt, ...) \
    ESP_LOGV(ESP_LITTLEFS_TAG, "%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
#else
#define LFS_TRACE(...)
#endif
#endif

#ifndef LFS_DEBUG
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG_(fmt, ...) \
    ESP_LOGD(ESP_LITTLEFS_TAG, "%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
#else
#define LFS_DEBUG(...)
#endif
#endif

#ifndef LFS_WARN
#ifndef LFS_NO_WARN
#define LFS_WARN_(fmt, ...) \
    ESP_LOGW(ESP_LITTLEFS_TAG, "%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
#else
#define LFS_WARN(...)
#endif
#endif

#ifndef LFS_ERROR
#ifndef LFS_NO_ERROR
#define LFS_ERROR_(fmt, ...) \
    ESP_LOGE(ESP_LITTLEFS_TAG, "%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
#else
#define LFS_ERROR(...)
#endif
#endif

// Runtime assertions
#ifdef CONFIG_LITTLEFS_ASSERTS
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif


// Builtin functions, these may be replaced by more efficient
// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
// expensive basic C implementation for debugging purposes

// Min/max functions for unsigned 32-bit numbers
static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
    return (a > b) ? a : b;
}

static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
    return (a < b) ? a : b;
}

// Align to nearest multiple of a size
static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
    return a - (a % alignment);
}

static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
    return lfs_aligndown(a + alignment-1, alignment);
}

// Find the smallest power of 2 greater than or equal to a
static inline uint32_t lfs_npw2(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
    return 32 - __builtin_clz(a-1);
#else
    uint32_t r = 0;
    uint32_t s;
    a -= 1;
    s = (a > 0xffff) << 4; a >>= s; r |= s;
    s = (a > 0xff  ) << 3; a >>= s; r |= s;
    s = (a > 0xf   ) << 2; a >>= s; r |= s;
    s = (a > 0x3   ) << 1; a >>= s; r |= s;
    return (r | (a >> 1)) + 1;
#endif
}

// Count the number of trailing binary zeros in a
// lfs_ctz(0) may be undefined
static inline uint32_t lfs_ctz(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
    return __builtin_ctz(a);
#else
    return lfs_npw2((a & -a) + 1) - 1;
#endif
}

// Count the number of binary ones in a
static inline uint32_t lfs_popc(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
    return __builtin_popcount(a);
#else
    a = a - ((a >> 1) & 0x55555555);
    a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
    return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
#endif
}

// Find the sequence comparison of a and b, this is the distance
// between a and b ignoring overflow
static inline int lfs_scmp(uint32_t a, uint32_t b) {
    return (int)(unsigned)(a - b);
}

// Convert between 32-bit little-endian and native order
static inline uint32_t lfs_fromle32(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && ( \
    (defined(  BYTE_ORDER  ) && defined(  ORDER_LITTLE_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_LITTLE_ENDIAN  ) || \
    (defined(__BYTE_ORDER  ) && defined(__ORDER_LITTLE_ENDIAN  ) && __BYTE_ORDER   == __ORDER_LITTLE_ENDIAN  ) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
    return a;
#elif !defined(LFS_NO_INTRINSICS) && ( \
    (defined(  BYTE_ORDER  ) && defined(  ORDER_BIG_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_BIG_ENDIAN  ) || \
    (defined(__BYTE_ORDER  ) && defined(__ORDER_BIG_ENDIAN  ) && __BYTE_ORDER   == __ORDER_BIG_ENDIAN  ) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
    return __builtin_bswap32(a);
#else
    return (((uint8_t*)&a)[0] <<  0) |
           (((uint8_t*)&a)[1] <<  8) |
           (((uint8_t*)&a)[2] << 16) |
           (((uint8_t*)&a)[3] << 24);
#endif
}

static inline uint32_t lfs_tole32(uint32_t a) {
    return lfs_fromle32(a);
}

// Convert between 32-bit big-endian and native order
static inline uint32_t lfs_frombe32(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && ( \
    (defined(  BYTE_ORDER  ) && defined(  ORDER_LITTLE_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_LITTLE_ENDIAN  ) || \
    (defined(__BYTE_ORDER  ) && defined(__ORDER_LITTLE_ENDIAN  ) && __BYTE_ORDER   == __ORDER_LITTLE_ENDIAN  ) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
    return __builtin_bswap32(a);
#elif !defined(LFS_NO_INTRINSICS) && ( \
    (defined(  BYTE_ORDER  ) && defined(  ORDER_BIG_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_BIG_ENDIAN  ) || \
    (defined(__BYTE_ORDER  ) && defined(__ORDER_BIG_ENDIAN  ) && __BYTE_ORDER   == __ORDER_BIG_ENDIAN  ) || \
    (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
    return a;
#else
    return (((uint8_t*)&a)[0] << 24) |
           (((uint8_t*)&a)[1] << 16) |
           (((uint8_t*)&a)[2] <<  8) |
           (((uint8_t*)&a)[3] <<  0);
#endif
}

static inline uint32_t lfs_tobe32(uint32_t a) {
    return lfs_frombe32(a);
}

// Calculate CRC-32 with polynomial = 0x04c11db7
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);

// Allocate memory, only used if buffers are not provided to littlefs
// For the lookahead buffer, memory must be 32-bit aligned
static inline void *lfs_malloc(size_t size) {
#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT)
    return malloc(size); // Equivalent to heap_caps_malloc_default(size);
#elif defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL)
    return heap_caps_malloc(size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
#elif defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM)
    return heap_caps_malloc(size, MALLOC_CAP_8BIT | MALLOC_CAP_SPIRAM);
#else // CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE or not defined
    (void)size;
    return NULL;
#endif
}

// Deallocate memory, only used if buffers are not provided to littlefs
static inline void lfs_free(void *p) {
#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT) || \
    defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) || \
    defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM)
    free(p);
#else // CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE or not defined
    (void)p;
#endif
}


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
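Editor's note: the alignment and power-of-two helpers above are easy to sanity-check. The sketch below is a standalone, hypothetical test (not part of this commit) and assumes an ESP-IDF build where lfs_config.h and its sdkconfig dependencies are available; the asserts follow directly from the definitions above.

``` c
#include <assert.h>
#include <stdint.h>
#include "lfs_config.h"

int main(void) {
    // lfs_alignup/lfs_aligndown round to the nearest multiple of the alignment
    assert(lfs_alignup(100, 16) == 112);
    assert(lfs_aligndown(100, 16) == 96);

    // lfs_npw2 returns the exponent of the smallest power of two >= a,
    // i.e. (1u << lfs_npw2(a)) >= a
    assert(lfs_npw2(4096) == 12);
    assert(lfs_npw2(4097) == 13);

    // lfs_ctz counts trailing zero bits (block sizes are powers of two)
    assert(lfs_ctz(4096) == 12);
    return 0;
}
```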
4
components/joltwallet__littlefs/src/littlefs/.gitattributes
vendored
Normal file
@@ -0,0 +1,4 @@
# GitHub really wants to mark littlefs as a python project, telling it to
# reclassify our test .toml files as C code (which they are 95% of anyways)
# remedies this
*.toml linguist-language=c
34
components/joltwallet__littlefs/src/littlefs/.gitignore
vendored
Normal file
@@ -0,0 +1,34 @@
# Compilation output
*.o
*.d
*.a
*.ci
*.csv
*.t.*
*.b.*
*.gcno
*.gcda
*.perf
lfs
liblfs.a

# Testing things
runners/test_runner
runners/bench_runner
lfs.code.csv
lfs.data.csv
lfs.stack.csv
lfs.structs.csv
lfs.cov.csv
lfs.perf.csv
lfs.perfbd.csv
lfs.test.csv
lfs.bench.csv

# Misc
tags
.gdb_history
scripts/__pycache__

# Historical, probably should remove at some point
tests/*.toml.*
2173
components/joltwallet__littlefs/src/littlefs/DESIGN.md
Normal file
File diff suppressed because it is too large
25
components/joltwallet__littlefs/src/littlefs/LICENSE.md
Normal file
@@ -0,0 +1,25 @@
Copyright (c) 2022, The littlefs authors.
Copyright (c) 2017, Arm Limited. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used to
  endorse or promote products derived from this software without specific prior
  written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
595
components/joltwallet__littlefs/src/littlefs/Makefile
Normal file
@@ -0,0 +1,595 @@
# overrideable build dir, default is in-place
BUILDDIR ?= .
# overridable target/src/tools/flags/etc
ifneq ($(wildcard test.c main.c),)
TARGET ?= $(BUILDDIR)/lfs
else
TARGET ?= $(BUILDDIR)/liblfs.a
endif


CC       ?= gcc
AR       ?= ar
SIZE     ?= size
CTAGS    ?= ctags
NM       ?= nm
OBJDUMP  ?= objdump
VALGRIND ?= valgrind
GDB      ?= gdb
PERF     ?= perf

# guess clang or gcc (clang sometimes masquerades as gcc because of
# course it does)
ifneq ($(shell $(CC) --version | grep clang),)
NO_GCC = 1
endif

SRC ?= $(filter-out $(wildcard *.t.* *.b.*),$(wildcard *.c))
OBJ := $(SRC:%.c=$(BUILDDIR)/%.o)
DEP := $(SRC:%.c=$(BUILDDIR)/%.d)
ASM := $(SRC:%.c=$(BUILDDIR)/%.s)
CI := $(SRC:%.c=$(BUILDDIR)/%.ci)
GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.gcda)

TESTS ?= $(wildcard tests/*.toml)
TEST_SRC ?= $(SRC) \
	$(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
	runners/test_runner.c
TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner
TEST_A := $(TESTS:%.toml=$(BUILDDIR)/%.t.a.c) \
	$(TEST_SRC:%.c=$(BUILDDIR)/%.t.a.c)
TEST_C := $(TEST_A:%.t.a.c=%.t.c)
TEST_OBJ := $(TEST_C:%.t.c=%.t.o)
TEST_DEP := $(TEST_C:%.t.c=%.t.d)
TEST_CI := $(TEST_C:%.t.c=%.t.ci)
TEST_GCNO := $(TEST_C:%.t.c=%.t.gcno)
TEST_GCDA := $(TEST_C:%.t.c=%.t.gcda)
TEST_PERF := $(TEST_RUNNER:%=%.perf)
TEST_TRACE := $(TEST_RUNNER:%=%.trace)
TEST_CSV := $(TEST_RUNNER:%=%.csv)

BENCHES ?= $(wildcard benches/*.toml)
BENCH_SRC ?= $(SRC) \
	$(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \
	runners/bench_runner.c
BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner
BENCH_A := $(BENCHES:%.toml=$(BUILDDIR)/%.b.a.c) \
	$(BENCH_SRC:%.c=$(BUILDDIR)/%.b.a.c)
BENCH_C := $(BENCH_A:%.b.a.c=%.b.c)
BENCH_OBJ := $(BENCH_C:%.b.c=%.b.o)
BENCH_DEP := $(BENCH_C:%.b.c=%.b.d)
BENCH_CI := $(BENCH_C:%.b.c=%.b.ci)
BENCH_GCNO := $(BENCH_C:%.b.c=%.b.gcno)
BENCH_GCDA := $(BENCH_C:%.b.c=%.b.gcda)
BENCH_PERF := $(BENCH_RUNNER:%=%.perf)
BENCH_TRACE := $(BENCH_RUNNER:%=%.trace)
BENCH_CSV := $(BENCH_RUNNER:%=%.csv)

CFLAGS += -g3
CFLAGS += -I.
CFLAGS += -std=c99 -Wall -Wextra -pedantic
CFLAGS += -Wmissing-prototypes
ifndef NO_GCC
CFLAGS += -fcallgraph-info=su
CFLAGS += -ftrack-macro-expansion=0
endif

ifdef DEBUG
CFLAGS += -O0
else
CFLAGS += -Os
endif
ifdef TRACE
CFLAGS += -DLFS_YES_TRACE
endif
ifdef YES_COV
CFLAGS += --coverage
endif
ifdef YES_PERF
CFLAGS += -fno-omit-frame-pointer
endif
ifdef YES_PERFBD
CFLAGS += -fno-omit-frame-pointer
endif

ifdef VERBOSE
CODEFLAGS += -v
DATAFLAGS += -v
STACKFLAGS += -v
STRUCTSFLAGS += -v
COVFLAGS += -v
PERFFLAGS += -v
PERFBDFLAGS += -v
endif
# forward -j flag
PERFFLAGS += $(filter -j%,$(MAKEFLAGS))
PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS))
ifneq ($(NM),nm)
CODEFLAGS += --nm-path="$(NM)"
DATAFLAGS += --nm-path="$(NM)"
endif
ifneq ($(OBJDUMP),objdump)
CODEFLAGS += --objdump-path="$(OBJDUMP)"
DATAFLAGS += --objdump-path="$(OBJDUMP)"
STRUCTSFLAGS += --objdump-path="$(OBJDUMP)"
PERFFLAGS += --objdump-path="$(OBJDUMP)"
PERFBDFLAGS += --objdump-path="$(OBJDUMP)"
endif
ifneq ($(PERF),perf)
PERFFLAGS += --perf-path="$(PERF)"
endif

TESTFLAGS += -b
BENCHFLAGS += -b
# forward -j flag
TESTFLAGS += $(filter -j%,$(MAKEFLAGS))
BENCHFLAGS += $(filter -j%,$(MAKEFLAGS))
ifdef YES_PERF
TESTFLAGS += -p $(TEST_PERF)
BENCHFLAGS += -p $(BENCH_PERF)
endif
ifdef YES_PERFBD
TESTFLAGS += -t $(TEST_TRACE) --trace-backtrace --trace-freq=100
endif
ifndef NO_PERFBD
BENCHFLAGS += -t $(BENCH_TRACE) --trace-backtrace --trace-freq=100
endif
ifdef YES_TESTMARKS
TESTFLAGS += -o $(TEST_CSV)
endif
ifndef NO_BENCHMARKS
BENCHFLAGS += -o $(BENCH_CSV)
endif
ifdef VERBOSE
TESTFLAGS += -v
TESTCFLAGS += -v
BENCHFLAGS += -v
BENCHCFLAGS += -v
endif
ifdef EXEC
TESTFLAGS += --exec="$(EXEC)"
BENCHFLAGS += --exec="$(EXEC)"
endif
ifneq ($(GDB),gdb)
TESTFLAGS += --gdb-path="$(GDB)"
BENCHFLAGS += --gdb-path="$(GDB)"
endif
ifneq ($(VALGRIND),valgrind)
TESTFLAGS += --valgrind-path="$(VALGRIND)"
BENCHFLAGS += --valgrind-path="$(VALGRIND)"
endif
ifneq ($(PERF),perf)
TESTFLAGS += --perf-path="$(PERF)"
BENCHFLAGS += --perf-path="$(PERF)"
endif

# this is a bit of a hack, but we want to make sure the BUILDDIR
# directory structure is correct before we run any commands
ifneq ($(BUILDDIR),.)
$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
	$(addprefix $(BUILDDIR)/,$(dir \
		$(SRC) \
		$(TESTS) \
		$(TEST_SRC) \
		$(BENCHES) \
		$(BENCH_SRC)))))
endif


# commands

## Build littlefs
.PHONY: all build
all build: $(TARGET)

## Build assembly files
.PHONY: asm
asm: $(ASM)

## Find the total size
.PHONY: size
size: $(OBJ)
	$(SIZE) -t $^

## Generate a ctags file
.PHONY: tags
tags:
	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)

## Show this help text
.PHONY: help
help:
	@$(strip awk '/^## / { \
		sub(/^## /,""); \
		getline rule; \
		while (rule ~ /^(#|\.PHONY|ifdef|ifndef)/) getline rule; \
		gsub(/:.*/, "", rule); \
		printf "  %-25s %s\n", rule, $$0 \
	}' $(MAKEFILE_LIST))

## Find the per-function code size
.PHONY: code
code: CODEFLAGS+=-S
code: $(OBJ) $(BUILDDIR)/lfs.code.csv
	./scripts/code.py $(OBJ) $(CODEFLAGS)

## Compare per-function code size
.PHONY: code-diff
code-diff: $(OBJ)
	./scripts/code.py $^ $(CODEFLAGS) -d $(BUILDDIR)/lfs.code.csv

## Find the per-function data size
.PHONY: data
data: DATAFLAGS+=-S
data: $(OBJ) $(BUILDDIR)/lfs.data.csv
	./scripts/data.py $(OBJ) $(DATAFLAGS)

## Compare per-function data size
.PHONY: data-diff
data-diff: $(OBJ)
	./scripts/data.py $^ $(DATAFLAGS) -d $(BUILDDIR)/lfs.data.csv

## Find the per-function stack usage
.PHONY: stack
stack: STACKFLAGS+=-S
stack: $(CI) $(BUILDDIR)/lfs.stack.csv
	./scripts/stack.py $(CI) $(STACKFLAGS)

## Compare per-function stack usage
.PHONY: stack-diff
stack-diff: $(CI)
	./scripts/stack.py $^ $(STACKFLAGS) -d $(BUILDDIR)/lfs.stack.csv

## Find function sizes
.PHONY: funcs
funcs: SUMMARYFLAGS+=-S
funcs: \
		$(BUILDDIR)/lfs.code.csv \
		$(BUILDDIR)/lfs.data.csv \
		$(BUILDDIR)/lfs.stack.csv
	$(strip ./scripts/summary.py $^ \
		-bfunction \
		-fcode=code_size \
		-fdata=data_size \
		-fstack=stack_limit --max=stack \
		$(SUMMARYFLAGS))

## Compare function sizes
.PHONY: funcs-diff
funcs-diff: SHELL=/bin/bash
funcs-diff: $(OBJ) $(CI)
	$(strip ./scripts/summary.py \
		<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
		<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
		<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
		-bfunction \
		-fcode=code_size \
		-fdata=data_size \
		-fstack=stack_limit --max=stack \
		$(SUMMARYFLAGS) -d <(./scripts/summary.py \
			$(BUILDDIR)/lfs.code.csv \
			$(BUILDDIR)/lfs.data.csv \
			$(BUILDDIR)/lfs.stack.csv \
			-q $(SUMMARYFLAGS) -o-))

## Find struct sizes
.PHONY: structs
structs: STRUCTSFLAGS+=-S
structs: $(OBJ) $(BUILDDIR)/lfs.structs.csv
	./scripts/structs.py $(OBJ) $(STRUCTSFLAGS)

## Compare struct sizes
.PHONY: structs-diff
structs-diff: $(OBJ)
	./scripts/structs.py $^ $(STRUCTSFLAGS) -d $(BUILDDIR)/lfs.structs.csv

## Find the line/branch coverage after a test run
.PHONY: cov
cov: COVFLAGS+=-s
cov: $(GCDA) $(BUILDDIR)/lfs.cov.csv
	$(strip ./scripts/cov.py $(GCDA) \
		$(patsubst %,-F%,$(SRC)) \
		$(COVFLAGS))

## Compare line/branch coverage
.PHONY: cov-diff
cov-diff: $(GCDA)
	$(strip ./scripts/cov.py $^ \
		$(patsubst %,-F%,$(SRC)) \
		$(COVFLAGS) -d $(BUILDDIR)/lfs.cov.csv)

## Find the perf results after bench run with YES_PERF
.PHONY: perf
perf: PERFFLAGS+=-S
perf: $(BENCH_PERF) $(BUILDDIR)/lfs.perf.csv
	$(strip ./scripts/perf.py $(BENCH_PERF) \
		$(patsubst %,-F%,$(SRC)) \
		$(PERFFLAGS))

## Compare perf results
.PHONY: perf-diff
perf-diff: $(BENCH_PERF)
	$(strip ./scripts/perf.py $^ \
		$(patsubst %,-F%,$(SRC)) \
		$(PERFFLAGS) -d $(BUILDDIR)/lfs.perf.csv)

## Find the perfbd results after a bench run
.PHONY: perfbd
perfbd: PERFBDFLAGS+=-S
perfbd: $(BENCH_TRACE) $(BUILDDIR)/lfs.perfbd.csv
	$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $(BENCH_TRACE) \
		$(patsubst %,-F%,$(SRC)) \
		$(PERFBDFLAGS))

## Compare perfbd results
.PHONY: perfbd-diff
perfbd-diff: $(BENCH_TRACE)
	$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
		$(patsubst %,-F%,$(SRC)) \
		$(PERFBDFLAGS) -d $(BUILDDIR)/lfs.perfbd.csv)

## Find a summary of compile-time sizes
.PHONY: summary sizes
summary sizes: \
		$(BUILDDIR)/lfs.code.csv \
		$(BUILDDIR)/lfs.data.csv \
		$(BUILDDIR)/lfs.stack.csv \
		$(BUILDDIR)/lfs.structs.csv
	$(strip ./scripts/summary.py $^ \
		-fcode=code_size \
		-fdata=data_size \
		-fstack=stack_limit --max=stack \
		-fstructs=struct_size \
		-Y $(SUMMARYFLAGS))

## Compare compile-time sizes
.PHONY: summary-diff sizes-diff
summary-diff sizes-diff: SHELL=/bin/bash
summary-diff sizes-diff: $(OBJ) $(CI)
	$(strip ./scripts/summary.py \
		<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
		<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
		<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
		<(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
		-fcode=code_size \
		-fdata=data_size \
		-fstack=stack_limit --max=stack \
		-fstructs=struct_size \
		-Y $(SUMMARYFLAGS) -d <(./scripts/summary.py \
			$(BUILDDIR)/lfs.code.csv \
			$(BUILDDIR)/lfs.data.csv \
			$(BUILDDIR)/lfs.stack.csv \
			$(BUILDDIR)/lfs.structs.csv \
			-q $(SUMMARYFLAGS) -o-))

## Build the test-runner
.PHONY: test-runner build-test
test-runner build-test: CFLAGS+=-Wno-missing-prototypes
ifndef NO_COV
test-runner build-test: CFLAGS+=--coverage
endif
ifdef YES_PERF
test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
endif
ifdef YES_PERFBD
test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
endif
# note we remove some binary dependent files during compilation,
# otherwise it's way too easy to end up with outdated results
test-runner build-test: $(TEST_RUNNER)
ifndef NO_COV
	rm -f $(TEST_GCDA)
endif
ifdef YES_PERF
	rm -f $(TEST_PERF)
endif
ifdef YES_PERFBD
	rm -f $(TEST_TRACE)
endif

## Run the tests, -j enables parallel tests
.PHONY: test
test: test-runner
	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS)

## List the tests
.PHONY: test-list
test-list: test-runner
	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS) -l

## Summarize the testmarks
.PHONY: testmarks
testmarks: SUMMARYFLAGS+=-spassed
testmarks: $(TEST_CSV) $(BUILDDIR)/lfs.test.csv
	$(strip ./scripts/summary.py $(TEST_CSV) \
		-bsuite \
		-fpassed=test_passed \
		$(SUMMARYFLAGS))

## Compare testmarks against a previous run
.PHONY: testmarks-diff
testmarks-diff: $(TEST_CSV)
	$(strip ./scripts/summary.py $^ \
		-bsuite \
		-fpassed=test_passed \
		$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.test.csv)

## Build the bench-runner
.PHONY: bench-runner build-bench
bench-runner build-bench: CFLAGS+=-Wno-missing-prototypes
ifdef YES_COV
bench-runner build-bench: CFLAGS+=--coverage
endif
ifdef YES_PERF
bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
endif
ifndef NO_PERFBD
bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
endif
# note we remove some binary dependent files during compilation,
# otherwise it's way too easy to end up with outdated results
bench-runner build-bench: $(BENCH_RUNNER)
ifdef YES_COV
	rm -f $(BENCH_GCDA)
endif
ifdef YES_PERF
	rm -f $(BENCH_PERF)
endif
ifndef NO_PERFBD
	rm -f $(BENCH_TRACE)
endif

## Run the benchmarks, -j enables parallel benchmarks
.PHONY: bench
bench: bench-runner
	./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS)

## List the benchmarks
.PHONY: bench-list
bench-list: bench-runner
	./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) -l

## Summarize the benchmarks
.PHONY: benchmarks
benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded
benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv
	$(strip ./scripts/summary.py $(BENCH_CSV) \
		-bsuite \
		-freaded=bench_readed \
		-fproged=bench_proged \
		-ferased=bench_erased \
		$(SUMMARYFLAGS))

## Compare benchmarks against a previous run
.PHONY: benchmarks-diff
benchmarks-diff: $(BENCH_CSV)
	$(strip ./scripts/summary.py $^ \
		-bsuite \
		-freaded=bench_readed \
		-fproged=bench_proged \
		-ferased=bench_erased \
		$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.bench.csv)


# rules
-include $(DEP)
-include $(TEST_DEP)
-include $(BENCH_DEP)
.SUFFIXES:
.SECONDARY:

$(BUILDDIR)/lfs: $(OBJ)
	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

$(BUILDDIR)/liblfs.a: $(OBJ)
	$(AR) rcs $@ $^

$(BUILDDIR)/lfs.code.csv: $(OBJ)
	./scripts/code.py $^ -q $(CODEFLAGS) -o $@

$(BUILDDIR)/lfs.data.csv: $(OBJ)
	./scripts/data.py $^ -q $(DATAFLAGS) -o $@

$(BUILDDIR)/lfs.stack.csv: $(CI)
	./scripts/stack.py $^ -q $(STACKFLAGS) -o $@

$(BUILDDIR)/lfs.structs.csv: $(OBJ)
	./scripts/structs.py $^ -q $(STRUCTSFLAGS) -o $@

$(BUILDDIR)/lfs.cov.csv: $(GCDA)
	$(strip ./scripts/cov.py $^ \
		$(patsubst %,-F%,$(SRC)) \
		-q $(COVFLAGS) -o $@)

$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF)
	$(strip ./scripts/perf.py $^ \
		$(patsubst %,-F%,$(SRC)) \
		-q $(PERFFLAGS) -o $@)

$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE)
	$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
		$(patsubst %,-F%,$(SRC)) \
		-q $(PERFBDFLAGS) -o $@)

$(BUILDDIR)/lfs.test.csv: $(TEST_CSV)
	cp $^ $@

$(BUILDDIR)/lfs.bench.csv: $(BENCH_CSV)
	cp $^ $@

$(BUILDDIR)/runners/test_runner: $(TEST_OBJ)
	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ)
	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@

# our main build rule generates .o, .d, and .ci files, the latter
# used for stack analysis
$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c
	$(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o

$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: $(BUILDDIR)/%.c
	$(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o

$(BUILDDIR)/%.s: %.c
	$(CC) -S $(CFLAGS) $< -o $@

$(BUILDDIR)/%.c: %.a.c
	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@

$(BUILDDIR)/%.c: $(BUILDDIR)/%.a.c
	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@

$(BUILDDIR)/%.t.a.c: %.toml
	./scripts/test.py -c $< $(TESTCFLAGS) -o $@

$(BUILDDIR)/%.t.a.c: %.c $(TESTS)
	./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@

$(BUILDDIR)/%.b.a.c: %.toml
	./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@

$(BUILDDIR)/%.b.a.c: %.c $(BENCHES)
	./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@

## Clean everything
.PHONY: clean
clean:
	rm -f $(BUILDDIR)/lfs
	rm -f $(BUILDDIR)/liblfs.a
	rm -f $(BUILDDIR)/lfs.code.csv
	rm -f $(BUILDDIR)/lfs.data.csv
	rm -f $(BUILDDIR)/lfs.stack.csv
	rm -f $(BUILDDIR)/lfs.structs.csv
	rm -f $(BUILDDIR)/lfs.cov.csv
	rm -f $(BUILDDIR)/lfs.perf.csv
	rm -f $(BUILDDIR)/lfs.perfbd.csv
	rm -f $(BUILDDIR)/lfs.test.csv
	rm -f $(BUILDDIR)/lfs.bench.csv
	rm -f $(OBJ)
	rm -f $(DEP)
	rm -f $(ASM)
	rm -f $(CI)
	rm -f $(TEST_RUNNER)
	rm -f $(TEST_A)
	rm -f $(TEST_C)
	rm -f $(TEST_OBJ)
	rm -f $(TEST_DEP)
	rm -f $(TEST_CI)
	rm -f $(TEST_GCNO)
	rm -f $(TEST_GCDA)
	rm -f $(TEST_PERF)
	rm -f $(TEST_TRACE)
	rm -f $(TEST_CSV)
	rm -f $(BENCH_RUNNER)
	rm -f $(BENCH_A)
	rm -f $(BENCH_C)
	rm -f $(BENCH_OBJ)
	rm -f $(BENCH_DEP)
	rm -f $(BENCH_CI)
	rm -f $(BENCH_GCNO)
	rm -f $(BENCH_GCDA)
	rm -f $(BENCH_PERF)
	rm -f $(BENCH_TRACE)
	rm -f $(BENCH_CSV)
342
components/joltwallet__littlefs/src/littlefs/README.md
Normal file
@@ -0,0 +1,342 @@
## littlefs

A little fail-safe filesystem designed for microcontrollers.

```
   | | |     .---._____
  .-----.   |          |
--|o    |---| littlefs |
--|     |---|          |
  '-----'   '----------'
   | | |
```

**Power-loss resilience** - littlefs is designed to handle random power
failures. All file operations have strong copy-on-write guarantees and if
power is lost the filesystem will fall back to the last known good state.

**Dynamic wear leveling** - littlefs is designed with flash in mind, and
provides wear leveling over dynamic blocks. Additionally, littlefs can
detect bad blocks and work around them.

**Bounded RAM/ROM** - littlefs is designed to work with a small amount of
memory. RAM usage is strictly bounded, which means RAM consumption does not
change as the filesystem grows. The filesystem contains no unbounded
recursion and dynamic memory is limited to configurable buffers that can be
provided statically.

## Example

Here's a simple example that updates a file named `boot_count` every time
main runs. The program can be interrupted at any time without losing track
of how many times it has been booted and without corrupting the filesystem:

``` c
#include "lfs.h"

// variables used by the filesystem
lfs_t lfs;
lfs_file_t file;

// configuration of the filesystem is provided by this struct
const struct lfs_config cfg = {
    // block device operations
    .read  = user_provided_block_device_read,
    .prog  = user_provided_block_device_prog,
    .erase = user_provided_block_device_erase,
    .sync  = user_provided_block_device_sync,

    // block device configuration
    .read_size = 16,
    .prog_size = 16,
    .block_size = 4096,
    .block_count = 128,
    .cache_size = 16,
    .lookahead_size = 16,
    .block_cycles = 500,
};

// entry point
int main(void) {
    // mount the filesystem
    int err = lfs_mount(&lfs, &cfg);

    // reformat if we can't mount the filesystem
    // this should only happen on the first boot
    if (err) {
        lfs_format(&lfs, &cfg);
        lfs_mount(&lfs, &cfg);
    }

    // read current count
    uint32_t boot_count = 0;
    lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
    lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));

    // update boot count
    boot_count += 1;
    lfs_file_rewind(&lfs, &file);
    lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));

    // remember the storage is not updated until the file is closed successfully
    lfs_file_close(&lfs, &file);

    // release any resources we were using
    lfs_unmount(&lfs);

    // print the boot count
    printf("boot_count: %d\n", boot_count);
}
```
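Editor's note: the example above assumes the user supplies the `user_provided_block_device_*` callbacks. As a hedged, standalone sketch (not part of this commit), the snippet below shows what a trivially simple RAM-backed block device could look like, assuming the callback signatures declared by `struct lfs_config` in lfs.h; a real flash driver would map these onto the chip's read/program/erase commands instead.

``` c
#include <string.h>
#include "lfs.h"

// 128 blocks of 4096 bytes, matching the cfg above (hypothetical RAM backing)
static uint8_t ram[128 * 4096];

static int user_provided_block_device_read(const struct lfs_config *c,
        lfs_block_t block, lfs_off_t off, void *buffer, lfs_size_t size) {
    memcpy(buffer, &ram[block * c->block_size + off], size);
    return 0;
}

static int user_provided_block_device_prog(const struct lfs_config *c,
        lfs_block_t block, lfs_off_t off, const void *buffer, lfs_size_t size) {
    memcpy(&ram[block * c->block_size + off], buffer, size);
    return 0;
}

static int user_provided_block_device_erase(const struct lfs_config *c,
        lfs_block_t block) {
    // NOR flash erases to 0xff; emulate that here
    memset(&ram[block * c->block_size], 0xff, c->block_size);
    return 0;
}

static int user_provided_block_device_sync(const struct lfs_config *c) {
    (void)c; // nothing is cached, so there is nothing to flush
    return 0;
}
```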

## Usage

Detailed documentation (or at least as much detail as is currently available)
can be found in the comments in [lfs.h](lfs.h).

littlefs takes in a configuration structure that defines how the filesystem
operates. The configuration struct provides the filesystem with the block
device operations and dimensions, tweakable parameters that tradeoff memory
usage for performance, and optional static buffers if the user wants to avoid
dynamic memory.

The state of the littlefs is stored in the `lfs_t` type which is left up
to the user to allocate, allowing multiple filesystems to be in use
simultaneously. With the `lfs_t` and configuration struct, a user can
format a block device or mount the filesystem.

Once mounted, the littlefs provides a full set of POSIX-like file and
directory functions, with the deviation that the allocation of filesystem
structures must be provided by the user.

All POSIX operations, such as remove and rename, are atomic, even in the event
of power loss. Additionally, file updates are not actually committed to
the filesystem until sync or close is called on the file.

## Other notes

Littlefs is written in C, and specifically should compile with any compiler
that conforms to the `C99` standard.

All littlefs calls have the potential to return a negative error code. The
errors can be either one of those found in the `enum lfs_error` in
[lfs.h](lfs.h), or an error returned by the user's block device operations.

In the configuration struct, the `prog` and `erase` functions provided by the
user may return a `LFS_ERR_CORRUPT` error if the implementation can already
detect corrupt blocks. However, the wear leveling does not depend on the return
code of these functions, instead all data is read back and checked for
integrity.

If your storage caches writes, make sure that the provided `sync` function
flushes all the data to memory and ensures that the next read fetches the data
from memory, otherwise data integrity cannot be guaranteed. If the `write`
function does not perform caching, and therefore each `read` or `write` call
hits the memory, the `sync` function can simply return 0.

## Design

At a high level, littlefs is a block based filesystem that uses small logs to
store metadata and larger copy-on-write (COW) structures to store file data.

In littlefs, these ingredients form a sort of two-layered cake, with the small
logs (called metadata pairs) providing fast updates to metadata anywhere on
storage, while the COW structures store file data compactly and without any
wear amplification cost.

Both of these data structures are built out of blocks, which are fed by a
common block allocator. By limiting the number of erases allowed on a block
per allocation, the allocator provides dynamic wear leveling over the entire
filesystem.

```
root
.--------.--------.
| A'| B'| |
| | |-> |
| | | |
'--------'--------'
.----' '--------------.
A v B v
.--------.--------. .--------.--------.
| C'| D'| | | E'|new| |
| | |-> | | | E'|-> |
| | | | | | | |
'--------'--------' '--------'--------'
.-' '--. | '------------------.
v v .-' v
.--------. .--------. v .--------.
| C | | D | .--------. write | new E |
| | | | | E | ==> | |
| | | | | | | |
'--------' '--------' | | '--------'
'--------' .-' |
.-' '-. .-------------|------'
v v v v
.--------. .--------. .--------.
| F | | G | | new F |
| | | | | |
| | | | | |
'--------' '--------' '--------'
```

More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
[SPEC.md](SPEC.md).

- [DESIGN.md](DESIGN.md) - A fully detailed dive into how littlefs works.
  I would suggest reading it as the tradeoffs at work are quite interesting.

- [SPEC.md](SPEC.md) - The on-disk specification of littlefs with all the
  nitty-gritty details. May be useful for tooling development.

## Testing

The littlefs comes with a test suite designed to run on a PC using the
[emulated block device](bd/lfs_testbd.h) found in the `bd` directory.
The tests assume a Linux environment and can be started with make:

``` bash
make test
```

Tests are implemented in C in the .toml files found in the `tests` directory.
When developing a feature or fixing a bug, it is frequently useful to run a
single test case or suite of tests:

``` bash
./scripts/test.py -l runners/test_runner            # list available test suites
./scripts/test.py -L runners/test_runner test_dirs  # list available test cases
./scripts/test.py runners/test_runner test_dirs     # run a specific test suite
```

If an assert fails in a test, test.py will try to print information about the
failure:

``` bash
tests/test_dirs.toml:1:failure: test_dirs_root:1g12gg2 (PROG_SIZE=16, ERASE_SIZE=512) failed
tests/test_dirs.toml:5:assert: assert failed with 0, expected eq 42
    lfs_mount(&lfs, cfg) => 42;
```

This includes the test id, which can be passed to test.py to run only that
specific test permutation:

``` bash
./scripts/test.py runners/test_runner test_dirs_root:1g12gg2       # run a specific test permutation
./scripts/test.py runners/test_runner test_dirs_root:1g12gg2 --gdb # drop into gdb on failure
```

Some other flags that may be useful:

```bash
./scripts/test.py runners/test_runner -b -j  # run tests in parallel
./scripts/test.py runners/test_runner -v -O- # redirect stdout to stdout
./scripts/test.py runners/test_runner -ddisk # capture resulting disk image
```

See `-h/--help` for a full list of available flags:

``` bash
./scripts/test.py --help
```

## License

The littlefs is provided under the [BSD-3-Clause] license. See
[LICENSE.md](LICENSE.md) for more information. Contributions to this project
are accepted under the same license.

Individual files contain the following tag instead of the full license text.

    SPDX-License-Identifier: BSD-3-Clause

This enables machine processing of license information based on the SPDX
License Identifiers that are here available: http://spdx.org/licenses/

## Related projects

- [littlefs-fuse] - A [FUSE] wrapper for littlefs. The project allows you to
  mount littlefs directly on a Linux machine. Can be useful for debugging
  littlefs if you have an SD card handy.

- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would
  want this, but it is handy for demos. You can see it in action
  [here][littlefs-js-demo].

- [littlefs-python] - A Python wrapper for littlefs. The project allows you
  to create images of the filesystem on your PC. Check if littlefs will fit
  your needs, create images for a later download to the target memory or
  inspect the content of a binary image of the target memory.

- [littlefs-toy] - A command-line tool for creating and working with littlefs
  images. Uses syntax similar to the tar command for ease of use. Supports working
  on littlefs images embedded inside another file (firmware image, etc).

- [littlefs2-rust] - A Rust wrapper for littlefs. This project allows you
  to use littlefs in a Rust-friendly API, reaping the benefits of Rust's memory
  safety and other guarantees.

- [nim-littlefs] - A Nim wrapper and API for littlefs. Includes a fuse
  implementation based on [littlefs-fuse]

- [chamelon] - A pure-OCaml implementation of (most of) littlefs, designed for
  use with the MirageOS library operating system project. It is interoperable
  with the reference implementation, with some caveats.

- [littlefs-disk-img-viewer] - A memory-efficient web application for viewing
  littlefs disk images in your web browser.

- [mklfs] - A command line tool for creating littlefs images. Used in the Lua
  RTOS ecosystem.

- [mklittlefs] - A command line tool for creating littlefs images. Used in the
  ESP8266 and RP2040 ecosystem.

- [pico-littlefs-usb] - An interface for littlefs that emulates a FAT12
  filesystem over USB. Allows mounting littlefs on a host PC without additional
  drivers.

- [ramcrc32bd] - An example block device using littlefs's 32-bit CRC for
  error-correction.

- [ramrsbd] - An example block device using Reed-Solomon codes for
  error-correction.

- [Mbed OS] - The easiest way to get started with littlefs is to jump into Mbed
  which already has block device drivers for most forms of embedded storage.
  littlefs is available in Mbed OS as the [LittleFileSystem] class.

- [SPIFFS] - Another excellent embedded filesystem for NOR flash. As a more
  traditional logging filesystem with full static wear-leveling, SPIFFS will
  likely outperform littlefs on small memories such as the internal flash on
  microcontrollers.

- [Dhara] - An interesting NAND flash translation layer designed for small
  MCUs. It offers static wear-leveling and power-resilience with only a fixed
  _O(|address|)_ pointer structure stored on each block and in RAM.

- [ChaN's FatFs] - A lightweight reimplementation of the infamous FAT filesystem
  for microcontroller-scale devices. Due to limitations of FAT it can't provide
  power-loss resilience, but it does allow easy interop with PCs.

[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html
[littlefs-fuse]: https://github.com/geky/littlefs-fuse
[FUSE]: https://github.com/libfuse/libfuse
[littlefs-js]: https://github.com/geky/littlefs-js
[littlefs-js-demo]: http://littlefs.geky.net/demo.html
[littlefs-python]: https://pypi.org/project/littlefs-python/
[littlefs-toy]: https://github.com/tjko/littlefs-toy
[littlefs2-rust]: https://crates.io/crates/littlefs2
[nim-littlefs]: https://github.com/Graveflo/nim-littlefs
[chamelon]: https://github.com/yomimono/chamelon
[littlefs-disk-img-viewer]: https://github.com/tniessen/littlefs-disk-img-viewer
[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src
[mklittlefs]: https://github.com/earlephilhower/mklittlefs
[pico-littlefs-usb]: https://github.com/oyama/pico-littlefs-usb
[ramcrc32bd]: https://github.com/geky/ramcrc32bd
[ramrsbd]: https://github.com/geky/ramrsbd
[Mbed OS]: https://github.com/armmbed/mbed-os
[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html
[SPIFFS]: https://github.com/pellepl/spiffs
[Dhara]: https://github.com/dlbeer/dhara
[ChaN's FatFs]: http://elm-chan.org/fsw/ff/00index_e.html
867
components/joltwallet__littlefs/src/littlefs/SPEC.md
Normal file
@@ -0,0 +1,867 @@
## littlefs technical specification

This is the technical specification of the little filesystem with on-disk
version lfs2.1. This document covers the technical details of how the littlefs
is stored on disk for introspection and tooling. This document assumes you are
familiar with the design of the littlefs, for more info on how littlefs works
check out [DESIGN.md](DESIGN.md).

```
   | | |     .---._____
  .-----.   |          |
--|o    |---| littlefs |
--|     |---|          |
  '-----'   '----------'
   | | |
```

## Some quick notes

- littlefs is a block-based filesystem. The disk is divided into an array of
  evenly sized blocks that are used as the logical unit of storage.

- Block pointers are stored in 32 bits, with the special value `0xffffffff`
  representing a null block address.

- In addition to the logical block size (which usually matches the erase
  block size), littlefs also uses a program block size and read block size.
  These determine the alignment of block device operations, but don't need
  to be consistent for portability.

- By default, all values in littlefs are stored in little-endian byte order.

## Directories / Metadata pairs

Metadata pairs form the backbone of littlefs and provide a system for
distributed atomic updates. Even the superblock is stored in a metadata pair.

As their name suggests, a metadata pair is stored in two blocks, with one block
providing a backup during erase cycles in case power is lost. These two blocks
are not necessarily sequential and may be anywhere on disk, so a "pointer" to a
metadata pair is stored as two block pointers.

On top of this, each metadata block behaves as an appendable log, containing a
variable number of commits. Commits can be appended to the metadata log in
order to update the metadata without requiring an erase cycle. Note that
successive commits may supersede the metadata in previous commits. Only the
most recent metadata should be considered valid.

The high-level layout of a metadata block is fairly simple:

```
.---------------------------------------.
.-| revision count | entries | \
| |-------------------+ | |
| | | |
| | | +-- 1st commit
| | | |
| | +-------------------| |
| | | CRC | /
| |-------------------+-------------------|
| | entries | \
| | | |
| | | +-- 2nd commit
| | +-------------------+--------------| |
| | | CRC | padding | /
| |----+-------------------+--------------|
| | entries | \
| | | |
| | | +-- 3rd commit
| | +-------------------+---------| |
| | | CRC | | /
| |---------+-------------------+ |
| | unwritten storage | more commits
| | | |
| | | v
| | |
| | |
| '---------------------------------------'
'---------------------------------------'
```

Each metadata block contains a 32-bit revision count followed by a number of
commits. Each commit contains a variable number of metadata entries followed
by a 32-bit CRC.

Note also that entries aren't necessarily word-aligned. This allows us to
store metadata more compactly, however we can only write to addresses that are
aligned to our program block size. This means each commit may have padding for
alignment.

Metadata block fields:

1. **Revision count (32-bits)** - Incremented every erase cycle. If both blocks
   contain valid commits, only the block with the most recent revision count
   should be used. Sequence comparison must be used to avoid issues with
   integer overflow.

2. **CRC (32-bits)** - Detects corruption from power-loss or other write
   issues. Uses a CRC-32 with a polynomial of `0x04c11db7` initialized
   with `0xffffffff`.

Entries themselves are stored as a 32-bit tag followed by a variable length
blob of data. But exactly how these tags are stored is a little bit tricky.

Metadata blocks support both forward and backward iteration. In order to do
this without duplicating the space for each tag, neighboring entries have their
tags XORed together, starting with `0xffffffff`.

```
Forward iteration Backward iteration

.-------------------. 0xffffffff .-------------------.
| revision count | | | revision count |
|-------------------| v |-------------------|
| tag ~A |---> xor -> tag A | tag ~A |---> xor -> 0xffffffff
|-------------------| | |-------------------| ^
| data A | | | data A | |
| | | | | |
| | | | | |
|-------------------| v |-------------------| |
| tag AxB |---> xor -> tag B | tag AxB |---> xor -> tag A
|-------------------| | |-------------------| ^
| data B | | | data B | |
| | | | | |
| | | | | |
|-------------------| v |-------------------| |
| tag BxC |---> xor -> tag C | tag BxC |---> xor -> tag B
|-------------------| |-------------------| ^
| data C | | data C | |
| | | | tag C
| | | |
| | | |
'-------------------' '-------------------'
```

Here's a more complete example of a metadata block containing 4 entries:

```
.---------------------------------------.
.-| revision count | tag ~A | \
| |-------------------+-------------------| |
| | data A | |
| | | |
| |-------------------+-------------------| |
| | tag AxB | data B | <--. |
| |-------------------+ | | |
| | | | +-- 1st commit
| | +-------------------+---------| | |
| | | tag BxC | | <-.| |
| |---------+-------------------+ | || |
| | data C | || |
| | | || |
| |-------------------+-------------------| || |
| | tag CxCRC | CRC | || /
| |-------------------+-------------------| ||
| | tag CRCxA' | data A' | || \
| |-------------------+ | || |
| | | || |
| | +-------------------+----| || +-- 2nd commit
| | | tag CRCxA' | | || |
| |--------------+-------------------+----| || |
| | CRC | padding | || /
| |--------------+----+-------------------| ||
| | tag CRCxA'' | data A'' | <---. \
| |-------------------+ | ||| |
| | | ||| |
| | +-------------------+---------| ||| |
| | | tag A''xD | | < ||| |
| |---------+-------------------+ | |||| +-- 3rd commit
| | data D | |||| |
| | +---------| |||| |
| | | tag Dx| |||| |
| |---------+-------------------+---------| |||| |
| |CRC | CRC | | |||| /
| |---------+-------------------+ | ||||
| | unwritten storage | |||| more commits
| | | |||| |
| | | |||| v
| | | ||||
| | | ||||
| '---------------------------------------' ||||
'---------------------------------------' |||'- most recent A
||'-- most recent B
|'--- most recent C
'---- most recent D
```

Two things to note before we get into the details around tag encoding:

1. Each tag contains a valid bit used to indicate if the tag and containing
   commit is valid. After XORing, this bit should always be zero.

   At the end of each commit, the valid bit of the previous tag is XORed
   with the lowest bit in the type field of the CRC tag. This allows
   the CRC tag to force the next commit to fail the valid bit test if it
   has not yet been written to.

2. The valid bit alone is not enough info to know if the next commit has been
   erased. We don't know the order bits will be programmed in a program block,
   so it's possible that the next commit had an attempted program that left the
   valid bit unchanged.

   To ensure we only ever program erased bytes, each commit can contain an
   optional forward-CRC (FCRC). An FCRC contains a checksum of some amount of
   bytes in the next commit at the time it was erased.

```
.-------------------. \ \
| revision count | | |
|-------------------| | |
| metadata | | |
| | +---. +-- current commit
| | | | |
|-------------------| | | |
| FCRC ---|-. | |
|-------------------| / | | |
| CRC -----|-' /
|-------------------| |
| padding | | padding (doesn't need CRC)
| | |
|-------------------| \ | \
| erased? | +-' |
| | | | +-- next commit
| v | / |
| | /
| |
'-------------------'
```

If the FCRC is missing or the checksum does not match, we must assume a
commit was attempted but failed due to power-loss.

Note that end-of-block commits do not need an FCRC.

## Metadata tags

So in littlefs, 32-bit tags describe every type of metadata. And this means
_every_ type of metadata, including file entries, directory fields, and
global state. Even the CRCs used to mark the end of commits get their own tag.

Because of this, the tag format contains some densely packed information. Note
that there are multiple levels of types which break down into more info:

```
[---- 32 ----]
[1|-- 11 --|-- 10 --|-- 10 --]
^. ^ . ^ ^- length
|. | . '------------ id
|. '-----.------------------ type (type3)
'.-----------.------------------ valid bit
[-3-|-- 8 --]
^ ^- chunk
'------- type (type1)
```


Before we go further, there's one important thing to note. These tags are
**not** stored in little-endian. Tags stored in commits are actually stored
in big-endian (and are the only thing in littlefs stored in big-endian). This
little bit of craziness comes from the fact that the valid bit must be the
first bit in a commit, and when converted to little-endian, the valid bit finds
itself in byte 4. We could restructure the tag to store the valid bit lower,
but, because none of the fields are byte-aligned, this would be more
complicated than just storing the tag in big-endian.

Another thing to note is that both the tags `0x00000000` and `0xffffffff` are
invalid and can be used for null values.

Metadata tag fields:

1. **Valid bit (1-bit)** - Indicates if the tag is valid.

2. **Type3 (11-bits)** - Type of the tag. This field is broken down further
   into a 3-bit abstract type and an 8-bit chunk field. Note that the value
   `0x000` is invalid and not assigned a type.

   1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
      8 categories that facilitate bitmasked lookups.

   2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
      abstract types. type1+chunk+id form a unique identifier for each tag in the
      metadata block.

3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
   block gets a unique id which is used to associate tags with that file. The
   special value `0x3ff` is used for any tags that are not associated with a
   file, such as directory and global metadata.

4. **Length (10-bits)** - Length of the data in bytes. The special value
   `0x3ff` indicates that this tag has been deleted.
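Editor's note: as a rough illustration of the bit layout described above (a standalone sketch derived only from this spec, not code from the commit), decoding a tag comes down to a big-endian load, an XOR against the previous tag in the chain, and a few shifts. The sample bytes are hypothetical and not taken from a real image.

``` c
#include <stdint.h>
#include <stdio.h>

// Tags are stored big-endian and XORed with the previous tag in the commit;
// the chain for the first tag in a block starts at 0xffffffff.
static uint32_t tag_decode(const uint8_t be[4], uint32_t prev_tag) {
    uint32_t raw = ((uint32_t)be[0] << 24) | ((uint32_t)be[1] << 16) |
                   ((uint32_t)be[2] <<  8) | ((uint32_t)be[3] <<  0);
    return raw ^ prev_tag;
}

int main(void) {
    // hypothetical raw bytes of a block's first tag
    const uint8_t raw[4] = {0xdf, 0xff, 0xff, 0xf7};
    uint32_t tag = tag_decode(raw, 0xffffffff);

    // field layout: [1 valid | 11 type3 | 10 id | 10 length]
    printf("valid:  %u\n", (unsigned)(tag >> 31));              // should be 0 after XORing
    printf("type3:  0x%03x\n", (unsigned)((tag >> 20) & 0x7ff));
    printf("type1:  %u\n", (unsigned)((tag >> 28) & 0x7));      // top 3 bits of type3
    printf("chunk:  0x%02x\n", (unsigned)((tag >> 20) & 0xff)); // low 8 bits of type3
    printf("id:     %u\n", (unsigned)((tag >> 10) & 0x3ff));
    printf("length: %u\n", (unsigned)(tag & 0x3ff));
    return 0;
}
```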

## Metadata types

What follows is an exhaustive list of metadata in littlefs.

---
#### `0x401` LFS_TYPE_CREATE

Creates a new file with this id. Note that files in a metadata block
don't necessarily need a create tag. All a create does is move over any
files using this id. In this sense a create is similar to insertion into
an imaginary array of files.

The create and delete tags allow littlefs to keep files in a directory
ordered alphabetically by filename.

---
#### `0x4ff` LFS_TYPE_DELETE

Deletes the file with this id. An inverse to create, this tag moves over
any files neighboring this id similar to a deletion from an imaginary
array of files.

---
#### `0x0xx` LFS_TYPE_NAME

Associates the id with a file name and file type.

The data contains the file name stored as an ASCII string (may be expanded to
UTF8 in the future).

The chunk field in this tag indicates an 8-bit file type which can be one of
the following.

Currently, the name tag must precede any other tags associated with the id and
cannot be reassigned without deleting the file.

Layout of the name tag:

```
tag data
[-- 32 --][--- variable length ---]
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
^ ^ ^ ^ ^- size ^- file name
| | | '------ id
| | '----------- file type
| '-------------- type1 (0x0)
'----------------- valid bit
```

Name fields:

1. **file type (8-bits)** - Type of the file.

2. **file name** - File name stored as an ASCII string.

---
#### `0x001` LFS_TYPE_REG

Initializes the id + name as a regular file.

How each file is stored depends on its struct tag, which is described below.

---
#### `0x002` LFS_TYPE_DIR

Initializes the id + name as a directory.

Directories in littlefs are stored on disk as a linked-list of metadata pairs,
each pair containing any number of files in alphabetical order. A pointer to
the directory is stored in the struct tag, which is described below.

---
#### `0x0ff` LFS_TYPE_SUPERBLOCK

Initializes the id as a superblock entry.

The superblock entry is a special entry used to store format-time configuration
and identify the filesystem.

The name is a bit of a misnomer. While the superblock entry serves the same
purpose as a superblock found in other filesystems, in littlefs the superblock
does not get a dedicated block. Instead, the superblock entry is duplicated
across a linked-list of metadata pairs rooted on the blocks 0 and 1. The last
metadata pair doubles as the root directory of the filesystem.

```
.--------. .--------. .--------. .--------. .--------.
.| super |->| super |->| super |->| super |->| file B |
|| block | || block | || block | || block | || file C |
|| | || | || | || file A | || file D |
|'--------' |'--------' |'--------' |'--------' |'--------'
'--------' '--------' '--------' '--------' '--------'

\----------------+----------------/ \----------+----------/
superblock pairs root directory
```

The filesystem starts with only the root directory. The superblock metadata
pairs grow every time the root pair is compacted in order to prolong the
life of the device exponentially.

The contents of the superblock entry are stored in a name tag with the
superblock type and an inline-struct tag. The name tag contains the magic
string "littlefs", while the inline-struct tag contains version and
configuration information.

Layout of the superblock name tag and inline-struct tag:

```
tag data
[-- 32 --][-- 32 --|-- 32 --]
[1|- 11 -| 10 | 10 ][--- 64 ---]
^ ^ ^ ^- size (8) ^- magic string ("littlefs")
| | '------ id (0)
| '------------ type (0x0ff)
'----------------- valid bit

tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --|-- 32 --]
|
||||
^ ^ ^ ^ ^- version ^- block size ^- block count
|
||||
| | | | [-- 32 --|-- 32 --|-- 32 --]
|
||||
| | | | [-- 32 --|-- 32 --|-- 32 --]
|
||||
| | | | ^- name max ^- file max ^- attr max
|
||||
| | | '- size (24)
|
||||
| | '------ id (0)
|
||||
| '------------ type (0x201)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Superblock fields:
|
||||
|
||||
1. **Magic string (8-bytes)** - Magic string indicating the presence of
|
||||
littlefs on the device. Must be the string "littlefs".
|
||||
|
||||
2. **Version (32-bits)** - The version of littlefs at format time. The version
|
||||
is encoded in a 32-bit value with the upper 16-bits containing the major
|
||||
version, and the lower 16-bits containing the minor version.
|
||||
|
||||
This specification describes version 2.0 (`0x00020000`).
|
||||
|
||||
3. **Block size (32-bits)** - Size of the logical block size used by the
|
||||
filesystem in bytes.
|
||||
|
||||
4. **Block count (32-bits)** - Number of blocks in the filesystem.
|
||||
|
||||
5. **Name max (32-bits)** - Maximum size of file names in bytes.
|
||||
|
||||
6. **File max (32-bits)** - Maximum size of files in bytes.
|
||||
|
||||
7. **Attr max (32-bits)** - Maximum size of file attributes in bytes.
|
||||
|
||||
The superblock must always be the first entry (id 0) in the metadata pair, and
|
||||
the name tag must always be the first tag in the metadata pair. This makes it
|
||||
so that the magic string "littlefs" will always reside at offset=8 in a valid
|
||||
littlefs superblock.
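
A hypothetical probe built on this guarantee (not part of the littlefs API)
only needs to look at offset 8, and splitting the version field is a pair of
shifts:

```
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

// offsets 0..3 hold the revision count, 4..7 the big-endian name tag,
// 8..15 the name tag's data, which must be the magic string
static bool looks_like_littlefs_superblock(const uint8_t *block,
        size_t block_size) {
    return block_size >= 16 && memcmp(&block[8], "littlefs", 8) == 0;
}

// the version splits into 16-bit halves, e.g. 0x00020000 -> 2.0
static void decode_version(uint32_t version,
        uint16_t *major, uint16_t *minor) {
    *major = version >> 16;
    *minor = version & 0xffff;
}
```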
|
||||
|
||||
---
|
||||
#### `0x2xx` LFS_TYPE_STRUCT
|
||||
|
||||
Associates the id with an on-disk data structure.
|
||||
|
||||
The exact layout of the data depends on the data structure type stored in the
|
||||
chunk field and can be one of the following.
|
||||
|
||||
Any type of struct supersedes all other structs associated with the id. For
|
||||
example, appending a ctz-struct replaces an inline-struct on the same file.
|
||||
|
||||
---
|
||||
#### `0x200` LFS_TYPE_DIRSTRUCT
|
||||
|
||||
Gives the id a directory data structure.
|
||||
|
||||
Directories in littlefs are stored on disk as a linked-list of metadata pairs,
|
||||
each pair containing any number of files in alphabetical order.
|
||||
|
||||
```
|
||||
|
|
||||
v
|
||||
.--------. .--------. .--------. .--------. .--------. .--------.
|
||||
.| file A |->| file D |->| file G |->| file I |->| file J |->| file M |
|
||||
|| file B | || file E | || file H | || | || file K | || file N |
|
||||
|| file C | || file F | || | || | || file L | || |
|
||||
|'--------' |'--------' |'--------' |'--------' |'--------' |'--------'
|
||||
'--------' '--------' '--------' '--------' '--------' '--------'
|
||||
```
|
||||
|
||||
The dir-struct tag contains only the pointer to the first metadata-pair in the
|
||||
directory. The directory size is not known without traversing the directory.
|
||||
|
||||
The pointer to the next metadata-pair in the directory is stored in a tail tag,
|
||||
which is described below.
|
||||
|
||||
Layout of the dir-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][--- 64 ---]
|
||||
^ ^ ^ ^- size (8) ^- metadata pair
|
||||
| | '------ id
|
||||
| '------------ type (0x200)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Dir-struct fields:
|
||||
|
||||
1. **Metadata pair (8-bytes)** - Pointer to the first metadata-pair
|
||||
in the directory.
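
For illustration, this 8-byte pointer is nothing more than the two block
addresses, stored as two little-endian 32-bit words (the struct name below is
a sketch, not littlefs's own type):

```
#include <stdint.h>

typedef struct {
    uint32_t blocks[2];   // the two blocks that make up the metadata pair
} metadata_pair_sketch_t;
```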
|
||||
|
||||
---
|
||||
#### `0x201` LFS_TYPE_INLINESTRUCT
|
||||
|
||||
Gives the id an inline data structure.
|
||||
|
||||
Inline structs store small files that can fit in the metadata pair. In this
|
||||
case, the file data is stored directly in the tag's data area.
|
||||
|
||||
Layout of the inline-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][--- variable length ---]
|
||||
[1|- 11 -| 10 | 10 ][--- (size * 8) ---]
|
||||
^ ^ ^ ^- size ^- inline data
|
||||
| | '------ id
|
||||
| '------------ type (0x201)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Inline-struct fields:
|
||||
|
||||
1. **Inline data** - File data stored directly in the metadata-pair.
|
||||
|
||||
---
|
||||
#### `0x202` LFS_TYPE_CTZSTRUCT
|
||||
|
||||
Gives the id a CTZ skip-list data structure.
|
||||
|
||||
CTZ skip-lists store files that can not fit in the metadata pair. These files
|
||||
are stored in a skip-list in reverse, with a pointer to the head of the
|
||||
skip-list. Note that the head of the skip-list and the file size is enough
|
||||
information to read the file.
|
||||
|
||||
How exactly CTZ skip-lists work is a bit complicated. A full explanation can be
|
||||
found in the [DESIGN.md](DESIGN.md#ctz-skip-lists).
|
||||
|
||||
A quick summary: for every _n_'th block where _n_ is divisible by 2^x, that
block contains a pointer to block _n_ - 2^x. These pointers are stored in
increasing order of _x_ in each block of the file before the actual data.
|
||||
|
||||
```
|
||||
|
|
||||
v
|
||||
.--------. .--------. .--------. .--------. .--------. .--------.
|
||||
| A |<-| D |<-| G |<-| J |<-| M |<-| P |
|
||||
| B |<-| E |--| H |<-| K |--| N | | Q |
|
||||
| C |<-| F |--| I |--| L |--| O | | |
|
||||
'--------' '--------' '--------' '--------' '--------' '--------'
|
||||
block 0 block 1 block 2 block 3 block 4 block 5
|
||||
1 skip 2 skips 1 skip 3 skips 1 skip
|
||||
```
|
||||
|
||||
Note that the maximum number of pointers in a block is bounded by the maximum
|
||||
file size divided by the block size. With 32 bits for file size, this results
|
||||
in a minimum block size of 104 bytes.
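
A sketch of the rule implied by this summary (using the GCC/Clang
`__builtin_ctz` intrinsic): block _n_ of the skip-list stores ctz(_n_)+1
pointers, matching the "skips" row in the diagram above.

```
#include <stdint.h>

// illustrative only: number of back pointers stored in block n of a file
static uint32_t ctz_skiplist_pointers(uint32_t n) {
    if (n == 0) {
        return 0;                          // the first block has no pointers
    }
    return (uint32_t)__builtin_ctz(n) + 1; // 1 -> 1, 2 -> 2, 3 -> 1, 4 -> 3
}
```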
|
||||
|
||||
Layout of the CTZ-struct tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
|
||||
^ ^ ^ ^ ^ ^- file size
|
||||
| | | | '-------------------- file head
|
||||
| | | '- size (8)
|
||||
| | '------ id
|
||||
| '------------ type (0x202)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
CTZ-struct fields:
|
||||
|
||||
1. **File head (32-bits)** - Pointer to the block that is the head of the
|
||||
file's CTZ skip-list.
|
||||
|
||||
2. **File size (32-bits)** - Size of the file in bytes.
|
||||
|
||||
---
|
||||
#### `0x3xx` LFS_TYPE_USERATTR
|
||||
|
||||
Attaches a user attribute to an id.
|
||||
|
||||
littlefs has a concept of "user attributes". These are small user-provided
|
||||
attributes that can be used to store things like timestamps, hashes,
|
||||
permissions, etc.
|
||||
|
||||
Each user attribute is uniquely identified by an 8-bit type which is stored in
|
||||
the chunk field, and the user attribute itself can be found in the tag's data.
|
||||
|
||||
There are currently no standard user attributes and a portable littlefs
|
||||
implementation should work with any user attributes missing.
|
||||
|
||||
Layout of the user-attr tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][--- variable length ---]
|
||||
[1| 3| 8 | 10 | 10 ][--- (size * 8) ---]
|
||||
^ ^ ^ ^ ^- size ^- attr data
|
||||
| | | '------ id
|
||||
| | '----------- attr type
|
||||
| '-------------- type1 (0x3)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
User-attr fields:
|
||||
|
||||
1. **Attr type (8-bits)** - Type of the user attributes.
|
||||
|
||||
2. **Attr data** - The data associated with the user attribute.
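
One way these tags end up on disk is through the public `lfs_setattr` API; a
small sketch (the attribute type `0x74` is an arbitrary, application-chosen
value):

```
#include "lfs.h"

// store a timestamp as a user attribute; `type` ends up in the chunk field
static int store_mtime(lfs_t *lfs, const char *path, uint32_t mtime) {
    return lfs_setattr(lfs, path, 0x74, &mtime, sizeof(mtime));
}
```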
|
||||
|
||||
---
|
||||
#### `0x6xx` LFS_TYPE_TAIL
|
||||
|
||||
Provides the tail pointer for the metadata pair itself.
|
||||
|
||||
The metadata pair's tail pointer is used in littlefs for a linked-list
|
||||
containing all metadata pairs. The chunk field contains the type of the tail,
|
||||
which indicates if the following metadata pair is a part of the directory
|
||||
(hard-tail) or only used to traverse the filesystem (soft-tail).
|
||||
|
||||
```
|
||||
.--------.
|
||||
.| dir A |-.
|
||||
||softtail| |
|
||||
.--------| |-'
|
||||
| |'--------'
|
||||
| '---|--|-'
|
||||
| .-' '-------------.
|
||||
| v v
|
||||
| .--------. .--------. .--------.
|
||||
'->| dir B |->| dir B |->| dir C |
|
||||
||hardtail| ||softtail| || |
|
||||
|| | || | || |
|
||||
|'--------' |'--------' |'--------'
|
||||
'--------' '--------' '--------'
|
||||
```
|
||||
|
||||
Currently any tail tag supersedes any other preceding tails in the metadata
pair, but this may change if additional metadata pair state is added.
|
||||
|
||||
A note about the metadata pair linked-list: Normally, this linked-list contains
|
||||
every metadata pair in the filesystem. However, there are some operations that
|
||||
can cause this linked-list to become out of sync if a power-loss were to occur.
|
||||
When this happens, littlefs sets the "sync" flag in the global state. How
|
||||
exactly this flag is stored is described below.
|
||||
|
||||
When the sync flag is set:
|
||||
|
||||
1. The linked-list may contain an orphaned directory that has been removed in
|
||||
the filesystem.
|
||||
2. The linked-list may contain a metadata pair with a bad block that has been
|
||||
replaced in the filesystem.
|
||||
|
||||
If the sync flag is set, the threaded linked-list must be checked for these
|
||||
errors before it can be used reliably. Note that the threaded linked-list can
|
||||
be ignored if littlefs is mounted read-only.
|
||||
|
||||
Layout of the tail tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1| 3| 8 | 10 | 10 ][--- 64 ---]
|
||||
^ ^ ^ ^ ^- size (8) ^- metadata pair
|
||||
| | | '------ id
|
||||
| | '---------- tail type
|
||||
| '------------- type1 (0x6)
|
||||
'---------------- valid bit
|
||||
```
|
||||
|
||||
Tail fields:
|
||||
|
||||
1. **Tail type (8-bits)** - Type of the tail pointer.
|
||||
|
||||
2. **Metadata pair (8-bytes)** - Pointer to the next metadata-pair.
|
||||
|
||||
---
|
||||
#### `0x600` LFS_TYPE_SOFTTAIL
|
||||
|
||||
Provides a tail pointer that points to the next metadata pair in the
|
||||
filesystem.
|
||||
|
||||
In this case, the next metadata pair is not a part of our current directory
|
||||
and should only be followed when traversing the entire filesystem.
|
||||
|
||||
---
|
||||
#### `0x601` LFS_TYPE_HARDTAIL
|
||||
|
||||
Provides a tail pointer that points to the next metadata pair in the
|
||||
directory.
|
||||
|
||||
In this case, the next metadata pair belongs to the current directory. Note
|
||||
that because directories in littlefs are sorted alphabetically, the next
|
||||
metadata pair should only contain filenames greater than any filename in the
|
||||
current pair.
|
||||
|
||||
---
|
||||
#### `0x7xx` LFS_TYPE_GSTATE
|
||||
|
||||
Provides delta bits for global state entries.
|
||||
|
||||
littlefs has a concept of "global state". This is a small set of state that
|
||||
can be updated by a commit to _any_ metadata pair in the filesystem.
|
||||
|
||||
The way this works is that the global state is stored as a set of deltas
|
||||
distributed across the filesystem such that the global state can be found by
|
||||
the xor-sum of these deltas.
|
||||
|
||||
```
|
||||
.--------. .--------. .--------. .--------. .--------.
|
||||
.| |->| gdelta |->| |->| gdelta |->| gdelta |
|
||||
|| | || 0x23 | || | || 0xff | || 0xce |
|
||||
|| | || | || | || | || |
|
||||
|'--------' |'--------' |'--------' |'--------' |'--------'
|
||||
'--------' '----|---' '--------' '----|---' '----|---'
|
||||
v v v
|
||||
0x00 --> xor ------------------> xor ------> xor --> gstate = 0x12
|
||||
```
|
||||
|
||||
Note that storing globals this way is very expensive in terms of storage usage,
|
||||
so any global state should be kept very small.
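
A minimal sketch of the xor-sum described above (names are illustrative, not
littlefs internals):

```
#include <stddef.h>
#include <stdint.h>

struct gstate_sketch {
    uint32_t words[3];   // the move state below happens to be 96 bits
};

// xor one on-disk delta into the accumulated global state
static void gstate_xor(struct gstate_sketch *acc,
        const struct gstate_sketch *delta) {
    for (size_t i = 0; i < 3; i++) {
        acc->words[i] ^= delta->words[i];
    }
}

// traversal starts from an all-zero state and xors in every delta found;
// the result after visiting all metadata pairs is the current global state
```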
|
||||
|
||||
The size and format of each piece of global state depends on the type, which
|
||||
is stored in the chunk field. Currently, the only global state is move state,
|
||||
which is outlined below.
|
||||
|
||||
---
|
||||
#### `0x7ff` LFS_TYPE_MOVESTATE
|
||||
|
||||
Provides delta bits for the global move state.
|
||||
|
||||
The move state in littlefs is used to store info about operations that could
cause the filesystem to go out of sync if the power is lost. The operations
where this could occur are moves of files between metadata pairs and any
operation that changes metadata pairs on the threaded linked-list.
|
||||
|
||||
In the case of moves, the move state contains a tag + metadata pair describing
|
||||
the source of the ongoing move. If this tag is non-zero, that means that power
|
||||
was lost during a move, and the file exists in two different locations. If this
|
||||
happens, the source of the move should be considered deleted, and the move
|
||||
should be completed (the source should be deleted) before any other write
|
||||
operations to the filesystem.
|
||||
|
||||
In the case of operations to the threaded linked-list, a single "sync" bit is
|
||||
used to indicate that a modification is ongoing. If this sync flag is set, the
|
||||
threaded linked-list will need to be checked for errors before it can be used
|
||||
reliably. The exact cases to check for are described above in the tail tag.
|
||||
|
||||
Layout of the move state:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][1|- 11 -| 10 | 10 |--- 64 ---]
|
||||
^ ^ ^ ^ ^ ^ ^ ^- padding (0) ^- metadata pair
|
||||
| | | | | | '------ move id
|
||||
| | | | | '------------ move type
|
||||
| | | | '----------------- sync bit
|
||||
| | | |
|
||||
| | | '- size (12)
|
||||
| | '------ id (0x3ff)
|
||||
| '------------ type (0x7ff)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
Move state fields:
|
||||
|
||||
1. **Sync bit (1-bit)** - Indicates that the metadata pair threaded linked-list
may be out of sync. If set, the threaded linked-list should be checked for
errors.
|
||||
|
||||
2. **Move type (11-bits)** - Type of move being performed. Must be either
|
||||
`0x000`, indicating no move, or `0x4ff` indicating the source file should
|
||||
be deleted.
|
||||
|
||||
3. **Move id (10-bits)** - The file id being moved.
|
||||
|
||||
4. **Metadata pair (8-bytes)** - Pointer to the metadata-pair containing
|
||||
the move.
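
A hedged sketch of how the recovered move state might be interpreted after
mount (the struct and names are illustrative, not littlefs internals):

```
#include <stdbool.h>
#include <stdint.h>

struct movestate_sketch {
    uint32_t tag;      // packed: sync bit, move type, move id (see layout)
    uint32_t pair[2];  // metadata pair containing the interrupted move
};

static bool has_pending_move(const struct movestate_sketch *g) {
    // the move type sits in the 11-bit type field; 0x4ff means the source
    // entry of an interrupted move still needs to be deleted
    return ((g->tag >> 20) & 0x7ff) == 0x4ff;
}

static bool needs_orphan_check(const struct movestate_sketch *g) {
    // the sync bit occupies the tag's top bit; when set, the threaded
    // linked-list must be scanned for orphans before any write
    return (g->tag >> 31) & 0x1;
}
```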
|
||||
|
||||
---
|
||||
#### `0x5xx` LFS_TYPE_CRC
|
||||
|
||||
Last but not least, the CRC tag marks the end of a commit and provides a
|
||||
checksum for any commits to the metadata block.
|
||||
|
||||
The first 32-bits of the data contain a CRC-32 with a polynomial of
|
||||
`0x04c11db7` initialized with `0xffffffff`. This CRC provides a checksum for
|
||||
all metadata since the previous CRC tag, including the CRC tag itself. For
|
||||
the first commit, this includes the revision count for the metadata block.
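
For reference, a bitwise sketch of this checksum, processed in the common
reflected form (`0xedb88320` is the bit-reversed `0x04c11db7`). Only the
running update is shown; whether a final inversion is applied is left to the
implementation.

```
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_update(uint32_t crc, const void *buffer, size_t size) {
    const uint8_t *data = buffer;
    for (size_t i = 0; i < size; i++) {
        crc ^= data[i];
        for (int b = 0; b < 8; b++) {
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
    }
    return crc;
}

// seed with 0xffffffff and feed every byte since the previous CRC tag,
// including the revision count for the first commit and the CRC tag itself
```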
|
||||
|
||||
However, the size of the data is not limited to 32-bits. The data field may be
larger in order to pad the commit to the next program-aligned boundary.
|
||||
|
||||
In addition, the CRC tag's chunk field contains a set of flags which can
|
||||
change the behaviour of commits. Currently the only flag in use is the lowest
|
||||
bit, which determines the expected state of the valid bit for any following
|
||||
tags. This is used to guarantee that unwritten storage in a metadata block
|
||||
will be detected as invalid.
|
||||
|
||||
Layout of the CRC tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|--- variable length ---]
|
||||
[1| 3| 8 | 10 | 10 ][-- 32 --|--- (size * 8 - 32) ---]
|
||||
^ ^ ^ ^ ^ ^- crc ^- padding
|
||||
| | | | '- size
|
||||
| | | '------ id (0x3ff)
|
||||
| | '----------- valid state
|
||||
| '-------------- type1 (0x5)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
CRC fields:
|
||||
|
||||
1. **Valid state (1-bit)** - Indicates the expected value of the valid bit for
|
||||
any tags in the next commit.
|
||||
|
||||
2. **CRC (32-bits)** - CRC-32 with a polynomial of `0x04c11db7` initialized
|
||||
with `0xffffffff`.
|
||||
|
||||
3. **Padding** - Padding to the next program-aligned boundary. No guarantees
|
||||
are made about the contents.
|
||||
|
||||
---
|
||||
#### `0x5ff` LFS_TYPE_FCRC
|
||||
|
||||
Added in lfs2.1, the optional FCRC tag contains a checksum of some amount of
|
||||
bytes in the next commit at the time it was erased. This allows us to ensure
|
||||
that we only ever program erased bytes, even if a previous commit failed due
|
||||
to power-loss.
|
||||
|
||||
When programming a commit, the FCRC size must be at least as large as the
|
||||
program block size. However, the program block is not saved on disk, and can
|
||||
change between mounts, so the FCRC size on disk may be different than the
|
||||
current program block size.
|
||||
|
||||
If the FCRC is missing or the checksum does not match, we must assume a
|
||||
commit was attempted but failed due to power-loss.
|
||||
|
||||
Layout of the FCRC tag:
|
||||
|
||||
```
|
||||
tag data
|
||||
[-- 32 --][-- 32 --|-- 32 --]
|
||||
[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --]
|
||||
^ ^ ^ ^ ^- fcrc size ^- fcrc
|
||||
| | | '- size (8)
|
||||
| | '------ id (0x3ff)
|
||||
| '------------ type (0x5ff)
|
||||
'----------------- valid bit
|
||||
```
|
||||
|
||||
FCRC fields:
|
||||
|
||||
1. **FCRC size (32-bits)** - Number of bytes after this commit's CRC tag's
|
||||
padding to include in the FCRC.
|
||||
|
||||
2. **FCRC (32-bits)** - CRC of the bytes after this commit's CRC tag's padding
|
||||
when erased. Like the CRC tag, this uses a CRC-32 with a polynomial of
|
||||
`0x04c11db7` initialized with `0xffffffff`.
|
||||
|
||||
---
|
||||
739
components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.c
Normal file
@@ -0,0 +1,739 @@
/*
|
||||
* Emulating block device, wraps filebd and rambd while providing a bunch
|
||||
* of hooks for testing littlefs in various conditions.
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef _POSIX_C_SOURCE
|
||||
#define _POSIX_C_SOURCE 199309L
|
||||
#endif
|
||||
|
||||
#include "bd/lfs_emubd.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <time.h>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
|
||||
// access to lazily-allocated/copy-on-write blocks
|
||||
//
|
||||
// Note we can only modify a block if we have exclusive access to it (rc == 1)
|
||||
//
|
||||
|
||||
static lfs_emubd_block_t *lfs_emubd_incblock(lfs_emubd_block_t *block) {
|
||||
if (block) {
|
||||
block->rc += 1;
|
||||
}
|
||||
return block;
|
||||
}
|
||||
|
||||
static void lfs_emubd_decblock(lfs_emubd_block_t *block) {
|
||||
if (block) {
|
||||
block->rc -= 1;
|
||||
if (block->rc == 0) {
|
||||
free(block);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static lfs_emubd_block_t *lfs_emubd_mutblock(
|
||||
const struct lfs_config *cfg,
|
||||
lfs_emubd_block_t **block) {
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
lfs_emubd_block_t *block_ = *block;
|
||||
if (block_ && block_->rc == 1) {
|
||||
// rc == 1? can modify
|
||||
return block_;
|
||||
|
||||
} else if (block_) {
|
||||
// rc > 1? need to create a copy
|
||||
lfs_emubd_block_t *nblock = malloc(
|
||||
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
|
||||
if (!nblock) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(nblock, block_,
|
||||
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
|
||||
nblock->rc = 1;
|
||||
|
||||
lfs_emubd_decblock(block_);
|
||||
*block = nblock;
|
||||
return nblock;
|
||||
|
||||
} else {
|
||||
// no block? need to allocate
|
||||
lfs_emubd_block_t *nblock = malloc(
|
||||
sizeof(lfs_emubd_block_t) + bd->cfg->erase_size);
|
||||
if (!nblock) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
nblock->rc = 1;
|
||||
nblock->wear = 0;
|
||||
|
||||
// zero for consistency
|
||||
memset(nblock->data,
|
||||
(bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
|
||||
bd->cfg->erase_size);
|
||||
|
||||
*block = nblock;
|
||||
return nblock;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// emubd create/destroy
|
||||
|
||||
int lfs_emubd_create(const struct lfs_config *cfg,
|
||||
const struct lfs_emubd_config *bdcfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
|
||||
"%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
|
||||
".erase_value=%"PRId32", .erase_cycles=%"PRIu32", "
|
||||
".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", "
|
||||
".powerloss_behavior=%"PRIu8", .powerloss_cb=%p, "
|
||||
".powerloss_data=%p, .track_branches=%d})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
(void*)bdcfg,
|
||||
bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
|
||||
bdcfg->erase_count, bdcfg->erase_value, bdcfg->erase_cycles,
|
||||
bdcfg->badblock_behavior, bdcfg->power_cycles,
|
||||
bdcfg->powerloss_behavior, (void*)(uintptr_t)bdcfg->powerloss_cb,
|
||||
bdcfg->powerloss_data, bdcfg->track_branches);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
|
||||
// allocate our block array, all blocks start as uninitialized
|
||||
bd->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
|
||||
if (!bd->blocks) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
memset(bd->blocks, 0, bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
|
||||
|
||||
// setup testing things
|
||||
bd->readed = 0;
|
||||
bd->proged = 0;
|
||||
bd->erased = 0;
|
||||
bd->power_cycles = bd->cfg->power_cycles;
|
||||
bd->ooo_block = -1;
|
||||
bd->ooo_data = NULL;
|
||||
bd->disk = NULL;
|
||||
|
||||
if (bd->cfg->disk_path) {
|
||||
bd->disk = malloc(sizeof(lfs_emubd_disk_t));
|
||||
if (!bd->disk) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
bd->disk->rc = 1;
|
||||
bd->disk->scratch = NULL;
|
||||
|
||||
#ifdef _WIN32
|
||||
bd->disk->fd = open(bd->cfg->disk_path,
|
||||
O_RDWR | O_CREAT | O_BINARY, 0666);
|
||||
#else
|
||||
bd->disk->fd = open(bd->cfg->disk_path,
|
||||
O_RDWR | O_CREAT, 0666);
|
||||
#endif
|
||||
if (bd->disk->fd < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
// if we're emulating erase values, we can keep a block around in
|
||||
// memory of just the erase state to speed up emulated erases
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
bd->disk->scratch = malloc(bd->cfg->erase_size);
|
||||
if (!bd->disk->scratch) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
memset(bd->disk->scratch,
|
||||
bd->cfg->erase_value,
|
||||
bd->cfg->erase_size);
|
||||
|
||||
// go ahead and erase all of the disk, otherwise the file will not
|
||||
// match our internal representation
|
||||
for (size_t i = 0; i < bd->cfg->erase_count; i++) {
|
||||
ssize_t res = write(bd->disk->fd,
|
||||
bd->disk->scratch,
|
||||
bd->cfg->erase_size);
|
||||
if (res < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_create -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_destroy(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// decrement reference counts
|
||||
for (lfs_block_t i = 0; i < bd->cfg->erase_count; i++) {
|
||||
lfs_emubd_decblock(bd->blocks[i]);
|
||||
}
|
||||
free(bd->blocks);
|
||||
|
||||
// clean up other resources
|
||||
lfs_emubd_decblock(bd->ooo_data);
|
||||
if (bd->disk) {
|
||||
bd->disk->rc -= 1;
|
||||
if (bd->disk->rc == 0) {
|
||||
close(bd->disk->fd);
|
||||
free(bd->disk->scratch);
|
||||
free(bd->disk);
|
||||
}
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_destroy -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// powerloss hook
|
||||
static int lfs_emubd_powerloss(const struct lfs_config *cfg) {
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// emulate out-of-order writes?
|
||||
lfs_emubd_block_t *ooo_data = NULL;
|
||||
if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO
|
||||
&& bd->ooo_block != -1) {
|
||||
// since writes between syncs are allowed to be out-of-order, it
|
||||
// shouldn't hurt to restore the first write on powerloss, right?
|
||||
ooo_data = bd->blocks[bd->ooo_block];
|
||||
bd->blocks[bd->ooo_block] = lfs_emubd_incblock(bd->ooo_data);
|
||||
|
||||
// mirror to disk file?
|
||||
if (bd->disk
|
||||
&& (bd->blocks[bd->ooo_block]
|
||||
|| bd->cfg->erase_value != -1)) {
|
||||
off_t res1 = lseek(bd->disk->fd,
|
||||
(off_t)bd->ooo_block*bd->cfg->erase_size,
|
||||
SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
return -errno;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->disk->fd,
|
||||
(bd->blocks[bd->ooo_block])
|
||||
? bd->blocks[bd->ooo_block]->data
|
||||
: bd->disk->scratch,
|
||||
bd->cfg->erase_size);
|
||||
if (res2 < 0) {
|
||||
return -errno;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// simulate power loss
|
||||
bd->cfg->powerloss_cb(bd->cfg->powerloss_data);
|
||||
|
||||
// if we continue, undo out-of-order write emulation
|
||||
if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO
|
||||
&& bd->ooo_block != -1) {
|
||||
lfs_emubd_decblock(bd->blocks[bd->ooo_block]);
|
||||
bd->blocks[bd->ooo_block] = ooo_data;
|
||||
|
||||
// mirror to disk file?
|
||||
if (bd->disk
|
||||
&& (bd->blocks[bd->ooo_block]
|
||||
|| bd->cfg->erase_value != -1)) {
|
||||
off_t res1 = lseek(bd->disk->fd,
|
||||
(off_t)bd->ooo_block*bd->cfg->erase_size,
|
||||
SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
return -errno;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->disk->fd,
|
||||
(bd->blocks[bd->ooo_block])
|
||||
? bd->blocks[bd->ooo_block]->data
|
||||
: bd->disk->scratch,
|
||||
bd->cfg->erase_size);
|
||||
if (res2 < 0) {
|
||||
return -errno;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// block device API
|
||||
|
||||
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
LFS_ASSERT(off % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(off+size <= bd->cfg->erase_size);
|
||||
|
||||
// get the block
|
||||
const lfs_emubd_block_t *b = bd->blocks[block];
|
||||
if (b) {
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles &&
|
||||
bd->cfg->badblock_behavior == LFS_EMUBD_BADBLOCK_READERROR) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", LFS_ERR_CORRUPT);
|
||||
return LFS_ERR_CORRUPT;
|
||||
}
|
||||
|
||||
// read data
|
||||
memcpy(buffer, &b->data[off], size);
|
||||
} else {
|
||||
// zero for consistency
|
||||
memset(buffer,
|
||||
(bd->cfg->erase_value != -1) ? bd->cfg->erase_value : 0,
|
||||
size);
|
||||
}
|
||||
|
||||
// track reads
|
||||
bd->readed += size;
|
||||
if (bd->cfg->read_sleep) {
|
||||
int err = nanosleep(&(struct timespec){
|
||||
.tv_sec=bd->cfg->read_sleep/1000000000,
|
||||
.tv_nsec=bd->cfg->read_sleep%1000000000},
|
||||
NULL);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_read -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
LFS_ASSERT(off % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(off+size <= bd->cfg->erase_size);
|
||||
|
||||
// get the block
|
||||
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
|
||||
if (!b) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles) {
|
||||
if (bd->cfg->badblock_behavior ==
|
||||
LFS_EMUBD_BADBLOCK_PROGERROR) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_CORRUPT);
|
||||
return LFS_ERR_CORRUPT;
|
||||
} else if (bd->cfg->badblock_behavior ==
|
||||
LFS_EMUBD_BADBLOCK_PROGNOOP ||
|
||||
bd->cfg->badblock_behavior ==
|
||||
LFS_EMUBD_BADBLOCK_ERASENOOP) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// were we erased properly?
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
for (lfs_off_t i = 0; i < size; i++) {
|
||||
LFS_ASSERT(b->data[off+i] == bd->cfg->erase_value);
|
||||
}
|
||||
}
|
||||
|
||||
// prog data
|
||||
memcpy(&b->data[off], buffer, size);
|
||||
|
||||
// mirror to disk file?
|
||||
if (bd->disk) {
|
||||
off_t res1 = lseek(bd->disk->fd,
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off,
|
||||
SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->disk->fd, buffer, size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
// track progs
|
||||
bd->proged += size;
|
||||
if (bd->cfg->prog_sleep) {
|
||||
int err = nanosleep(&(struct timespec){
|
||||
.tv_sec=bd->cfg->prog_sleep/1000000000,
|
||||
.tv_nsec=bd->cfg->prog_sleep%1000000000},
|
||||
NULL);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
// lose power?
|
||||
if (bd->power_cycles > 0) {
|
||||
bd->power_cycles -= 1;
|
||||
if (bd->power_cycles == 0) {
|
||||
int err = lfs_emubd_powerloss(cfg);
|
||||
if (err) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
|
||||
(void*)cfg, block, ((lfs_emubd_t*)cfg->context)->cfg->erase_size);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// emulate out-of-order writes? save first write
|
||||
if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO
|
||||
&& bd->ooo_block == -1) {
|
||||
bd->ooo_block = block;
|
||||
bd->ooo_data = lfs_emubd_incblock(bd->blocks[block]);
|
||||
}
|
||||
|
||||
// get the block
|
||||
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
|
||||
if (!b) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
|
||||
// block bad?
|
||||
if (bd->cfg->erase_cycles) {
|
||||
if (b->wear >= bd->cfg->erase_cycles) {
|
||||
if (bd->cfg->badblock_behavior ==
|
||||
LFS_EMUBD_BADBLOCK_ERASEERROR) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_CORRUPT);
|
||||
return LFS_ERR_CORRUPT;
|
||||
} else if (bd->cfg->badblock_behavior ==
|
||||
LFS_EMUBD_BADBLOCK_ERASENOOP) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
// mark wear
|
||||
b->wear += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// emulate an erase value?
|
||||
if (bd->cfg->erase_value != -1) {
|
||||
memset(b->data, bd->cfg->erase_value, bd->cfg->erase_size);
|
||||
|
||||
// mirror to disk file?
|
||||
if (bd->disk) {
|
||||
off_t res1 = lseek(bd->disk->fd,
|
||||
(off_t)block*bd->cfg->erase_size,
|
||||
SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->disk->fd,
|
||||
bd->disk->scratch,
|
||||
bd->cfg->erase_size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// track erases
|
||||
bd->erased += bd->cfg->erase_size;
|
||||
if (bd->cfg->erase_sleep) {
|
||||
int err = nanosleep(&(struct timespec){
|
||||
.tv_sec=bd->cfg->erase_sleep/1000000000,
|
||||
.tv_nsec=bd->cfg->erase_sleep%1000000000},
|
||||
NULL);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
// lose power?
|
||||
if (bd->power_cycles > 0) {
|
||||
bd->power_cycles -= 1;
|
||||
if (bd->power_cycles == 0) {
|
||||
int err = lfs_emubd_powerloss(cfg);
|
||||
if (err) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_sync(const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_sync(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// emulate out-of-order writes? reset first write, writes
|
||||
// cannot be out-of-order across sync
|
||||
if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO) {
|
||||
lfs_emubd_decblock(bd->ooo_data);
|
||||
bd->ooo_block = -1;
|
||||
bd->ooo_data = NULL;
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_sync -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/// Additional extended API for driving test features ///
|
||||
|
||||
static int lfs_emubd_crc_(const struct lfs_config *cfg,
|
||||
lfs_block_t block, uint32_t *crc) {
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if crc is valid
|
||||
LFS_ASSERT(block < cfg->block_count);
|
||||
|
||||
// crc the block
|
||||
uint32_t crc_ = 0xffffffff;
|
||||
const lfs_emubd_block_t *b = bd->blocks[block];
|
||||
if (b) {
|
||||
crc_ = lfs_crc(crc_, b->data, cfg->block_size);
|
||||
} else {
|
||||
uint8_t erase_value = (bd->cfg->erase_value != -1)
|
||||
? bd->cfg->erase_value
|
||||
: 0;
|
||||
for (lfs_size_t i = 0; i < cfg->block_size; i++) {
|
||||
crc_ = lfs_crc(crc_, &erase_value, 1);
|
||||
}
|
||||
}
|
||||
*crc = 0xffffffff ^ crc_;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_crc(const struct lfs_config *cfg,
|
||||
lfs_block_t block, uint32_t *crc) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_crc(%p, %"PRIu32", %p)",
|
||||
(void*)cfg, block, crc);
|
||||
int err = lfs_emubd_crc_(cfg, block, crc);
|
||||
LFS_EMUBD_TRACE("lfs_emubd_crc -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_bdcrc(%p, %p)", (void*)cfg, crc);
|
||||
|
||||
uint32_t crc_ = 0xffffffff;
|
||||
for (lfs_block_t i = 0; i < cfg->block_count; i++) {
|
||||
uint32_t i_crc;
|
||||
int err = lfs_emubd_crc_(cfg, i, &i_crc);
|
||||
if (err) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
crc_ = lfs_crc(crc_, &i_crc, sizeof(uint32_t));
|
||||
}
|
||||
*crc = 0xffffffff ^ crc_;
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_readed(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_readed -> %"PRIu64, bd->readed);
|
||||
return bd->readed;
|
||||
}
|
||||
|
||||
lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_proged(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_proged -> %"PRIu64, bd->proged);
|
||||
return bd->proged;
|
||||
}
|
||||
|
||||
lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erased(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_erased -> %"PRIu64, bd->erased);
|
||||
return bd->erased;
|
||||
}
|
||||
|
||||
int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setreaded(%p, %"PRIu64")", (void*)cfg, readed);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
bd->readed = readed;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setreaded -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setproged(%p, %"PRIu64")", (void*)cfg, proged);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
bd->proged = proged;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setproged -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_seterased(%p, %"PRIu64")", (void*)cfg, erased);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
bd->erased = erased;
|
||||
LFS_EMUBD_TRACE("lfs_emubd_seterased -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
|
||||
lfs_block_t block) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_wear(%p, %"PRIu32")", (void*)cfg, block);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if block is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// get the wear
|
||||
lfs_emubd_wear_t wear;
|
||||
const lfs_emubd_block_t *b = bd->blocks[block];
|
||||
if (b) {
|
||||
wear = b->wear;
|
||||
} else {
|
||||
wear = 0;
|
||||
}
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_wear -> %"PRIi32, wear);
|
||||
return wear;
|
||||
}
|
||||
|
||||
int lfs_emubd_setwear(const struct lfs_config *cfg,
|
||||
lfs_block_t block, lfs_emubd_wear_t wear) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setwear(%p, %"PRIu32", %"PRIi32")",
|
||||
(void*)cfg, block, wear);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// check if block is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// set the wear
|
||||
lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]);
|
||||
if (!b) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
b->wear = wear;
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
lfs_emubd_spowercycles_t lfs_emubd_powercycles(
|
||||
const struct lfs_config *cfg) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_powercycles(%p)", (void*)cfg);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %"PRIi32, bd->power_cycles);
|
||||
return bd->power_cycles;
|
||||
}
|
||||
|
||||
int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
|
||||
lfs_emubd_powercycles_t power_cycles) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_setpowercycles(%p, %"PRIi32")",
|
||||
(void*)cfg, power_cycles);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
bd->power_cycles = power_cycles;
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_copy(%p, %p)", (void*)cfg, (void*)copy);
|
||||
lfs_emubd_t *bd = cfg->context;
|
||||
|
||||
// lazily copy over our block array
|
||||
copy->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*));
|
||||
if (!copy->blocks) {
|
||||
LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", LFS_ERR_NOMEM);
|
||||
return LFS_ERR_NOMEM;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < bd->cfg->erase_count; i++) {
|
||||
copy->blocks[i] = lfs_emubd_incblock(bd->blocks[i]);
|
||||
}
|
||||
|
||||
// other state
|
||||
copy->readed = bd->readed;
|
||||
copy->proged = bd->proged;
|
||||
copy->erased = bd->erased;
|
||||
copy->power_cycles = bd->power_cycles;
|
||||
copy->ooo_block = bd->ooo_block;
|
||||
copy->ooo_data = lfs_emubd_incblock(bd->ooo_data);
|
||||
copy->disk = bd->disk;
|
||||
if (copy->disk) {
|
||||
copy->disk->rc += 1;
|
||||
}
|
||||
copy->cfg = bd->cfg;
|
||||
|
||||
LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
244
components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.h
Normal file
@@ -0,0 +1,244 @@
/*
|
||||
* Emulating block device, wraps filebd and rambd while providing a bunch
|
||||
* of hooks for testing littlefs in various conditions.
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_EMUBD_H
|
||||
#define LFS_EMUBD_H
|
||||
|
||||
#include "lfs.h"
|
||||
#include "lfs_util.h"
|
||||
#include "bd/lfs_rambd.h"
|
||||
#include "bd/lfs_filebd.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
// Block device specific tracing
|
||||
#ifndef LFS_EMUBD_TRACE
|
||||
#ifdef LFS_EMUBD_YES_TRACE
|
||||
#define LFS_EMUBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
|
||||
#else
|
||||
#define LFS_EMUBD_TRACE(...)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Mode determining how "bad-blocks" behave during testing. This simulates
|
||||
// some real-world circumstances such as progs not sticking (prog-noop),
|
||||
// a readonly disk (erase-noop), and ECC failures (read-error).
|
||||
//
|
||||
// Note that read-noop is not allowed. Read _must_ return a consistent (but
// may be arbitrary) value on every read.
|
||||
typedef enum lfs_emubd_badblock_behavior {
|
||||
LFS_EMUBD_BADBLOCK_PROGERROR = 0, // Error on prog
|
||||
LFS_EMUBD_BADBLOCK_ERASEERROR = 1, // Error on erase
|
||||
LFS_EMUBD_BADBLOCK_READERROR = 2, // Error on read
|
||||
LFS_EMUBD_BADBLOCK_PROGNOOP = 3, // Prog does nothing silently
|
||||
LFS_EMUBD_BADBLOCK_ERASENOOP = 4, // Erase does nothing silently
|
||||
} lfs_emubd_badblock_behavior_t;
|
||||
|
||||
// Mode determining how power-loss behaves during testing. This supports a
// noop behavior, leaving the data on-disk untouched, and an out-of-order
// behavior that reverts the first block written since the last sync,
// emulating out-of-order writes on power-loss.
|
||||
typedef enum lfs_emubd_powerloss_behavior {
|
||||
LFS_EMUBD_POWERLOSS_NOOP = 0, // Progs are atomic
|
||||
LFS_EMUBD_POWERLOSS_OOO = 1, // Blocks are written out-of-order
|
||||
} lfs_emubd_powerloss_behavior_t;
|
||||
|
||||
// Type for measuring read/program/erase operations
|
||||
typedef uint64_t lfs_emubd_io_t;
|
||||
typedef int64_t lfs_emubd_sio_t;
|
||||
|
||||
// Type for measuring wear
|
||||
typedef uint32_t lfs_emubd_wear_t;
|
||||
typedef int32_t lfs_emubd_swear_t;
|
||||
|
||||
// Type for tracking power-cycles
|
||||
typedef uint32_t lfs_emubd_powercycles_t;
|
||||
typedef int32_t lfs_emubd_spowercycles_t;
|
||||
|
||||
// Type for delays in nanoseconds
|
||||
typedef uint64_t lfs_emubd_sleep_t;
|
||||
typedef int64_t lfs_emubd_ssleep_t;
|
||||
|
||||
// emubd config, this is required for testing
|
||||
struct lfs_emubd_config {
|
||||
// Minimum size of a read operation in bytes.
|
||||
lfs_size_t read_size;
|
||||
|
||||
// Minimum size of a program operation in bytes.
|
||||
lfs_size_t prog_size;
|
||||
|
||||
// Size of an erase operation in bytes.
|
||||
lfs_size_t erase_size;
|
||||
|
||||
// Number of erase blocks on the device.
|
||||
lfs_size_t erase_count;
|
||||
|
||||
// 8-bit erase value to use for simulating erases. -1 does not simulate
|
||||
// erases, which can speed up testing by avoiding the extra block-device
|
||||
// operations to store the erase value.
|
||||
int32_t erase_value;
|
||||
|
||||
// Number of erase cycles before a block becomes "bad". The exact behavior
|
||||
// of bad blocks is controlled by badblock_behavior.
|
||||
uint32_t erase_cycles;
|
||||
|
||||
// The mode determining how bad-blocks fail
|
||||
lfs_emubd_badblock_behavior_t badblock_behavior;
|
||||
|
||||
// Number of write operations (erase/prog) before triggering a power-loss.
|
||||
// power_cycles=0 disables this. The exact behavior of power-loss is
|
||||
// controlled by a combination of powerloss_behavior and powerloss_cb.
|
||||
lfs_emubd_powercycles_t power_cycles;
|
||||
|
||||
// The mode determining how power-loss affects disk
|
||||
lfs_emubd_powerloss_behavior_t powerloss_behavior;
|
||||
|
||||
// Function to call to emulate power-loss. The exact behavior of power-loss
|
||||
// is up to the runner to provide.
|
||||
void (*powerloss_cb)(void*);
|
||||
|
||||
// Data for power-loss callback
|
||||
void *powerloss_data;
|
||||
|
||||
// True to track when power-loss could have occurred. Note this involves
// heavy memory usage!
|
||||
bool track_branches;
|
||||
|
||||
// Path to file to use as a mirror of the disk. This provides a way to view
|
||||
// the current state of the block device.
|
||||
const char *disk_path;
|
||||
|
||||
// Artificial delay in nanoseconds, there is no purpose for this other
|
||||
// than slowing down the simulation.
|
||||
lfs_emubd_sleep_t read_sleep;
|
||||
|
||||
// Artificial delay in nanoseconds, there is no purpose for this other
|
||||
// than slowing down the simulation.
|
||||
lfs_emubd_sleep_t prog_sleep;
|
||||
|
||||
// Artificial delay in nanoseconds, there is no purpose for this other
|
||||
// than slowing down the simulation.
|
||||
lfs_emubd_sleep_t erase_sleep;
|
||||
};
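
// Illustrative example (not part of this header): wiring lfs_emubd into an
// lfs_config for testing might look roughly like the following. The values
// are placeholders; any geometry consistent with the fields above works.
//
//     lfs_emubd_t bd;
//     struct lfs_emubd_config bdcfg = {
//         .read_size   = 16,
//         .prog_size   = 16,
//         .erase_size  = 4096,
//         .erase_count = 256,
//         .erase_value = 0xff,
//     };
//     struct lfs_config cfg = {
//         .context = &bd,
//         .read  = lfs_emubd_read,  .prog = lfs_emubd_prog,
//         .erase = lfs_emubd_erase, .sync = lfs_emubd_sync,
//         .read_size = 16, .prog_size = 16,
//         .block_size = 4096, .block_count = 256,
//         .cache_size = 16, .lookahead_size = 16, .block_cycles = 500,
//     };
//     lfs_emubd_create(&cfg, &bdcfg);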
|
||||
|
||||
// A reference counted block
|
||||
typedef struct lfs_emubd_block {
|
||||
uint32_t rc;
|
||||
lfs_emubd_wear_t wear;
|
||||
|
||||
uint8_t data[];
|
||||
} lfs_emubd_block_t;
|
||||
|
||||
// Disk mirror
|
||||
typedef struct lfs_emubd_disk {
|
||||
uint32_t rc;
|
||||
int fd;
|
||||
uint8_t *scratch;
|
||||
} lfs_emubd_disk_t;
|
||||
|
||||
// emubd state
|
||||
typedef struct lfs_emubd {
|
||||
// array of copy-on-write blocks
|
||||
lfs_emubd_block_t **blocks;
|
||||
|
||||
// some other test state
|
||||
lfs_emubd_io_t readed;
|
||||
lfs_emubd_io_t proged;
|
||||
lfs_emubd_io_t erased;
|
||||
lfs_emubd_powercycles_t power_cycles;
|
||||
lfs_ssize_t ooo_block;
|
||||
lfs_emubd_block_t *ooo_data;
|
||||
lfs_emubd_disk_t *disk;
|
||||
|
||||
const struct lfs_emubd_config *cfg;
|
||||
} lfs_emubd_t;
|
||||
|
||||
|
||||
/// Block device API ///
|
||||
|
||||
// Create an emulating block device using the geometry in lfs_config
|
||||
int lfs_emubd_create(const struct lfs_config *cfg,
|
||||
const struct lfs_emubd_config *bdcfg);
|
||||
|
||||
// Clean up memory associated with block device
|
||||
int lfs_emubd_destroy(const struct lfs_config *cfg);
|
||||
|
||||
// Read a block
|
||||
int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a block
|
||||
//
|
||||
// The block must have previously been erased.
|
||||
int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block
|
||||
//
|
||||
// A block must be erased before being programmed. The
|
||||
// state of an erased block is undefined.
|
||||
int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block);
|
||||
|
||||
// Sync the block device
|
||||
int lfs_emubd_sync(const struct lfs_config *cfg);
|
||||
|
||||
|
||||
/// Additional extended API for driving test features ///
|
||||
|
||||
// A CRC of a block for debugging purposes
|
||||
int lfs_emubd_crc(const struct lfs_config *cfg,
|
||||
lfs_block_t block, uint32_t *crc);
|
||||
|
||||
// A CRC of the entire block device for debugging purposes
|
||||
int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc);
|
||||
|
||||
// Get total amount of bytes read
|
||||
lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg);
|
||||
|
||||
// Get total amount of bytes programmed
|
||||
lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg);
|
||||
|
||||
// Get total amount of bytes erased
|
||||
lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg);
|
||||
|
||||
// Manually set amount of bytes read
|
||||
int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed);
|
||||
|
||||
// Manually set amount of bytes programmed
|
||||
int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged);
|
||||
|
||||
// Manually set amount of bytes erased
|
||||
int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased);
|
||||
|
||||
// Get simulated wear on a given block
|
||||
lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg,
|
||||
lfs_block_t block);
|
||||
|
||||
// Manually set simulated wear on a given block
|
||||
int lfs_emubd_setwear(const struct lfs_config *cfg,
|
||||
lfs_block_t block, lfs_emubd_wear_t wear);
|
||||
|
||||
// Get the remaining power-cycles
|
||||
lfs_emubd_spowercycles_t lfs_emubd_powercycles(
|
||||
const struct lfs_config *cfg);
|
||||
|
||||
// Manually set the remaining power-cycles
|
||||
int lfs_emubd_setpowercycles(const struct lfs_config *cfg,
|
||||
lfs_emubd_powercycles_t power_cycles);
|
||||
|
||||
// Create a copy-on-write copy of the state of this block device
|
||||
int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
167
components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.c
Normal file
@@ -0,0 +1,167 @@
/*
|
||||
* Block device emulated in a file
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include "bd/lfs_filebd.h"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
int lfs_filebd_create(const struct lfs_config *cfg, const char *path,
|
||||
const struct lfs_filebd_config *bdcfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, "
|
||||
".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
|
||||
"\"%s\", "
|
||||
"%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
|
||||
".erase_size=%"PRIu32", .erase_count=%"PRIu32"})",
|
||||
(void*)cfg, cfg->context,
|
||||
(void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
|
||||
(void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
|
||||
path,
|
||||
(void*)bdcfg,
|
||||
bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
|
||||
bdcfg->erase_count);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
bd->cfg = bdcfg;
|
||||
|
||||
// open file
|
||||
#ifdef _WIN32
|
||||
bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666);
|
||||
#else
|
||||
bd->fd = open(path, O_RDWR | O_CREAT, 0666);
|
||||
#endif
|
||||
|
||||
if (bd->fd < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_create -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_destroy(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
int err = close(bd->fd);
|
||||
if (err < 0) {
|
||||
err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err);
|
||||
return err;
|
||||
}
|
||||
LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if read is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
LFS_ASSERT(off % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->read_size == 0);
|
||||
LFS_ASSERT(off+size <= bd->cfg->erase_size);
|
||||
|
||||
// zero for reproducibility (in case file is truncated)
|
||||
memset(buffer, 0, size);
|
||||
|
||||
// read
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = read(bd->fd, buffer, size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog(%p, "
|
||||
"0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
|
||||
(void*)cfg, block, off, buffer, size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if write is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
LFS_ASSERT(off % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(size % bd->cfg->prog_size == 0);
|
||||
LFS_ASSERT(off+size <= bd->cfg->erase_size);
|
||||
|
||||
// program data
|
||||
off_t res1 = lseek(bd->fd,
|
||||
(off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET);
|
||||
if (res1 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
ssize_t res2 = write(bd->fd, buffer, size);
|
||||
if (res2 < 0) {
|
||||
int err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
|
||||
(void*)cfg, block, ((lfs_filebd_t*)cfg->context)->cfg->erase_size);
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
|
||||
// check if erase is valid
|
||||
LFS_ASSERT(block < bd->cfg->erase_count);
|
||||
|
||||
// erase is a noop
|
||||
(void)block;
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int lfs_filebd_sync(const struct lfs_config *cfg) {
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
|
||||
|
||||
// file sync
|
||||
lfs_filebd_t *bd = cfg->context;
|
||||
#ifdef _WIN32
|
||||
int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1;
|
||||
#else
|
||||
int err = fsync(bd->fd);
|
||||
#endif
|
||||
if (err) {
|
||||
err = -errno;
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
|
||||
return err;
|
||||
}
|
||||
|
||||
LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);
|
||||
return 0;
|
||||
}
82
components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.h
Normal file
@@ -0,0 +1,82 @@
/*
 * Block device emulated in a file
 *
 * Copyright (c) 2022, The littlefs authors.
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef LFS_FILEBD_H
#define LFS_FILEBD_H

#include "lfs.h"
#include "lfs_util.h"

#ifdef __cplusplus
extern "C"
{
#endif


// Block device specific tracing
#ifndef LFS_FILEBD_TRACE
#ifdef LFS_FILEBD_YES_TRACE
#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_FILEBD_TRACE(...)
#endif
#endif

// filebd config
struct lfs_filebd_config {
    // Minimum size of a read operation in bytes.
    lfs_size_t read_size;

    // Minimum size of a program operation in bytes.
    lfs_size_t prog_size;

    // Size of an erase operation in bytes.
    lfs_size_t erase_size;

    // Number of erase blocks on the device.
    lfs_size_t erase_count;
};

// filebd state
typedef struct lfs_filebd {
    int fd;
    const struct lfs_filebd_config *cfg;
} lfs_filebd_t;


// Create a file block device
int lfs_filebd_create(const struct lfs_config *cfg, const char *path,
        const struct lfs_filebd_config *bdcfg);

// Clean up memory associated with block device
int lfs_filebd_destroy(const struct lfs_config *cfg);

// Read a block
int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size);

// Program a block
//
// The block must have previously been erased.
int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size);

// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block);

// Sync the block device
int lfs_filebd_sync(const struct lfs_config *cfg);


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
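Taken together, lfs_filebd.c/.h form a complete block-device backend for host-side testing: the backing file is treated as erase_count blocks of erase_size bytes. A minimal sketch of wiring it into a struct lfs_config and mounting littlefs on a plain file follows; the geometry values (4096-byte blocks, 256 blocks, 16-byte caches) and the image name are illustrative choices, not requirements, and error handling is trimmed to keep it short.

#include "lfs.h"
#include "bd/lfs_filebd.h"

int main(void) {
    // block device state and geometry (illustrative numbers)
    lfs_filebd_t bd;
    struct lfs_filebd_config bdcfg = {
        .read_size = 16,
        .prog_size = 16,
        .erase_size = 4096,
        .erase_count = 256,
    };

    // littlefs talks to the device through these callbacks; block_size and
    // block_count must match the filebd geometry above
    struct lfs_config cfg = {
        .context = &bd,
        .read = lfs_filebd_read,
        .prog = lfs_filebd_prog,
        .erase = lfs_filebd_erase,
        .sync = lfs_filebd_sync,
        .read_size = 16,
        .prog_size = 16,
        .block_size = 4096,
        .block_count = 256,
        .block_cycles = 500,
        .cache_size = 16,
        .lookahead_size = 16,
    };

    lfs_filebd_create(&cfg, "littlefs.img", &bdcfg);

    // format once, then mount
    lfs_t lfs;
    lfs_format(&lfs, &cfg);
    lfs_mount(&lfs, &cfg);

    // ... file operations go here ...

    lfs_unmount(&lfs);
    lfs_filebd_destroy(&cfg);
    return 0;
}

Leaving the remaining lfs_config fields zeroed keeps the documented defaults, which is why the sketch only sets the geometry and cache fields.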
118
components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.c
Normal file
@@ -0,0 +1,118 @@
/*
 * Block device emulated in RAM
 *
 * Copyright (c) 2022, The littlefs authors.
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include "bd/lfs_rambd.h"

int lfs_rambd_create(const struct lfs_config *cfg,
        const struct lfs_rambd_config *bdcfg) {
    LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p}, "
                "%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".erase_size=%"PRIu32", .erase_count=%"PRIu32", "
                ".buffer=%p})",
            (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            (void*)bdcfg,
            bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size,
            bdcfg->erase_count, bdcfg->buffer);
    lfs_rambd_t *bd = cfg->context;
    bd->cfg = bdcfg;

    // allocate buffer?
    if (bd->cfg->buffer) {
        bd->buffer = bd->cfg->buffer;
    } else {
        bd->buffer = lfs_malloc(bd->cfg->erase_size * bd->cfg->erase_count);
        if (!bd->buffer) {
            LFS_RAMBD_TRACE("lfs_rambd_create -> %d", LFS_ERR_NOMEM);
            return LFS_ERR_NOMEM;
        }
    }

    // zero for reproducibility
    memset(bd->buffer, 0, bd->cfg->erase_size * bd->cfg->erase_count);

    LFS_RAMBD_TRACE("lfs_rambd_create -> %d", 0);
    return 0;
}

int lfs_rambd_destroy(const struct lfs_config *cfg) {
    LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg);
    // clean up memory
    lfs_rambd_t *bd = cfg->context;
    if (!bd->cfg->buffer) {
        lfs_free(bd->buffer);
    }
    LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0);
    return 0;
}

int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    LFS_RAMBD_TRACE("lfs_rambd_read(%p, "
                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
            (void*)cfg, block, off, buffer, size);
    lfs_rambd_t *bd = cfg->context;

    // check if read is valid
    LFS_ASSERT(block < bd->cfg->erase_count);
    LFS_ASSERT(off % bd->cfg->read_size == 0);
    LFS_ASSERT(size % bd->cfg->read_size == 0);
    LFS_ASSERT(off+size <= bd->cfg->erase_size);

    // read data
    memcpy(buffer, &bd->buffer[block*bd->cfg->erase_size + off], size);

    LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0);
    return 0;
}

int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    LFS_RAMBD_TRACE("lfs_rambd_prog(%p, "
                "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")",
            (void*)cfg, block, off, buffer, size);
    lfs_rambd_t *bd = cfg->context;

    // check if write is valid
    LFS_ASSERT(block < bd->cfg->erase_count);
    LFS_ASSERT(off % bd->cfg->prog_size == 0);
    LFS_ASSERT(size % bd->cfg->prog_size == 0);
    LFS_ASSERT(off+size <= bd->cfg->erase_size);

    // program data
    memcpy(&bd->buffer[block*bd->cfg->erase_size + off], buffer, size);

    LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0);
    return 0;
}

int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) {
    LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32" (%"PRIu32"))",
            (void*)cfg, block, ((lfs_rambd_t*)cfg->context)->cfg->erase_size);
    lfs_rambd_t *bd = cfg->context;

    // check if erase is valid
    LFS_ASSERT(block < bd->cfg->erase_count);

    // erase is a noop
    (void)block;

    LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0);
    return 0;
}

int lfs_rambd_sync(const struct lfs_config *cfg) {
    LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg);

    // sync is a noop
    (void)cfg;

    LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0);
    return 0;
}
85
components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.h
Normal file
@@ -0,0 +1,85 @@
/*
 * Block device emulated in RAM
 *
 * Copyright (c) 2022, The littlefs authors.
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef LFS_RAMBD_H
#define LFS_RAMBD_H

#include "lfs.h"
#include "lfs_util.h"

#ifdef __cplusplus
extern "C"
{
#endif


// Block device specific tracing
#ifndef LFS_RAMBD_TRACE
#ifdef LFS_RAMBD_YES_TRACE
#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__)
#else
#define LFS_RAMBD_TRACE(...)
#endif
#endif

// rambd config
struct lfs_rambd_config {
    // Minimum size of a read operation in bytes.
    lfs_size_t read_size;

    // Minimum size of a program operation in bytes.
    lfs_size_t prog_size;

    // Size of an erase operation in bytes.
    lfs_size_t erase_size;

    // Number of erase blocks on the device.
    lfs_size_t erase_count;

    // Optional statically allocated buffer for the block device.
    void *buffer;
};

// rambd state
typedef struct lfs_rambd {
    uint8_t *buffer;
    const struct lfs_rambd_config *cfg;
} lfs_rambd_t;


// Create a RAM block device
int lfs_rambd_create(const struct lfs_config *cfg,
        const struct lfs_rambd_config *bdcfg);

// Clean up memory associated with block device
int lfs_rambd_destroy(const struct lfs_config *cfg);

// Read a block
int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size);

// Program a block
//
// The block must have previously been erased.
int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size);

// Erase a block
//
// A block must be erased before being programmed. The
// state of an erased block is undefined.
int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block);

// Sync the block device
int lfs_rambd_sync(const struct lfs_config *cfg);


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
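Because lfs_rambd_config.buffer and the lfs_config read/prog/lookahead buffers are all optional static buffers, the RAM block device can run without lfs_malloc at all. A sketch under an illustrative 4096-byte by 16-block geometry, with every buffer sized directly from the config fields documented above (the helper name fs_start is ours):

#include "lfs.h"
#include "bd/lfs_rambd.h"

#define BLOCK_SIZE  4096
#define BLOCK_COUNT 16
#define CACHE_SIZE  64
#define LOOKAHEAD   16

// storage for the emulated device and for littlefs's own caches
static uint8_t bd_buffer[BLOCK_SIZE * BLOCK_COUNT];
static uint8_t read_buffer[CACHE_SIZE];
static uint8_t prog_buffer[CACHE_SIZE];
static uint8_t lookahead_buffer[LOOKAHEAD];

static lfs_rambd_t bd;
static const struct lfs_rambd_config bdcfg = {
    .read_size = 16,
    .prog_size = 16,
    .erase_size = BLOCK_SIZE,
    .erase_count = BLOCK_COUNT,
    .buffer = bd_buffer,            // rambd uses this instead of lfs_malloc
};

static const struct lfs_config cfg = {
    .context = &bd,
    .read = lfs_rambd_read,
    .prog = lfs_rambd_prog,
    .erase = lfs_rambd_erase,
    .sync = lfs_rambd_sync,
    .read_size = 16,
    .prog_size = 16,
    .block_size = BLOCK_SIZE,
    .block_count = BLOCK_COUNT,
    .block_cycles = -1,             // RAM does not wear out
    .cache_size = CACHE_SIZE,
    .lookahead_size = LOOKAHEAD,
    .read_buffer = read_buffer,     // static caches, no lfs_malloc in lfs_t
    .prog_buffer = prog_buffer,
    .lookahead_buffer = lookahead_buffer,
};

static lfs_t lfs;

// Create the RAM device, format it, and mount; returns 0 or a littlefs error.
int fs_start(void) {
    int err = lfs_rambd_create(&cfg, &bdcfg);
    if (err) {
        return err;
    }
    err = lfs_format(&lfs, &cfg);
    if (err) {
        return err;
    }
    return lfs_mount(&lfs, &cfg);
}

Per-file caches would be made static the same way, by opening files with lfs_file_opencfg and a caller-provided buffer, as noted in the lfs_file_open comments later in lfs.h.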
@@ -0,0 +1,270 @@
|
|||
[cases.bench_dir_open]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.N = 1024
|
||||
defines.FILE_SIZE = 8
|
||||
defines.CHUNK_SIZE = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// first create the files
|
||||
char name[256];
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
sprintf(name, "file%08x", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, name,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
|
||||
uint32_t file_prng = i;
|
||||
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
|
||||
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
|
||||
buffer[k] = BENCH_PRNG(&file_prng);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
// then read the files
|
||||
BENCH_START();
|
||||
uint32_t prng = 42;
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (N-1-i)
|
||||
: BENCH_PRNG(&prng) % N;
|
||||
sprintf(name, "file%08x", i_);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0;
|
||||
|
||||
uint32_t file_prng = i_;
|
||||
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
|
||||
assert(buffer[k] == BENCH_PRNG(&file_prng));
|
||||
}
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_dir_creat]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.N = 1024
|
||||
defines.FILE_SIZE = 8
|
||||
defines.CHUNK_SIZE = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
BENCH_START();
|
||||
uint32_t prng = 42;
|
||||
char name[256];
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (N-1-i)
|
||||
: BENCH_PRNG(&prng) % N;
|
||||
sprintf(name, "file%08x", i_);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, name,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
uint32_t file_prng = i_;
|
||||
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
|
||||
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
|
||||
buffer[k] = BENCH_PRNG(&file_prng);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_dir_remove]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.N = 1024
|
||||
defines.FILE_SIZE = 8
|
||||
defines.CHUNK_SIZE = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// first create the files
|
||||
char name[256];
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
sprintf(name, "file%08x", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, name,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
|
||||
uint32_t file_prng = i;
|
||||
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
|
||||
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
|
||||
buffer[k] = BENCH_PRNG(&file_prng);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
// then remove the files
|
||||
BENCH_START();
|
||||
uint32_t prng = 42;
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (N-1-i)
|
||||
: BENCH_PRNG(&prng) % N;
|
||||
sprintf(name, "file%08x", i_);
|
||||
int err = lfs_remove(&lfs, name);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
}
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_dir_read]
|
||||
defines.N = 1024
|
||||
defines.FILE_SIZE = 8
|
||||
defines.CHUNK_SIZE = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// first create the files
|
||||
char name[256];
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
sprintf(name, "file%08x", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, name,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
|
||||
uint32_t file_prng = i;
|
||||
for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
|
||||
for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
|
||||
buffer[k] = BENCH_PRNG(&file_prng);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
// then read the directory
|
||||
BENCH_START();
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
struct lfs_info info;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
for (int i = 0; i < N; i++) {
|
||||
sprintf(name, "file%08x", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(strcmp(info.name, name) == 0);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_dir_mkdir]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.N = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
BENCH_START();
|
||||
uint32_t prng = 42;
|
||||
char name[256];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (N-1-i)
|
||||
: BENCH_PRNG(&prng) % N;
|
||||
printf("hm %d\n", i);
|
||||
sprintf(name, "dir%08x", i_);
|
||||
int err = lfs_mkdir(&lfs, name);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_dir_rmdir]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.N = 8
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// first create the dirs
|
||||
char name[256];
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
sprintf(name, "dir%08x", i);
|
||||
lfs_mkdir(&lfs, name) => 0;
|
||||
}
|
||||
|
||||
// then remove the dirs
|
||||
BENCH_START();
|
||||
uint32_t prng = 42;
|
||||
for (lfs_size_t i = 0; i < N; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (N-1-i)
|
||||
: BENCH_PRNG(&prng) % N;
|
||||
sprintf(name, "dir%08x", i_);
|
||||
int err = lfs_remove(&lfs, name);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
}
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
@@ -0,0 +1,95 @@
|
|||
[cases.bench_file_read]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.SIZE = '128*1024'
|
||||
defines.CHUNK_SIZE = 64
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
|
||||
|
||||
// first write the file
|
||||
lfs_file_t file;
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
lfs_file_open(&lfs, &file, "file",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
for (lfs_size_t i = 0; i < chunks; i++) {
|
||||
uint32_t chunk_prng = i;
|
||||
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
|
||||
buffer[j] = BENCH_PRNG(&chunk_prng);
|
||||
}
|
||||
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// then read the file
|
||||
BENCH_START();
|
||||
lfs_file_open(&lfs, &file, "file", LFS_O_RDONLY) => 0;
|
||||
|
||||
uint32_t prng = 42;
|
||||
for (lfs_size_t i = 0; i < chunks; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (chunks-1-i)
|
||||
: BENCH_PRNG(&prng) % chunks;
|
||||
lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
|
||||
=> i_*CHUNK_SIZE;
|
||||
lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
|
||||
uint32_t chunk_prng = i_;
|
||||
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
|
||||
assert(buffer[j] == BENCH_PRNG(&chunk_prng));
|
||||
}
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.bench_file_write]
|
||||
# 0 = in-order
|
||||
# 1 = reversed-order
|
||||
# 2 = random-order
|
||||
defines.ORDER = [0, 1, 2]
|
||||
defines.SIZE = '128*1024'
|
||||
defines.CHUNK_SIZE = 64
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE;
|
||||
|
||||
BENCH_START();
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "file",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
|
||||
uint8_t buffer[CHUNK_SIZE];
|
||||
uint32_t prng = 42;
|
||||
for (lfs_size_t i = 0; i < chunks; i++) {
|
||||
lfs_off_t i_
|
||||
= (ORDER == 0) ? i
|
||||
: (ORDER == 1) ? (chunks-1-i)
|
||||
: BENCH_PRNG(&prng) % chunks;
|
||||
uint32_t chunk_prng = i_;
|
||||
for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) {
|
||||
buffer[j] = BENCH_PRNG(&chunk_prng);
|
||||
}
|
||||
|
||||
lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET)
|
||||
=> i_*CHUNK_SIZE;
|
||||
lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
BENCH_STOP();
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
@@ -0,0 +1,56 @@
[cases.bench_superblocks_found]
# support benchmarking with files
defines.N = [0, 1024]
defines.FILE_SIZE = 8
defines.CHUNK_SIZE = 8
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;

    // create files?
    lfs_mount(&lfs, cfg) => 0;
    char name[256];
    uint8_t buffer[CHUNK_SIZE];
    for (lfs_size_t i = 0; i < N; i++) {
        sprintf(name, "file%08x", i);
        lfs_file_t file;
        lfs_file_open(&lfs, &file, name,
                LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;

        for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) {
            for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) {
                buffer[k] = i+j+k;
            }
            lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE;
        }

        lfs_file_close(&lfs, &file) => 0;
    }
    lfs_unmount(&lfs) => 0;

    BENCH_START();
    lfs_mount(&lfs, cfg) => 0;
    BENCH_STOP();

    lfs_unmount(&lfs) => 0;
'''

[cases.bench_superblocks_missing]
code = '''
    lfs_t lfs;

    BENCH_START();
    int err = lfs_mount(&lfs, cfg);
    assert(err != 0);
    BENCH_STOP();
'''

[cases.bench_superblocks_format]
code = '''
    lfs_t lfs;

    BENCH_START();
    lfs_format(&lfs, cfg) => 0;
    BENCH_STOP();
'''
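These cases depend on the littlefs bench harness: cfg is supplied by the runner, the `=> n` shorthand asserts the call returns n, and BENCH_START/BENCH_STOP bracket the measured region. Outside the harness, the superblock-found case reduces to timing a bare lfs_mount on an already-formatted image; a rough standalone sketch, assuming a populated cfg such as the filebd one above and using wall-clock time instead of the harness counters:

#include <time.h>
#include "lfs.h"

// Time lfs_mount on an already-formatted image; returns seconds, or -1.0
// if the mount fails (the bench_superblocks_missing scenario).
double bench_mount(lfs_t *lfs, const struct lfs_config *cfg) {
    clock_t start = clock();
    int err = lfs_mount(lfs, cfg);
    clock_t stop = clock();
    if (err) {
        return -1.0;
    }
    lfs_unmount(lfs);
    return (double)(stop - start) / CLOCKS_PER_SEC;
}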
6549
components/joltwallet__littlefs/src/littlefs/lfs.c
Normal file
File diff suppressed because it is too large
801
components/joltwallet__littlefs/src/littlefs/lfs.h
Normal file
@@ -0,0 +1,801 @@
|
|||
/*
|
||||
* The little filesystem
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* Copyright (c) 2017, Arm Limited. All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef LFS_H
|
||||
#define LFS_H
|
||||
|
||||
#include "lfs_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
|
||||
/// Version info ///
|
||||
|
||||
// Software library version
|
||||
// Major (top-nibble), incremented on backwards incompatible changes
|
||||
// Minor (bottom-nibble), incremented on feature additions
|
||||
#define LFS_VERSION 0x0002000b
|
||||
#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
|
||||
#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
|
||||
|
||||
// Version of On-disk data structures
|
||||
// Major (top-nibble), incremented on backwards incompatible changes
|
||||
// Minor (bottom-nibble), incremented on feature additions
|
||||
#define LFS_DISK_VERSION 0x00020001
|
||||
#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16))
|
||||
#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0))
|
||||
|
||||
|
||||
/// Definitions ///
|
||||
|
||||
// Type definitions
|
||||
typedef uint32_t lfs_size_t;
|
||||
typedef uint32_t lfs_off_t;
|
||||
|
||||
typedef int32_t lfs_ssize_t;
|
||||
typedef int32_t lfs_soff_t;
|
||||
|
||||
typedef uint32_t lfs_block_t;
|
||||
|
||||
// Maximum name size in bytes, may be redefined to reduce the size of the
|
||||
// info struct. Limited to <= 1022. Stored in superblock and must be
|
||||
// respected by other littlefs drivers.
|
||||
#ifndef LFS_NAME_MAX
|
||||
#define LFS_NAME_MAX 255
|
||||
#endif
|
||||
|
||||
// Maximum size of a file in bytes, may be redefined to limit to support other
|
||||
// drivers. Limited on disk to <= 2147483647. Stored in superblock and must be
|
||||
// respected by other littlefs drivers.
|
||||
#ifndef LFS_FILE_MAX
|
||||
#define LFS_FILE_MAX 2147483647
|
||||
#endif
|
||||
|
||||
// Maximum size of custom attributes in bytes, may be redefined, but there is
|
||||
// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022. Stored
|
||||
// in superblock and must be respected by other littlefs drivers.
|
||||
#ifndef LFS_ATTR_MAX
|
||||
#define LFS_ATTR_MAX 1022
|
||||
#endif
|
||||
|
||||
// Possible error codes, these are negative to allow
|
||||
// valid positive return values
|
||||
enum lfs_error {
|
||||
LFS_ERR_OK = 0, // No error
|
||||
LFS_ERR_IO = -5, // Error during device operation
|
||||
LFS_ERR_CORRUPT = -84, // Corrupted
|
||||
LFS_ERR_NOENT = -2, // No directory entry
|
||||
LFS_ERR_EXIST = -17, // Entry already exists
|
||||
LFS_ERR_NOTDIR = -20, // Entry is not a dir
|
||||
LFS_ERR_ISDIR = -21, // Entry is a dir
|
||||
LFS_ERR_NOTEMPTY = -39, // Dir is not empty
|
||||
LFS_ERR_BADF = -9, // Bad file number
|
||||
LFS_ERR_FBIG = -27, // File too large
|
||||
LFS_ERR_INVAL = -22, // Invalid parameter
|
||||
LFS_ERR_NOSPC = -28, // No space left on device
|
||||
LFS_ERR_NOMEM = -12, // No more memory available
|
||||
LFS_ERR_NOATTR = -61, // No data/attr available
|
||||
LFS_ERR_NAMETOOLONG = -36, // File name too long
|
||||
};
|
||||
|
||||
// File types
|
||||
enum lfs_type {
|
||||
// file types
|
||||
LFS_TYPE_REG = 0x001,
|
||||
LFS_TYPE_DIR = 0x002,
|
||||
|
||||
// internally used types
|
||||
LFS_TYPE_SPLICE = 0x400,
|
||||
LFS_TYPE_NAME = 0x000,
|
||||
LFS_TYPE_STRUCT = 0x200,
|
||||
LFS_TYPE_USERATTR = 0x300,
|
||||
LFS_TYPE_FROM = 0x100,
|
||||
LFS_TYPE_TAIL = 0x600,
|
||||
LFS_TYPE_GLOBALS = 0x700,
|
||||
LFS_TYPE_CRC = 0x500,
|
||||
|
||||
// internally used type specializations
|
||||
LFS_TYPE_CREATE = 0x401,
|
||||
LFS_TYPE_DELETE = 0x4ff,
|
||||
LFS_TYPE_SUPERBLOCK = 0x0ff,
|
||||
LFS_TYPE_DIRSTRUCT = 0x200,
|
||||
LFS_TYPE_CTZSTRUCT = 0x202,
|
||||
LFS_TYPE_INLINESTRUCT = 0x201,
|
||||
LFS_TYPE_SOFTTAIL = 0x600,
|
||||
LFS_TYPE_HARDTAIL = 0x601,
|
||||
LFS_TYPE_MOVESTATE = 0x7ff,
|
||||
LFS_TYPE_CCRC = 0x500,
|
||||
LFS_TYPE_FCRC = 0x5ff,
|
||||
|
||||
// internal chip sources
|
||||
LFS_FROM_NOOP = 0x000,
|
||||
LFS_FROM_MOVE = 0x101,
|
||||
LFS_FROM_USERATTRS = 0x102,
|
||||
};
|
||||
|
||||
// File open flags
|
||||
enum lfs_open_flags {
|
||||
// open flags
|
||||
LFS_O_RDONLY = 1, // Open a file as read only
|
||||
#ifndef LFS_READONLY
|
||||
LFS_O_WRONLY = 2, // Open a file as write only
|
||||
LFS_O_RDWR = 3, // Open a file as read and write
|
||||
LFS_O_CREAT = 0x0100, // Create a file if it does not exist
|
||||
LFS_O_EXCL = 0x0200, // Fail if a file already exists
|
||||
LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size
|
||||
LFS_O_APPEND = 0x0800, // Move to end of file on every write
|
||||
#endif
|
||||
|
||||
// internally used flags
|
||||
#ifndef LFS_READONLY
|
||||
LFS_F_DIRTY = 0x010000, // File does not match storage
|
||||
LFS_F_WRITING = 0x020000, // File has been written since last flush
|
||||
#endif
|
||||
LFS_F_READING = 0x040000, // File has been read since last flush
|
||||
#ifndef LFS_READONLY
|
||||
LFS_F_ERRED = 0x080000, // An error occurred during write
|
||||
#endif
|
||||
LFS_F_INLINE = 0x100000, // Currently inlined in directory entry
|
||||
};
|
||||
|
||||
// File seek flags
|
||||
enum lfs_whence_flags {
|
||||
LFS_SEEK_SET = 0, // Seek relative to an absolute position
|
||||
LFS_SEEK_CUR = 1, // Seek relative to the current file position
|
||||
LFS_SEEK_END = 2, // Seek relative to the end of the file
|
||||
};
|
||||
|
||||
|
||||
// Configuration provided during initialization of the littlefs
|
||||
struct lfs_config {
|
||||
// Opaque user provided context that can be used to pass
|
||||
// information to the block device operations
|
||||
void *context;
|
||||
|
||||
// Read a region in a block. Negative error codes are propagated
|
||||
// to the user.
|
||||
int (*read)(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
// Program a region in a block. The block must have previously
|
||||
// been erased. Negative error codes are propagated to the user.
|
||||
// May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
int (*prog)(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
// Erase a block. A block must be erased before being programmed.
|
||||
// The state of an erased block is undefined. Negative error codes
|
||||
// are propagated to the user.
|
||||
// May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
int (*erase)(const struct lfs_config *c, lfs_block_t block);
|
||||
|
||||
// Sync the state of the underlying block device. Negative error codes
|
||||
// are propagated to the user.
|
||||
int (*sync)(const struct lfs_config *c);
|
||||
|
||||
#ifdef LFS_THREADSAFE
|
||||
// Lock the underlying block device. Negative error codes
|
||||
// are propagated to the user.
|
||||
int (*lock)(const struct lfs_config *c);
|
||||
|
||||
// Unlock the underlying block device. Negative error codes
|
||||
// are propagated to the user.
|
||||
int (*unlock)(const struct lfs_config *c);
|
||||
#endif
|
||||
|
||||
// Minimum size of a block read in bytes. All read operations will be a
|
||||
// multiple of this value.
|
||||
lfs_size_t read_size;
|
||||
|
||||
// Minimum size of a block program in bytes. All program operations will be
|
||||
// a multiple of this value.
|
||||
lfs_size_t prog_size;
|
||||
|
||||
// Size of an erasable block in bytes. This does not impact ram consumption
|
||||
// and may be larger than the physical erase size. However, non-inlined
|
||||
// files take up at minimum one block. Must be a multiple of the read and
|
||||
// program sizes.
|
||||
lfs_size_t block_size;
|
||||
|
||||
// Number of erasable blocks on the device. Defaults to block_count stored
|
||||
// on disk when zero.
|
||||
lfs_size_t block_count;
|
||||
|
||||
// Number of erase cycles before littlefs evicts metadata logs and moves
|
||||
// the metadata to another block. Suggested values are in the
|
||||
// range 100-1000, with large values having better performance at the cost
|
||||
// of less consistent wear distribution.
|
||||
//
|
||||
// Set to -1 to disable block-level wear-leveling.
|
||||
int32_t block_cycles;
|
||||
|
||||
// Size of block caches in bytes. Each cache buffers a portion of a block in
|
||||
// RAM. The littlefs needs a read cache, a program cache, and one additional
|
||||
// cache per file. Larger caches can improve performance by storing more
|
||||
// data and reducing the number of disk accesses. Must be a multiple of the
|
||||
// read and program sizes, and a factor of the block size.
|
||||
lfs_size_t cache_size;
|
||||
|
||||
// Size of the lookahead buffer in bytes. A larger lookahead buffer
|
||||
// increases the number of blocks found during an allocation pass. The
|
||||
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
|
||||
// can track 8 blocks.
|
||||
lfs_size_t lookahead_size;
|
||||
|
||||
// Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
|
||||
// pairs that exceed this threshold will be compacted during lfs_fs_gc.
|
||||
// Defaults to ~88% block_size when zero, though the default may change
|
||||
// in the future.
|
||||
//
|
||||
// Note this only affects lfs_fs_gc. Normal compactions still only occur
|
||||
// when full.
|
||||
//
|
||||
// Set to -1 to disable metadata compaction during lfs_fs_gc.
|
||||
lfs_size_t compact_thresh;
|
||||
|
||||
// Optional statically allocated read buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
void *read_buffer;
|
||||
|
||||
// Optional statically allocated program buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
void *prog_buffer;
|
||||
|
||||
// Optional statically allocated lookahead buffer. Must be lookahead_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
void *lookahead_buffer;
|
||||
|
||||
// Optional upper limit on length of file names in bytes. No downside for
|
||||
// larger names except the size of the info struct which is controlled by
|
||||
// the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX or name_max stored on
|
||||
// disk when zero.
|
||||
lfs_size_t name_max;
|
||||
|
||||
// Optional upper limit on files in bytes. No downside for larger files
|
||||
// but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX or file_max stored
|
||||
// on disk when zero.
|
||||
lfs_size_t file_max;
|
||||
|
||||
// Optional upper limit on custom attributes in bytes. No downside for
|
||||
// larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
|
||||
// LFS_ATTR_MAX or attr_max stored on disk when zero.
|
||||
lfs_size_t attr_max;
|
||||
|
||||
// Optional upper limit on total space given to metadata pairs in bytes. On
|
||||
// devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
|
||||
// can help bound the metadata compaction time. Must be <= block_size.
|
||||
// Defaults to block_size when zero.
|
||||
lfs_size_t metadata_max;
|
||||
|
||||
// Optional upper limit on inlined files in bytes. Inlined files live in
|
||||
// metadata and decrease storage requirements, but may be limited to
|
||||
// improve metadata-related performance. Must be <= cache_size, <=
|
||||
// attr_max, and <= block_size/8. Defaults to the largest possible
|
||||
// inline_max when zero.
|
||||
//
|
||||
// Set to -1 to disable inlined files.
|
||||
lfs_size_t inline_max;
|
||||
|
||||
#ifdef LFS_MULTIVERSION
|
||||
// On-disk version to use when writing in the form of 16-bit major version
|
||||
// + 16-bit minor version. This limiting metadata to what is supported by
|
||||
// older minor versions. Note that some features will be lost. Defaults to
|
||||
// to the most recent minor version when zero.
|
||||
uint32_t disk_version;
|
||||
#endif
|
||||
};
|
||||
|
||||
// File info structure
|
||||
struct lfs_info {
|
||||
// Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR
|
||||
uint8_t type;
|
||||
|
||||
// Size of the file, only valid for REG files. Limited to 32-bits.
|
||||
lfs_size_t size;
|
||||
|
||||
// Name of the file stored as a null-terminated string. Limited to
|
||||
// LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to
|
||||
// reduce RAM. LFS_NAME_MAX is stored in superblock and must be
|
||||
// respected by other littlefs drivers.
|
||||
char name[LFS_NAME_MAX+1];
|
||||
};
|
||||
|
||||
// Filesystem info structure
|
||||
struct lfs_fsinfo {
|
||||
// On-disk version.
|
||||
uint32_t disk_version;
|
||||
|
||||
// Size of a logical block in bytes.
|
||||
lfs_size_t block_size;
|
||||
|
||||
// Number of logical blocks in filesystem.
|
||||
lfs_size_t block_count;
|
||||
|
||||
// Upper limit on the length of file names in bytes.
|
||||
lfs_size_t name_max;
|
||||
|
||||
// Upper limit on the size of files in bytes.
|
||||
lfs_size_t file_max;
|
||||
|
||||
// Upper limit on the size of custom attributes in bytes.
|
||||
lfs_size_t attr_max;
|
||||
};
|
||||
|
||||
// Custom attribute structure, used to describe custom attributes
|
||||
// committed atomically during file writes.
|
||||
struct lfs_attr {
|
||||
// 8-bit type of attribute, provided by user and used to
|
||||
// identify the attribute
|
||||
uint8_t type;
|
||||
|
||||
// Pointer to buffer containing the attribute
|
||||
void *buffer;
|
||||
|
||||
// Size of attribute in bytes, limited to LFS_ATTR_MAX
|
||||
lfs_size_t size;
|
||||
};
|
||||
|
||||
// Optional configuration provided during lfs_file_opencfg
|
||||
struct lfs_file_config {
|
||||
// Optional statically allocated file buffer. Must be cache_size.
|
||||
// By default lfs_malloc is used to allocate this buffer.
|
||||
void *buffer;
|
||||
|
||||
// Optional list of custom attributes related to the file. If the file
|
||||
// is opened with read access, these attributes will be read from disk
|
||||
// during the open call. If the file is opened with write access, the
|
||||
// attributes will be written to disk every file sync or close. This
|
||||
// write occurs atomically with update to the file's contents.
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller
|
||||
// than the buffer, it will be padded with zeros. If the stored attribute
|
||||
// is larger, then it will be silently truncated. If the attribute is not
|
||||
// found, it will be created implicitly.
|
||||
struct lfs_attr *attrs;
|
||||
|
||||
// Number of custom attributes in the list
|
||||
lfs_size_t attr_count;
|
||||
};
|
||||
|
||||
|
||||
/// internal littlefs data structures ///
|
||||
typedef struct lfs_cache {
|
||||
lfs_block_t block;
|
||||
lfs_off_t off;
|
||||
lfs_size_t size;
|
||||
uint8_t *buffer;
|
||||
} lfs_cache_t;
|
||||
|
||||
typedef struct lfs_mdir {
|
||||
lfs_block_t pair[2];
|
||||
uint32_t rev;
|
||||
lfs_off_t off;
|
||||
uint32_t etag;
|
||||
uint16_t count;
|
||||
bool erased;
|
||||
bool split;
|
||||
lfs_block_t tail[2];
|
||||
} lfs_mdir_t;
|
||||
|
||||
// littlefs directory type
|
||||
typedef struct lfs_dir {
|
||||
struct lfs_dir *next;
|
||||
uint16_t id;
|
||||
uint8_t type;
|
||||
lfs_mdir_t m;
|
||||
|
||||
lfs_off_t pos;
|
||||
lfs_block_t head[2];
|
||||
} lfs_dir_t;
|
||||
|
||||
// littlefs file type
|
||||
typedef struct lfs_file {
|
||||
struct lfs_file *next;
|
||||
uint16_t id;
|
||||
uint8_t type;
|
||||
lfs_mdir_t m;
|
||||
|
||||
struct lfs_ctz {
|
||||
lfs_block_t head;
|
||||
lfs_size_t size;
|
||||
} ctz;
|
||||
|
||||
uint32_t flags;
|
||||
lfs_off_t pos;
|
||||
lfs_block_t block;
|
||||
lfs_off_t off;
|
||||
lfs_cache_t cache;
|
||||
|
||||
const struct lfs_file_config *cfg;
|
||||
} lfs_file_t;
|
||||
|
||||
typedef struct lfs_superblock {
|
||||
uint32_t version;
|
||||
lfs_size_t block_size;
|
||||
lfs_size_t block_count;
|
||||
lfs_size_t name_max;
|
||||
lfs_size_t file_max;
|
||||
lfs_size_t attr_max;
|
||||
} lfs_superblock_t;
|
||||
|
||||
typedef struct lfs_gstate {
|
||||
uint32_t tag;
|
||||
lfs_block_t pair[2];
|
||||
} lfs_gstate_t;
|
||||
|
||||
// The littlefs filesystem type
|
||||
typedef struct lfs {
|
||||
lfs_cache_t rcache;
|
||||
lfs_cache_t pcache;
|
||||
|
||||
lfs_block_t root[2];
|
||||
struct lfs_mlist {
|
||||
struct lfs_mlist *next;
|
||||
uint16_t id;
|
||||
uint8_t type;
|
||||
lfs_mdir_t m;
|
||||
} *mlist;
|
||||
uint32_t seed;
|
||||
|
||||
lfs_gstate_t gstate;
|
||||
lfs_gstate_t gdisk;
|
||||
lfs_gstate_t gdelta;
|
||||
|
||||
struct lfs_lookahead {
|
||||
lfs_block_t start;
|
||||
lfs_block_t size;
|
||||
lfs_block_t next;
|
||||
lfs_block_t ckpoint;
|
||||
uint8_t *buffer;
|
||||
} lookahead;
|
||||
|
||||
const struct lfs_config *cfg;
|
||||
lfs_size_t block_count;
|
||||
lfs_size_t name_max;
|
||||
lfs_size_t file_max;
|
||||
lfs_size_t attr_max;
|
||||
lfs_size_t inline_max;
|
||||
|
||||
#ifdef LFS_MIGRATE
|
||||
struct lfs1 *lfs1;
|
||||
#endif
|
||||
} lfs_t;
|
||||
|
||||
|
||||
/// Filesystem functions ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Format a block device with the littlefs
|
||||
//
|
||||
// Requires a littlefs object and config struct. This clobbers the littlefs
|
||||
// object, and does not leave the filesystem mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_format(lfs_t *lfs, const struct lfs_config *config);
|
||||
#endif
|
||||
|
||||
// Mounts a littlefs
|
||||
//
|
||||
// Requires a littlefs object and config struct. Multiple filesystems
|
||||
// may be mounted simultaneously with multiple littlefs objects. Both
|
||||
// lfs and config must be allocated while mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mount(lfs_t *lfs, const struct lfs_config *config);
|
||||
|
||||
// Unmounts a littlefs
|
||||
//
|
||||
// Does nothing besides releasing any allocated resources.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_unmount(lfs_t *lfs);
|
||||
|
||||
/// General operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a file or directory
|
||||
//
|
||||
// If removing a directory, the directory must be empty.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_remove(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Rename or move a file or directory
|
||||
//
|
||||
// If the destination exists, it must match the source in type.
|
||||
// If the destination is a directory, the directory must be empty.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath);
|
||||
#endif
|
||||
|
||||
// Find info about a file or directory
|
||||
//
|
||||
// Fills out the info structure, based on the specified file or directory.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info);
|
||||
|
||||
// Get a custom attribute
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than
|
||||
// the buffer, it will be padded with zeros. If the stored attribute is larger,
|
||||
// then it will be silently truncated. If no attribute is found, the error
|
||||
// LFS_ERR_NOATTR is returned and the buffer is filled with zeros.
|
||||
//
|
||||
// Returns the size of the attribute, or a negative error code on failure.
|
||||
// Note, the returned size is the size of the attribute on disk, irrespective
|
||||
// of the size of the buffer. This can be used to dynamically allocate a buffer
|
||||
// or check for existence.
|
||||
lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Set custom attributes
|
||||
//
|
||||
// Custom attributes are uniquely identified by an 8-bit type and limited
|
||||
// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be
|
||||
// implicitly created.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_setattr(lfs_t *lfs, const char *path,
|
||||
uint8_t type, const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Removes a custom attribute
|
||||
//
|
||||
// If an attribute is not found, nothing happens.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
|
||||
#endif
|
||||
|
||||
|
||||
/// File operations ///
|
||||
|
||||
#ifndef LFS_NO_MALLOC
|
||||
// Open a file
|
||||
//
|
||||
// The mode that the file is opened in is determined by the flags, which
|
||||
// are values from the enum lfs_open_flags that are bitwise-ored together.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags);
|
||||
|
||||
// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM
|
||||
// thus use lfs_file_opencfg() with config.buffer set.
|
||||
#endif
|
||||
|
||||
// Open a file with extra configuration
|
||||
//
|
||||
// The mode that the file is opened in is determined by the flags, which
|
||||
// are values from the enum lfs_open_flags that are bitwise-ored together.
|
||||
//
|
||||
// The config struct provides additional config options per file as described
|
||||
// above. The config struct must remain allocated while the file is open, and
|
||||
// the config struct must be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
|
||||
const char *path, int flags,
|
||||
const struct lfs_file_config *config);
|
||||
|
||||
// Close a file
|
||||
//
|
||||
// Any pending writes are written out to storage as though
|
||||
// sync had been called and releases any allocated resources.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_close(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Synchronize a file on storage
|
||||
//
|
||||
// Any pending writes are written out to storage.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_sync(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Read data from file
|
||||
//
|
||||
// Takes a buffer and size indicating where to store the read data.
|
||||
// Returns the number of bytes read, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
|
||||
void *buffer, lfs_size_t size);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Write data to file
|
||||
//
|
||||
// Takes a buffer and size indicating the data to write. The file will not
|
||||
// actually be updated on the storage until either sync or close is called.
|
||||
//
|
||||
// Returns the number of bytes written, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
|
||||
const void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
// Change the position of the file
|
||||
//
|
||||
// The change in position is determined by the offset and whence flag.
|
||||
// Returns the new position of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
|
||||
lfs_soff_t off, int whence);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Truncates the size of the file to the specified size
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size);
|
||||
#endif
|
||||
|
||||
// Return the position of the file
|
||||
//
|
||||
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR)
|
||||
// Returns the position of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Change the position of the file to the beginning of the file
|
||||
//
|
||||
// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET)
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
// Return the size of the file
|
||||
//
|
||||
// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END)
|
||||
// Returns the size of the file, or a negative error code on failure.
|
||||
lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file);
|
||||
|
||||
|
||||
/// Directory operations ///
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Create a directory
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_mkdir(lfs_t *lfs, const char *path);
|
||||
#endif
|
||||
|
||||
// Open a directory
|
||||
//
|
||||
// Once open a directory can be used with read to iterate over files.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path);
|
||||
|
||||
// Close a directory
|
||||
//
|
||||
// Releases any allocated resources.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
// Read an entry in the directory
|
||||
//
|
||||
// Fills out the info structure, based on the specified file or directory.
|
||||
// Returns a positive value on success, 0 at the end of directory,
|
||||
// or a negative error code on failure.
|
||||
int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info);
|
||||
|
||||
// Change the position of the directory
|
||||
//
|
||||
// The new off must be a value previous returned from tell and specifies
|
||||
// an absolute offset in the directory seek.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off);
|
||||
|
||||
// Return the position of the directory
|
||||
//
|
||||
// The returned offset is only meant to be consumed by seek and may not make
|
||||
// sense, but does indicate the current position in the directory iteration.
|
||||
//
|
||||
// Returns the position of the directory, or a negative error code on failure.
|
||||
lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
// Change the position of the directory to the beginning of the directory
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir);
|
||||
|
||||
|
||||
/// Filesystem-level filesystem operations
|
||||
|
||||
// Find on-disk info about the filesystem
|
||||
//
|
||||
// Fills out the fsinfo structure based on the filesystem found on-disk.
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo);
|
||||
|
||||
// Finds the current size of the filesystem
|
||||
//
|
||||
// Note: Result is best effort. If files share COW structures, the returned
|
||||
// size may be larger than the filesystem actually is.
|
||||
//
|
||||
// Returns the number of allocated blocks, or a negative error code on failure.
|
||||
lfs_ssize_t lfs_fs_size(lfs_t *lfs);
|
||||
|
||||
// Traverse through all blocks in use by the filesystem
|
||||
//
|
||||
// The provided callback will be called with each block address that is
|
||||
// currently in use by the filesystem. This can be used to determine which
|
||||
// blocks are in use or how much of the storage is available.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data);
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Attempt to make the filesystem consistent and ready for writing
|
||||
//
|
||||
// Calling this function is not required, consistency will be implicitly
|
||||
// enforced on the first operation that writes to the filesystem, but this
|
||||
// function allows the work to be performed earlier and without other
|
||||
// filesystem changes.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_mkconsistent(lfs_t *lfs);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Attempt any janitorial work
|
||||
//
|
||||
// This currently:
|
||||
// 1. Calls mkconsistent if not already consistent
|
||||
// 2. Compacts metadata > compact_thresh
|
||||
// 3. Populates the block allocator
|
||||
//
|
||||
// Though additional janitorial work may be added in the future.
|
||||
//
|
||||
// Calling this function is not required, but may allow the offloading of
|
||||
// expensive janitorial work to a less time-critical code path.
|
||||
//
|
||||
// Returns a negative error code on failure. Accomplishing nothing is not
|
||||
// an error.
|
||||
int lfs_fs_gc(lfs_t *lfs);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
// Grows the filesystem to a new size, updating the superblock with the new
|
||||
// block count.
|
||||
//
|
||||
// If LFS_SHRINKNONRELOCATING is defined, this function will also accept
|
||||
// block_counts smaller than the current configuration, after checking
|
||||
// that none of the blocks that are being removed are in use.
|
||||
// Note that littlefs's pseudorandom block allocation means that
|
||||
// this is very unlikely to work in the general case.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count);
|
||||
#endif
|
||||
|
||||
#ifndef LFS_READONLY
|
||||
#ifdef LFS_MIGRATE
|
||||
// Attempts to migrate a previous version of littlefs
|
||||
//
|
||||
// Behaves similarly to the lfs_format function. Attempts to mount
|
||||
// the previous version of littlefs and update the filesystem so it can be
|
||||
// mounted with the current version of littlefs.
|
||||
//
|
||||
// Requires a littlefs object and config struct. This clobbers the littlefs
|
||||
// object, and does not leave the filesystem mounted. The config struct must
|
||||
// be zeroed for defaults and backwards compatibility.
|
||||
//
|
||||
// Returns a negative error code on failure.
|
||||
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
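The header above is the whole public API, and the canonical usage pattern is mount-or-format followed by open, read, write, and close. A short sketch of a persistent boot counter, with error handling trimmed and cfg assumed to be a populated struct lfs_config wired to one of the block devices shown earlier:

#include "lfs.h"

// Increment a small "boot_count" file on every call and return the new value.
uint32_t bump_boot_count(const struct lfs_config *cfg) {
    lfs_t lfs;

    // mount, formatting on first use (e.g. a fresh image)
    int err = lfs_mount(&lfs, cfg);
    if (err) {
        lfs_format(&lfs, cfg);
        lfs_mount(&lfs, cfg);
    }

    // read the current count, creating the file if it does not exist
    uint32_t boot_count = 0;
    lfs_file_t file;
    lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
    lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));

    // update it; the write only reaches storage on sync or close
    boot_count += 1;
    lfs_file_rewind(&lfs, &file);
    lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
    lfs_file_close(&lfs, &file);

    lfs_unmount(&lfs);
    return boot_count;
}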
37
components/joltwallet__littlefs/src/littlefs/lfs_util.c
Normal file
@@ -0,0 +1,37 @@
/*
 * lfs util functions
 *
 * Copyright (c) 2022, The littlefs authors.
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include "lfs_util.h"

// Only compile if user does not provide custom config
#ifndef LFS_CONFIG


// If user provides their own CRC impl we don't need this
#ifndef LFS_CRC
// Software CRC implementation with small lookup table
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
    static const uint32_t rtable[16] = {
        0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
        0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
        0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
        0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
    };

    const uint8_t *data = buffer;

    for (size_t i = 0; i < size; i++) {
        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
    }

    return crc;
}
#endif


#endif
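The nibble-table implementation above is the usual reflected CRC-32; callers feed the running CRC back in, so a seed and the data are all that is needed. A small usage sketch, assuming the common 0xffffffff seed (which, as far as we can tell, matches what littlefs itself uses when checksumming metadata commits):

#include <stdio.h>
#include "lfs_util.h"

int main(void) {
    const char data[] = "littlefs";

    // running CRC: seed, then fold in each buffer in turn
    uint32_t crc = 0xffffffff;
    crc = lfs_crc(crc, data, sizeof(data) - 1);

    printf("crc32 of \"%s\": 0x%08" PRIx32 "\n", data, crc);
    return 0;
}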
273
components/joltwallet__littlefs/src/littlefs/lfs_util.h
Normal file
@@ -0,0 +1,273 @@
|
|||
/*
 * lfs utility functions
 *
 * Copyright (c) 2022, The littlefs authors.
 * Copyright (c) 2017, Arm Limited. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef LFS_UTIL_H
#define LFS_UTIL_H

#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
#define LFS_STRINGIZE2(x) #x

// Users can override lfs_util.h with their own configuration by defining
// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
//
// If LFS_CONFIG is used, none of the default utils will be emitted and must be
// provided by the config file. To start, I would suggest copying lfs_util.h
// and modifying as needed.
#ifdef LFS_CONFIG
#include LFS_STRINGIZE(LFS_CONFIG)
#else

// Alternatively, users can provide a header file which defines
// macros and other things consumed by littlefs.
//
// For example, provide my_defines.h, which contains
// something like:
//
//     #include <stddef.h>
//     extern void *my_malloc(size_t sz);
//     #define LFS_MALLOC(sz) my_malloc(sz)
//
// And build littlefs with the header by defining LFS_DEFINES.
// (-DLFS_DEFINES=my_defines.h)

#ifdef LFS_DEFINES
#include LFS_STRINGIZE(LFS_DEFINES)
#endif

// System includes
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <inttypes.h>

#ifndef LFS_NO_MALLOC
#include <stdlib.h>
#endif
#ifndef LFS_NO_ASSERT
#include <assert.h>
#endif
#if !defined(LFS_NO_DEBUG) || \
        !defined(LFS_NO_WARN) || \
        !defined(LFS_NO_ERROR) || \
        defined(LFS_YES_TRACE)
#include <stdio.h>
#endif

#ifdef __cplusplus
extern "C"
{
#endif


// Macros, may be replaced by system specific wrappers. Arguments to these
// macros must not have side-effects as the macros can be removed for a smaller
// code footprint

// Logging functions
#ifndef LFS_TRACE
#ifdef LFS_YES_TRACE
#define LFS_TRACE_(fmt, ...) \
    printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
#else
#define LFS_TRACE(...)
#endif
#endif

#ifndef LFS_DEBUG
#ifndef LFS_NO_DEBUG
#define LFS_DEBUG_(fmt, ...) \
    printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
#else
#define LFS_DEBUG(...)
#endif
#endif

#ifndef LFS_WARN
#ifndef LFS_NO_WARN
#define LFS_WARN_(fmt, ...) \
    printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
#else
#define LFS_WARN(...)
#endif
#endif

#ifndef LFS_ERROR
#ifndef LFS_NO_ERROR
#define LFS_ERROR_(fmt, ...) \
    printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "")
#else
#define LFS_ERROR(...)
#endif
#endif

// Runtime assertions
#ifndef LFS_ASSERT
#ifndef LFS_NO_ASSERT
#define LFS_ASSERT(test) assert(test)
#else
#define LFS_ASSERT(test)
#endif
#endif


// Builtin functions, these may be replaced by more efficient
// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more
// expensive basic C implementation for debugging purposes

// Min/max functions for unsigned 32-bit numbers
static inline uint32_t lfs_max(uint32_t a, uint32_t b) {
    return (a > b) ? a : b;
}

static inline uint32_t lfs_min(uint32_t a, uint32_t b) {
    return (a < b) ? a : b;
}

// Align to nearest multiple of a size
static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) {
    return a - (a % alignment);
}

static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) {
    return lfs_aligndown(a + alignment-1, alignment);
}

// Find the smallest power of 2 greater than or equal to a
static inline uint32_t lfs_npw2(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
    return 32 - __builtin_clz(a-1);
#else
    uint32_t r = 0;
    uint32_t s;
    a -= 1;
    s = (a > 0xffff) << 4; a >>= s; r |= s;
    s = (a > 0xff  ) << 3; a >>= s; r |= s;
    s = (a > 0xf   ) << 2; a >>= s; r |= s;
    s = (a > 0x3   ) << 1; a >>= s; r |= s;
    return (r | (a >> 1)) + 1;
#endif
}

// Count the number of trailing binary zeros in a
// lfs_ctz(0) may be undefined
static inline uint32_t lfs_ctz(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__)
    return __builtin_ctz(a);
#else
    return lfs_npw2((a & -a) + 1) - 1;
#endif
}

// Count the number of binary ones in a
static inline uint32_t lfs_popc(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM))
    return __builtin_popcount(a);
#else
    a = a - ((a >> 1) & 0x55555555);
    a = (a & 0x33333333) + ((a >> 2) & 0x33333333);
    return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
#endif
}

// Find the sequence comparison of a and b, this is the distance
// between a and b ignoring overflow
static inline int lfs_scmp(uint32_t a, uint32_t b) {
    return (int)(unsigned)(a - b);
}

// Convert between 32-bit little-endian and native order
static inline uint32_t lfs_fromle32(uint32_t a) {
#if (defined(  BYTE_ORDER  ) && defined(  ORDER_LITTLE_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_LITTLE_ENDIAN  ) || \
        (defined(__BYTE_ORDER  ) && defined(__ORDER_LITTLE_ENDIAN  ) && __BYTE_ORDER   == __ORDER_LITTLE_ENDIAN  ) || \
        (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
    return a;
#elif !defined(LFS_NO_INTRINSICS) && ( \
        (defined(  BYTE_ORDER  ) && defined(  ORDER_BIG_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_BIG_ENDIAN  ) || \
        (defined(__BYTE_ORDER  ) && defined(__ORDER_BIG_ENDIAN  ) && __BYTE_ORDER   == __ORDER_BIG_ENDIAN  ) || \
        (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
    return __builtin_bswap32(a);
#else
    return ((uint32_t)((uint8_t*)&a)[0] <<  0) |
           ((uint32_t)((uint8_t*)&a)[1] <<  8) |
           ((uint32_t)((uint8_t*)&a)[2] << 16) |
           ((uint32_t)((uint8_t*)&a)[3] << 24);
#endif
}

static inline uint32_t lfs_tole32(uint32_t a) {
    return lfs_fromle32(a);
}

// Convert between 32-bit big-endian and native order
static inline uint32_t lfs_frombe32(uint32_t a) {
#if !defined(LFS_NO_INTRINSICS) && ( \
        (defined(  BYTE_ORDER  ) && defined(  ORDER_LITTLE_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_LITTLE_ENDIAN  ) || \
        (defined(__BYTE_ORDER  ) && defined(__ORDER_LITTLE_ENDIAN  ) && __BYTE_ORDER   == __ORDER_LITTLE_ENDIAN  ) || \
        (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
    return __builtin_bswap32(a);
#elif (defined(  BYTE_ORDER  ) && defined(  ORDER_BIG_ENDIAN  ) &&   BYTE_ORDER   ==   ORDER_BIG_ENDIAN  ) || \
        (defined(__BYTE_ORDER  ) && defined(__ORDER_BIG_ENDIAN  ) && __BYTE_ORDER   == __ORDER_BIG_ENDIAN  ) || \
        (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return a;
#else
    return ((uint32_t)((uint8_t*)&a)[0] << 24) |
           ((uint32_t)((uint8_t*)&a)[1] << 16) |
           ((uint32_t)((uint8_t*)&a)[2] <<  8) |
           ((uint32_t)((uint8_t*)&a)[3] <<  0);
#endif
}

static inline uint32_t lfs_tobe32(uint32_t a) {
    return lfs_frombe32(a);
}

// Calculate CRC-32 with polynomial = 0x04c11db7
#ifdef LFS_CRC
static inline uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
    return LFS_CRC(crc, buffer, size);
}
#else
uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
#endif

// Allocate memory, only used if buffers are not provided to littlefs
//
// littlefs current has no alignment requirements, as it only allocates
// byte-level buffers.
static inline void *lfs_malloc(size_t size) {
#if defined(LFS_MALLOC)
    return LFS_MALLOC(size);
#elif !defined(LFS_NO_MALLOC)
    return malloc(size);
#else
    (void)size;
    return NULL;
#endif
}

// Deallocate memory, only used if buffers are not provided to littlefs
static inline void lfs_free(void *p) {
#if defined(LFS_FREE)
    LFS_FREE(p);
#elif !defined(LFS_NO_MALLOC)
    free(p);
#else
    (void)p;
#endif
}


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
#endif
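
lfs_malloc and lfs_free above are only reached when littlefs has to allocate its own buffers. A minimal sketch of avoiding the allocator entirely, assuming a build with -DLFS_NO_MALLOC and statically sized caches; the buffer sizes and the incomplete config below are illustrative and must match the real geometry and block device callbacks:

#include "lfs.h"

// illustration only: caller-provided buffers so lfs_malloc() is never used
static uint8_t read_cache[256];
static uint8_t prog_cache[256];
static uint8_t lookahead_buf[16];

static const struct lfs_config cfg_static_buffers = {
    // ... block device callbacks and geometry omitted for brevity ...
    .cache_size = 256,
    .lookahead_size = 16,
    .read_buffer = read_cache,
    .prog_buffer = prog_cache,
    .lookahead_buffer = lookahead_buf,
};
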
2063
components/joltwallet__littlefs/src/littlefs/runners/bench_runner.c
Normal file
File diff suppressed because it is too large

146
components/joltwallet__littlefs/src/littlefs/runners/bench_runner.h
Normal file

@@ -0,0 +1,146 @@
/*
|
||||
* Runner for littlefs benchmarks
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef BENCH_RUNNER_H
|
||||
#define BENCH_RUNNER_H
|
||||
|
||||
|
||||
// override LFS_TRACE
|
||||
void bench_trace(const char *fmt, ...);
|
||||
|
||||
#define LFS_TRACE_(fmt, ...) \
|
||||
bench_trace("%s:%d:trace: " fmt "%s\n", \
|
||||
__FILE__, \
|
||||
__LINE__, \
|
||||
__VA_ARGS__)
|
||||
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
|
||||
#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
|
||||
|
||||
// provide BENCH_START/BENCH_STOP macros
|
||||
void bench_start(void);
|
||||
void bench_stop(void);
|
||||
|
||||
#define BENCH_START() bench_start()
|
||||
#define BENCH_STOP() bench_stop()
|
||||
|
||||
|
||||
// note these are indirectly included in any generated files
|
||||
#include "bd/lfs_emubd.h"
|
||||
#include <stdio.h>
|
||||
|
||||
// give source a chance to define feature macros
|
||||
#undef _FEATURES_H
|
||||
#undef _STDIO_H
|
||||
|
||||
|
||||
// generated bench configurations
|
||||
struct lfs_config;
|
||||
|
||||
enum bench_flags {
|
||||
BENCH_REENTRANT = 0x1,
|
||||
};
|
||||
typedef uint8_t bench_flags_t;
|
||||
|
||||
typedef struct bench_define {
|
||||
intmax_t (*cb)(void *data);
|
||||
void *data;
|
||||
} bench_define_t;
|
||||
|
||||
struct bench_case {
|
||||
const char *name;
|
||||
const char *path;
|
||||
bench_flags_t flags;
|
||||
size_t permutations;
|
||||
|
||||
const bench_define_t *defines;
|
||||
|
||||
bool (*filter)(void);
|
||||
void (*run)(struct lfs_config *cfg);
|
||||
};
|
||||
|
||||
struct bench_suite {
|
||||
const char *name;
|
||||
const char *path;
|
||||
bench_flags_t flags;
|
||||
|
||||
const char *const *define_names;
|
||||
size_t define_count;
|
||||
|
||||
const struct bench_case *cases;
|
||||
size_t case_count;
|
||||
};
|
||||
|
||||
|
||||
// deterministic prng for pseudo-randomness in benches
|
||||
uint32_t bench_prng(uint32_t *state);
|
||||
|
||||
#define BENCH_PRNG(state) bench_prng(state)
|
||||
|
||||
|
||||
// access generated bench defines
|
||||
intmax_t bench_define(size_t define);
|
||||
|
||||
#define BENCH_DEFINE(i) bench_define(i)
|
||||
|
||||
// a few preconfigured defines that control how benches run
|
||||
|
||||
#define READ_SIZE_i 0
|
||||
#define PROG_SIZE_i 1
|
||||
#define ERASE_SIZE_i 2
|
||||
#define ERASE_COUNT_i 3
|
||||
#define BLOCK_SIZE_i 4
|
||||
#define BLOCK_COUNT_i 5
|
||||
#define CACHE_SIZE_i 6
|
||||
#define LOOKAHEAD_SIZE_i 7
|
||||
#define COMPACT_THRESH_i 8
|
||||
#define METADATA_MAX_i 9
|
||||
#define INLINE_MAX_i 10
|
||||
#define BLOCK_CYCLES_i 11
|
||||
#define ERASE_VALUE_i 12
|
||||
#define ERASE_CYCLES_i 13
|
||||
#define BADBLOCK_BEHAVIOR_i 14
|
||||
#define POWERLOSS_BEHAVIOR_i 15
|
||||
|
||||
#define READ_SIZE bench_define(READ_SIZE_i)
|
||||
#define PROG_SIZE bench_define(PROG_SIZE_i)
|
||||
#define ERASE_SIZE bench_define(ERASE_SIZE_i)
|
||||
#define ERASE_COUNT bench_define(ERASE_COUNT_i)
|
||||
#define BLOCK_SIZE bench_define(BLOCK_SIZE_i)
|
||||
#define BLOCK_COUNT bench_define(BLOCK_COUNT_i)
|
||||
#define CACHE_SIZE bench_define(CACHE_SIZE_i)
|
||||
#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i)
|
||||
#define COMPACT_THRESH bench_define(COMPACT_THRESH_i)
|
||||
#define METADATA_MAX bench_define(METADATA_MAX_i)
|
||||
#define INLINE_MAX bench_define(INLINE_MAX_i)
|
||||
#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i)
|
||||
#define ERASE_VALUE bench_define(ERASE_VALUE_i)
|
||||
#define ERASE_CYCLES bench_define(ERASE_CYCLES_i)
|
||||
#define BADBLOCK_BEHAVIOR bench_define(BADBLOCK_BEHAVIOR_i)
|
||||
#define POWERLOSS_BEHAVIOR bench_define(POWERLOSS_BEHAVIOR_i)
|
||||
|
||||
#define BENCH_IMPLICIT_DEFINES \
|
||||
BENCH_DEF(READ_SIZE, PROG_SIZE) \
|
||||
BENCH_DEF(PROG_SIZE, ERASE_SIZE) \
|
||||
BENCH_DEF(ERASE_SIZE, 0) \
|
||||
BENCH_DEF(ERASE_COUNT, (1024*1024)/BLOCK_SIZE) \
|
||||
BENCH_DEF(BLOCK_SIZE, ERASE_SIZE) \
|
||||
BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\
|
||||
BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
|
||||
BENCH_DEF(LOOKAHEAD_SIZE, 16) \
|
||||
BENCH_DEF(COMPACT_THRESH, 0) \
|
||||
BENCH_DEF(METADATA_MAX, 0) \
|
||||
BENCH_DEF(INLINE_MAX, 0) \
|
||||
BENCH_DEF(BLOCK_CYCLES, -1) \
|
||||
BENCH_DEF(ERASE_VALUE, 0xff) \
|
||||
BENCH_DEF(ERASE_CYCLES, 0) \
|
||||
BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
|
||||
BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP)
|
||||
|
||||
#define BENCH_GEOMETRY_DEFINE_COUNT 4
|
||||
#define BENCH_IMPLICIT_DEFINE_COUNT 16
|
||||
|
||||
|
||||
#endif
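
The header above is consumed by bench cases that scripts/bench.py generates from the bench/*.toml sources, whose generated C is not shown in this diff view. A hedged sketch of what such a case body might look like, only to illustrate how BENCH_START/BENCH_STOP, BENCH_PRNG and the implicit defines are used; the case name, file layout, and write loop are made up:

#include "lfs.h"
#include "bench_runner.h"  // include path is an assumption for this sketch

// illustration only -- not a real generated bench case
void bench_example_write(struct lfs_config *cfg) {
    lfs_t lfs;
    lfs_format(&lfs, cfg);
    lfs_mount(&lfs, cfg);

    uint32_t prng = 42;
    uint8_t buffer[64];

    BENCH_START();  // mark the start of the measured region
    lfs_file_t file;
    lfs_file_open(&lfs, &file, "bench", LFS_O_WRONLY | LFS_O_CREAT);
    for (lfs_size_t i = 0; i < BLOCK_COUNT; i++) {
        for (size_t j = 0; j < sizeof(buffer); j++) {
            buffer[j] = (uint8_t)BENCH_PRNG(&prng);
        }
        lfs_file_write(&lfs, &file, buffer, sizeof(buffer));
    }
    lfs_file_close(&lfs, &file);
    BENCH_STOP();   // mark the end of the measured region

    lfs_unmount(&lfs);
}
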
2818
components/joltwallet__littlefs/src/littlefs/runners/test_runner.c
Normal file
File diff suppressed because it is too large

142
components/joltwallet__littlefs/src/littlefs/runners/test_runner.h
Normal file

@@ -0,0 +1,142 @@
/*
|
||||
* Runner for littlefs tests
|
||||
*
|
||||
* Copyright (c) 2022, The littlefs authors.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#ifndef TEST_RUNNER_H
|
||||
#define TEST_RUNNER_H
|
||||
|
||||
|
||||
// override LFS_TRACE
|
||||
void test_trace(const char *fmt, ...);
|
||||
|
||||
#define LFS_TRACE_(fmt, ...) \
|
||||
test_trace("%s:%d:trace: " fmt "%s\n", \
|
||||
__FILE__, \
|
||||
__LINE__, \
|
||||
__VA_ARGS__)
|
||||
#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
|
||||
#define LFS_EMUBD_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
|
||||
|
||||
|
||||
// note these are indirectly included in any generated files
|
||||
#include "bd/lfs_emubd.h"
|
||||
#include <stdio.h>
|
||||
|
||||
// give source a chance to define feature macros
|
||||
#undef _FEATURES_H
|
||||
#undef _STDIO_H
|
||||
|
||||
|
||||
// generated test configurations
|
||||
struct lfs_config;
|
||||
|
||||
enum test_flags {
|
||||
TEST_REENTRANT = 0x1,
|
||||
};
|
||||
typedef uint8_t test_flags_t;
|
||||
|
||||
typedef struct test_define {
|
||||
intmax_t (*cb)(void *data);
|
||||
void *data;
|
||||
} test_define_t;
|
||||
|
||||
struct test_case {
|
||||
const char *name;
|
||||
const char *path;
|
||||
test_flags_t flags;
|
||||
size_t permutations;
|
||||
|
||||
const test_define_t *defines;
|
||||
|
||||
bool (*filter)(void);
|
||||
void (*run)(struct lfs_config *cfg);
|
||||
};
|
||||
|
||||
struct test_suite {
|
||||
const char *name;
|
||||
const char *path;
|
||||
test_flags_t flags;
|
||||
|
||||
const char *const *define_names;
|
||||
size_t define_count;
|
||||
|
||||
const struct test_case *cases;
|
||||
size_t case_count;
|
||||
};
|
||||
|
||||
|
||||
// deterministic prng for pseudo-randomness in tests
|
||||
uint32_t test_prng(uint32_t *state);
|
||||
|
||||
#define TEST_PRNG(state) test_prng(state)
|
||||
|
||||
|
||||
// access generated test defines
|
||||
intmax_t test_define(size_t define);
|
||||
|
||||
#define TEST_DEFINE(i) test_define(i)
|
||||
|
||||
// a few preconfigured defines that control how tests run
|
||||
|
||||
#define READ_SIZE_i 0
|
||||
#define PROG_SIZE_i 1
|
||||
#define ERASE_SIZE_i 2
|
||||
#define ERASE_COUNT_i 3
|
||||
#define BLOCK_SIZE_i 4
|
||||
#define BLOCK_COUNT_i 5
|
||||
#define CACHE_SIZE_i 6
|
||||
#define LOOKAHEAD_SIZE_i 7
|
||||
#define COMPACT_THRESH_i 8
|
||||
#define METADATA_MAX_i 9
|
||||
#define INLINE_MAX_i 10
|
||||
#define BLOCK_CYCLES_i 11
|
||||
#define ERASE_VALUE_i 12
|
||||
#define ERASE_CYCLES_i 13
|
||||
#define BADBLOCK_BEHAVIOR_i 14
|
||||
#define POWERLOSS_BEHAVIOR_i 15
|
||||
#define DISK_VERSION_i 16
|
||||
|
||||
#define READ_SIZE TEST_DEFINE(READ_SIZE_i)
|
||||
#define PROG_SIZE TEST_DEFINE(PROG_SIZE_i)
|
||||
#define ERASE_SIZE TEST_DEFINE(ERASE_SIZE_i)
|
||||
#define ERASE_COUNT TEST_DEFINE(ERASE_COUNT_i)
|
||||
#define BLOCK_SIZE TEST_DEFINE(BLOCK_SIZE_i)
|
||||
#define BLOCK_COUNT TEST_DEFINE(BLOCK_COUNT_i)
|
||||
#define CACHE_SIZE TEST_DEFINE(CACHE_SIZE_i)
|
||||
#define LOOKAHEAD_SIZE TEST_DEFINE(LOOKAHEAD_SIZE_i)
|
||||
#define COMPACT_THRESH TEST_DEFINE(COMPACT_THRESH_i)
|
||||
#define METADATA_MAX TEST_DEFINE(METADATA_MAX_i)
|
||||
#define INLINE_MAX TEST_DEFINE(INLINE_MAX_i)
|
||||
#define BLOCK_CYCLES TEST_DEFINE(BLOCK_CYCLES_i)
|
||||
#define ERASE_VALUE TEST_DEFINE(ERASE_VALUE_i)
|
||||
#define ERASE_CYCLES TEST_DEFINE(ERASE_CYCLES_i)
|
||||
#define BADBLOCK_BEHAVIOR TEST_DEFINE(BADBLOCK_BEHAVIOR_i)
|
||||
#define POWERLOSS_BEHAVIOR TEST_DEFINE(POWERLOSS_BEHAVIOR_i)
|
||||
#define DISK_VERSION TEST_DEFINE(DISK_VERSION_i)
|
||||
|
||||
#define TEST_IMPLICIT_DEFINES \
|
||||
TEST_DEF(READ_SIZE, PROG_SIZE) \
|
||||
TEST_DEF(PROG_SIZE, ERASE_SIZE) \
|
||||
TEST_DEF(ERASE_SIZE, 0) \
|
||||
TEST_DEF(ERASE_COUNT, (1024*1024)/ERASE_SIZE) \
|
||||
TEST_DEF(BLOCK_SIZE, ERASE_SIZE) \
|
||||
TEST_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
|
||||
TEST_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
|
||||
TEST_DEF(LOOKAHEAD_SIZE, 16) \
|
||||
TEST_DEF(COMPACT_THRESH, 0) \
|
||||
TEST_DEF(METADATA_MAX, 0) \
|
||||
TEST_DEF(INLINE_MAX, 0) \
|
||||
TEST_DEF(BLOCK_CYCLES, -1) \
|
||||
TEST_DEF(ERASE_VALUE, 0xff) \
|
||||
TEST_DEF(ERASE_CYCLES, 0) \
|
||||
TEST_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \
|
||||
TEST_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP) \
|
||||
TEST_DEF(DISK_VERSION, 0)
|
||||
|
||||
#define TEST_GEOMETRY_DEFINE_COUNT 4
|
||||
#define TEST_IMPLICIT_DEFINE_COUNT 17
|
||||
|
||||
|
||||
#endif
1433
components/joltwallet__littlefs/src/littlefs/scripts/bench.py
Normal file
File diff suppressed because it is too large

181
components/joltwallet__littlefs/src/littlefs/scripts/changeprefix.py
Normal file

@@ -0,0 +1,181 @@
#!/usr/bin/env python3
|
||||
#
|
||||
# Change prefixes in files/filenames. Useful for creating different versions
|
||||
# of a codebase that don't conflict at compile time.
|
||||
#
|
||||
# Example:
|
||||
# $ ./scripts/changeprefix.py lfs lfs3
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2019, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import glob
|
||||
import itertools
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
GIT_PATH = ['git']
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def changeprefix(from_prefix, to_prefix, line):
|
||||
line, count1 = re.subn(
|
||||
'\\b'+from_prefix,
|
||||
to_prefix,
|
||||
line)
|
||||
line, count2 = re.subn(
|
||||
'\\b'+from_prefix.upper(),
|
||||
to_prefix.upper(),
|
||||
line)
|
||||
line, count3 = re.subn(
|
||||
'\\B-D'+from_prefix.upper(),
|
||||
'-D'+to_prefix.upper(),
|
||||
line)
|
||||
return line, count1+count2+count3
|
||||
|
||||
def changefile(from_prefix, to_prefix, from_path, to_path, *,
|
||||
no_replacements=False):
|
||||
# rename any prefixes in file
|
||||
count = 0
|
||||
|
||||
# create a temporary file to avoid overwriting ourself
|
||||
if from_path == to_path and to_path != '-':
|
||||
to_path_temp = tempfile.NamedTemporaryFile('w', delete=False)
|
||||
to_path = to_path_temp.name
|
||||
else:
|
||||
to_path_temp = None
|
||||
|
||||
with openio(from_path) as from_f:
|
||||
with openio(to_path, 'w') as to_f:
|
||||
for line in from_f:
|
||||
if not no_replacements:
|
||||
line, n = changeprefix(from_prefix, to_prefix, line)
|
||||
count += n
|
||||
to_f.write(line)
|
||||
|
||||
if from_path != '-' and to_path != '-':
|
||||
shutil.copystat(from_path, to_path)
|
||||
|
||||
if to_path_temp:
|
||||
shutil.move(to_path, from_path)
|
||||
elif from_path != '-':
|
||||
os.remove(from_path)
|
||||
|
||||
# Summary
|
||||
print('%s: %d replacements' % (
|
||||
'%s -> %s' % (from_path, to_path) if not to_path_temp else from_path,
|
||||
count))
|
||||
|
||||
def main(from_prefix, to_prefix, paths=[], *,
|
||||
verbose=False,
|
||||
output=None,
|
||||
no_replacements=False,
|
||||
no_renames=False,
|
||||
git=False,
|
||||
no_stage=False,
|
||||
git_path=GIT_PATH):
|
||||
if not paths:
|
||||
if git:
|
||||
cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD']
|
||||
if verbose:
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
paths = subprocess.check_output(cmd, encoding='utf8').split()
|
||||
else:
|
||||
print('no paths?', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
for from_path in paths:
|
||||
# rename filename?
|
||||
if output:
|
||||
to_path = output
|
||||
elif no_renames:
|
||||
to_path = from_path
|
||||
else:
|
||||
to_path = os.path.join(
|
||||
os.path.dirname(from_path),
|
||||
changeprefix(from_prefix, to_prefix,
|
||||
os.path.basename(from_path))[0])
|
||||
|
||||
# rename contents
|
||||
changefile(from_prefix, to_prefix, from_path, to_path,
|
||||
no_replacements=no_replacements)
|
||||
|
||||
# stage?
|
||||
if git and not no_stage:
|
||||
if from_path != to_path:
|
||||
cmd = git_path + ['rm', '-q', from_path]
|
||||
if verbose:
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
subprocess.check_call(cmd)
|
||||
cmd = git_path + ['add', to_path]
|
||||
if verbose:
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Change prefixes in files/filenames. Useful for creating "
|
||||
"different versions of a codebase that don't conflict at compile "
|
||||
"time.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'from_prefix',
|
||||
help="Prefix to replace.")
|
||||
parser.add_argument(
|
||||
'to_prefix',
|
||||
help="Prefix to replace with.")
|
||||
parser.add_argument(
|
||||
'paths',
|
||||
nargs='*',
|
||||
help="Files to operate on.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Output file.")
|
||||
parser.add_argument(
|
||||
'-N', '--no-replacements',
|
||||
action='store_true',
|
||||
help="Don't change prefixes in files")
|
||||
parser.add_argument(
|
||||
'-R', '--no-renames',
|
||||
action='store_true',
|
||||
help="Don't rename files")
|
||||
parser.add_argument(
|
||||
'--git',
|
||||
action='store_true',
|
||||
help="Use git to find/update files.")
|
||||
parser.add_argument(
|
||||
'--no-stage',
|
||||
action='store_true',
|
||||
help="Don't stage changes with git.")
|
||||
parser.add_argument(
|
||||
'--git-path',
|
||||
type=lambda x: x.split(),
|
||||
default=GIT_PATH,
|
||||
help="Path to git executable, may include flags. "
|
||||
"Defaults to %r." % GIT_PATH)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
707
components/joltwallet__littlefs/src/littlefs/scripts/code.py
Normal file

@@ -0,0 +1,707 @@
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find code size at the function level. Basically just a big wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/code.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
NM_PATH = ['nm']
|
||||
NM_TYPES = 'tTrRdD'
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
|
||||
|
||||
# code size results
|
||||
class CodeResult(co.namedtuple('CodeResult', [
|
||||
'file', 'function',
|
||||
'size'])):
|
||||
_by = ['file', 'function']
|
||||
_fields = ['size']
|
||||
_sort = ['size']
|
||||
_types = {'size': Int}
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, file='', function='', size=0):
|
||||
return super().__new__(cls, file, function,
|
||||
Int(size))
|
||||
|
||||
def __add__(self, other):
|
||||
return CodeResult(self.file, self.function,
|
||||
self.size + other.size)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
|
||||
nm_path=NM_PATH,
|
||||
nm_types=NM_TYPES,
|
||||
objdump_path=OBJDUMP_PATH,
|
||||
sources=None,
|
||||
everything=False,
|
||||
**args):
|
||||
size_pattern = re.compile(
|
||||
'^(?P<size>[0-9a-fA-F]+)' +
|
||||
' (?P<type>[%s])' % re.escape(nm_types) +
|
||||
' (?P<func>.+?)$')
|
||||
line_pattern = re.compile(
|
||||
'^\s+(?P<no>[0-9]+)'
|
||||
'(?:\s+(?P<dir>[0-9]+))?'
|
||||
'\s+.*'
|
||||
'\s+(?P<path>[^\s]+)$')
|
||||
info_pattern = re.compile(
|
||||
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
|
||||
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
|
||||
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
|
||||
|
||||
results = []
|
||||
for path in obj_paths:
|
||||
# guess the source, if we have debug-info we'll replace this later
|
||||
file = re.sub('(\.o)?$', '.c', path, 1)
|
||||
|
||||
# find symbol sizes
|
||||
results_ = []
|
||||
# note nm-path may contain extra args
|
||||
cmd = nm_path + ['--size-sort', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
m = size_pattern.match(line)
|
||||
if m:
|
||||
func = m.group('func')
|
||||
# discard internal functions
|
||||
if not everything and func.startswith('__'):
|
||||
continue
|
||||
results_.append(CodeResult(
|
||||
file, func,
|
||||
int(m.group('size'), 16)))
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
|
||||
# try to figure out the source file if we have debug-info
|
||||
dirs = {}
|
||||
files = {}
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=rawline', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# note that files contain references to dirs, which we
|
||||
# dereference as soon as we see them as each file table follows a
|
||||
# dir table
|
||||
m = line_pattern.match(line)
|
||||
if m:
|
||||
if not m.group('dir'):
|
||||
# found a directory entry
|
||||
dirs[int(m.group('no'))] = m.group('path')
|
||||
else:
|
||||
# found a file entry
|
||||
dir = int(m.group('dir'))
|
||||
if dir in dirs:
|
||||
files[int(m.group('no'))] = os.path.join(
|
||||
dirs[dir],
|
||||
m.group('path'))
|
||||
else:
|
||||
files[int(m.group('no'))] = m.group('path')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
# do nothing on error, we don't need objdump to work, source files
|
||||
# may just be inaccurate
|
||||
pass
|
||||
|
||||
defs = {}
|
||||
is_func = False
|
||||
f_name = None
|
||||
f_file = None
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=info', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# state machine here to find definitions
|
||||
m = info_pattern.match(line)
|
||||
if m:
|
||||
if m.group('tag'):
|
||||
if is_func:
|
||||
defs[f_name] = files.get(f_file, '?')
|
||||
is_func = (m.group('tag') == 'DW_TAG_subprogram')
|
||||
elif m.group('name'):
|
||||
f_name = m.group('name')
|
||||
elif m.group('file'):
|
||||
f_file = int(m.group('file'))
|
||||
if is_func:
|
||||
defs[f_name] = files.get(f_file, '?')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
# do nothing on error, we don't need objdump to work, source files
|
||||
# may just be inaccurate
|
||||
pass
|
||||
|
||||
for r in results_:
|
||||
# find best matching debug symbol, this may be slightly different
|
||||
# due to optimizations
|
||||
if defs:
|
||||
# exact match? avoid difflib if we can for speed
|
||||
if r.function in defs:
|
||||
file = defs[r.function]
|
||||
else:
|
||||
_, file = max(
|
||||
defs.items(),
|
||||
key=lambda d: difflib.SequenceMatcher(None,
|
||||
d[0],
|
||||
r.function, False).ratio())
|
||||
else:
|
||||
file = r.file
|
||||
|
||||
# ignore filtered sources
|
||||
if sources is not None:
|
||||
if not any(
|
||||
os.path.abspath(file) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
else:
|
||||
# default to only cwd
|
||||
if not everything and not os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file)]) == os.getcwd():
|
||||
continue
|
||||
|
||||
# simplify path
|
||||
if os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file)]) == os.getcwd():
|
||||
file = os.path.relpath(file)
|
||||
else:
|
||||
file = os.path.abspath(file)
|
||||
|
||||
results.append(r._replace(file=file))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# print our table
|
||||
for line in lines:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], line[0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
**args):
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
results = collect(obj_paths, **args)
|
||||
else:
|
||||
results = []
|
||||
with openio(args['use']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('code_'+k in r and r['code_'+k].strip()
|
||||
for k in CodeResult._fields):
|
||||
continue
|
||||
try:
|
||||
results.append(CodeResult(
|
||||
**{k: r[k] for k in CodeResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['code_'+k] for k in CodeResult._fields
|
||||
if 'code_'+k in r and r['code_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
results = fold(CodeResult, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else CodeResult._sort)),
|
||||
reverse=reverse ^ (not k or k in CodeResult._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else CodeResult._by)
|
||||
+ ['code_'+k for k in (
|
||||
fields if fields is not None else CodeResult._fields)])
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
writer.writerow(
|
||||
{k: getattr(r, k) for k in (
|
||||
by if by is not None else CodeResult._by)}
|
||||
| {'code_'+k: getattr(r, k) for k in (
|
||||
fields if fields is not None else CodeResult._fields)})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('code_'+k in r and r['code_'+k].strip()
|
||||
for k in CodeResult._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(CodeResult(
|
||||
**{k: r[k] for k in CodeResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['code_'+k] for k in CodeResult._fields
|
||||
if 'code_'+k in r and r['code_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(CodeResult, diff_results, by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
table(CodeResult, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by if by is not None else ['function'],
|
||||
fields=fields,
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find code size at the function level.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'obj_paths',
|
||||
nargs='*',
|
||||
help="Input *.o files.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-u', '--use',
|
||||
help="Don't parse anything, use this CSV file.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
choices=CodeResult._by,
|
||||
help="Group by this field.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
choices=CodeResult._fields,
|
||||
help="Show this field.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'-F', '--source',
|
||||
dest='sources',
|
||||
action='append',
|
||||
help="Only consider definitions in this file. Defaults to anything "
|
||||
"in the current directory.")
|
||||
parser.add_argument(
|
||||
'--everything',
|
||||
action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument(
|
||||
'--nm-types',
|
||||
default=NM_TYPES,
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
|
||||
parser.add_argument(
|
||||
'--nm-path',
|
||||
type=lambda x: x.split(),
|
||||
default=NM_PATH,
|
||||
help="Path to the nm executable, may include flags. "
|
||||
"Defaults to %r." % NM_PATH)
|
||||
parser.add_argument(
|
||||
'--objdump-path',
|
||||
type=lambda x: x.split(),
|
||||
default=OBJDUMP_PATH,
|
||||
help="Path to the objdump executable, may include flags. "
|
||||
"Defaults to %r." % OBJDUMP_PATH)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
828
components/joltwallet__littlefs/src/littlefs/scripts/cov.py
Normal file

@@ -0,0 +1,828 @@
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find coverage info after running tests.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/cov.py \
|
||||
# lfs.t.a.gcda lfs_util.t.a.gcda \
|
||||
# -Flfs.c -Flfs_util.c -slines
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import itertools as it
|
||||
import json
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
# TODO use explode_asserts to avoid counting assert branches?
|
||||
# TODO use dwarf=info to find functions for inline functions?
|
||||
|
||||
GCOV_PATH = ['gcov']
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
|
||||
|
||||
# fractional fields, a/b
|
||||
class Frac(co.namedtuple('Frac', 'a,b')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, a=0, b=None):
|
||||
if isinstance(a, Frac) and b is None:
|
||||
return a
|
||||
if isinstance(a, str) and b is None:
|
||||
a, b = a.split('/', 1)
|
||||
if b is None:
|
||||
b = a
|
||||
return super().__new__(cls, Int(a), Int(b))
|
||||
|
||||
def __str__(self):
|
||||
return '%s/%s' % (self.a, self.b)
|
||||
|
||||
def __float__(self):
|
||||
return float(self.a)
|
||||
|
||||
none = '%11s %7s' % ('-', '-')
|
||||
def table(self):
|
||||
t = self.a.x/self.b.x if self.b.x else 1.0
|
||||
return '%11s %7s' % (
|
||||
self,
|
||||
'∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%.1f%%' % (100*t))
|
||||
|
||||
diff_none = '%11s' % '-'
|
||||
def diff_table(self):
|
||||
return '%11s' % (self,)
|
||||
|
||||
def diff_diff(self, other):
|
||||
new_a, new_b = self if self else (Int(0), Int(0))
|
||||
old_a, old_b = other if other else (Int(0), Int(0))
|
||||
return '%11s' % ('%s/%s' % (
|
||||
new_a.diff_diff(old_a).strip(),
|
||||
new_b.diff_diff(old_b).strip()))
|
||||
|
||||
def ratio(self, other):
|
||||
new_a, new_b = self if self else (Int(0), Int(0))
|
||||
old_a, old_b = other if other else (Int(0), Int(0))
|
||||
new = new_a.x/new_b.x if new_b.x else 1.0
|
||||
old = old_a.x/old_b.x if old_b.x else 1.0
|
||||
return new - old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.a + other.a, self.b + other.b)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.a - other.a, self.b - other.b)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.a * other.a, self.b + other.b)
|
||||
|
||||
def __lt__(self, other):
|
||||
self_t = self.a.x/self.b.x if self.b.x else 1.0
|
||||
other_t = other.a.x/other.b.x if other.b.x else 1.0
|
||||
return (self_t, self.a.x) < (other_t, other.a.x)
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.__class__.__lt__(other, self)
|
||||
|
||||
def __le__(self, other):
|
||||
return not self.__gt__(other)
|
||||
|
||||
def __ge__(self, other):
|
||||
return not self.__lt__(other)
|
||||
|
||||
# coverage results
|
||||
class CovResult(co.namedtuple('CovResult', [
|
||||
'file', 'function', 'line',
|
||||
'calls', 'hits', 'funcs', 'lines', 'branches'])):
|
||||
_by = ['file', 'function', 'line']
|
||||
_fields = ['calls', 'hits', 'funcs', 'lines', 'branches']
|
||||
_sort = ['funcs', 'lines', 'branches', 'hits', 'calls']
|
||||
_types = {
|
||||
'calls': Int, 'hits': Int,
|
||||
'funcs': Frac, 'lines': Frac, 'branches': Frac}
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, file='', function='', line=0,
|
||||
calls=0, hits=0, funcs=0, lines=0, branches=0):
|
||||
return super().__new__(cls, file, function, int(Int(line)),
|
||||
Int(calls), Int(hits), Frac(funcs), Frac(lines), Frac(branches))
|
||||
|
||||
def __add__(self, other):
|
||||
return CovResult(self.file, self.function, self.line,
|
||||
max(self.calls, other.calls),
|
||||
max(self.hits, other.hits),
|
||||
self.funcs + other.funcs,
|
||||
self.lines + other.lines,
|
||||
self.branches + other.branches)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def collect(gcda_paths, *,
|
||||
gcov_path=GCOV_PATH,
|
||||
sources=None,
|
||||
everything=False,
|
||||
**args):
|
||||
results = []
|
||||
for path in gcda_paths:
|
||||
# get coverage info through gcov's json output
|
||||
# note, gcov-path may contain extra args
|
||||
cmd = gcov_path + ['-b', '-t', '--json-format', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
data = json.load(proc.stdout)
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
# collect line/branch coverage
|
||||
for file in data['files']:
|
||||
# ignore filtered sources
|
||||
if sources is not None:
|
||||
if not any(
|
||||
os.path.abspath(file['file']) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
else:
|
||||
# default to only cwd
|
||||
if not everything and not os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file['file'])]) == os.getcwd():
|
||||
continue
|
||||
|
||||
# simplify path
|
||||
if os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file['file'])]) == os.getcwd():
|
||||
file_name = os.path.relpath(file['file'])
|
||||
else:
|
||||
file_name = os.path.abspath(file['file'])
|
||||
|
||||
for func in file['functions']:
|
||||
func_name = func.get('name', '(inlined)')
|
||||
# discard internal functions (this includes injected test cases)
|
||||
if not everything:
|
||||
if func_name.startswith('__'):
|
||||
continue
|
||||
|
||||
# go ahead and add functions, later folding will merge this if
|
||||
# there are other hits on this line
|
||||
results.append(CovResult(
|
||||
file_name, func_name, func['start_line'],
|
||||
func['execution_count'], 0,
|
||||
Frac(1 if func['execution_count'] > 0 else 0, 1),
|
||||
0,
|
||||
0))
|
||||
|
||||
for line in file['lines']:
|
||||
func_name = line.get('function_name', '(inlined)')
|
||||
# discard internal functions (this includes injected test cases)
|
||||
if not everything:
|
||||
if func_name.startswith('__'):
|
||||
continue
|
||||
|
||||
# go ahead and add lines, later folding will merge this if
|
||||
# there are other hits on this line
|
||||
results.append(CovResult(
|
||||
file_name, func_name, line['line_number'],
|
||||
0, line['count'],
|
||||
0,
|
||||
Frac(1 if line['count'] > 0 else 0, 1),
|
||||
Frac(
|
||||
sum(1 if branch['count'] > 0 else 0
|
||||
for branch in line['branches']),
|
||||
len(line['branches']))))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# print our table
|
||||
for line in lines:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], line[0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]))
|
||||
|
||||
|
||||
def annotate(Result, results, *,
|
||||
annotate=False,
|
||||
lines=False,
|
||||
branches=False,
|
||||
**args):
|
||||
# if neither branches/lines specified, color both
|
||||
if annotate and not lines and not branches:
|
||||
lines, branches = True, True
|
||||
|
||||
for path in co.OrderedDict.fromkeys(r.file for r in results).keys():
|
||||
# flatten to line info
|
||||
results = fold(Result, results, by=['file', 'line'])
|
||||
table = {r.line: r for r in results if r.file == path}
|
||||
|
||||
# calculate spans to show
|
||||
if not annotate:
|
||||
spans = []
|
||||
last = None
|
||||
func = None
|
||||
for line, r in sorted(table.items()):
|
||||
if ((lines and int(r.hits) == 0)
|
||||
or (branches and r.branches.a < r.branches.b)):
|
||||
if last is not None and line - last.stop <= args['context']:
|
||||
last = range(
|
||||
last.start,
|
||||
line+1+args['context'])
|
||||
else:
|
||||
if last is not None:
|
||||
spans.append((last, func))
|
||||
last = range(
|
||||
line-args['context'],
|
||||
line+1+args['context'])
|
||||
func = r.function
|
||||
if last is not None:
|
||||
spans.append((last, func))
|
||||
|
||||
with open(path) as f:
|
||||
skipped = False
|
||||
for i, line in enumerate(f):
|
||||
# skip lines not in spans?
|
||||
if not annotate and not any(i+1 in s for s, _ in spans):
|
||||
skipped = True
|
||||
continue
|
||||
|
||||
if skipped:
|
||||
skipped = False
|
||||
print('%s@@ %s:%d: %s @@%s' % (
|
||||
'\x1b[36m' if args['color'] else '',
|
||||
path,
|
||||
i+1,
|
||||
next(iter(f for _, f in spans)),
|
||||
'\x1b[m' if args['color'] else ''))
|
||||
|
||||
# build line
|
||||
if line.endswith('\n'):
|
||||
line = line[:-1]
|
||||
|
||||
if i+1 in table:
|
||||
r = table[i+1]
|
||||
line = '%-*s // %s hits%s' % (
|
||||
args['width'],
|
||||
line,
|
||||
r.hits,
|
||||
', %s branches' % (r.branches,)
|
||||
if int(r.branches.b) else '')
|
||||
|
||||
if args['color']:
|
||||
if lines and int(r.hits) == 0:
|
||||
line = '\x1b[1;31m%s\x1b[m' % line
|
||||
elif branches and r.branches.a < r.branches.b:
|
||||
line = '\x1b[35m%s\x1b[m' % line
|
||||
|
||||
print(line)
|
||||
|
||||
|
||||
def main(gcda_paths, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
hits=False,
|
||||
**args):
|
||||
# figure out what color should be
|
||||
if args.get('color') == 'auto':
|
||||
args['color'] = sys.stdout.isatty()
|
||||
elif args.get('color') == 'always':
|
||||
args['color'] = True
|
||||
else:
|
||||
args['color'] = False
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
results = collect(gcda_paths, **args)
|
||||
else:
|
||||
results = []
|
||||
with openio(args['use']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('cov_'+k in r and r['cov_'+k].strip()
|
||||
for k in CovResult._fields):
|
||||
continue
|
||||
try:
|
||||
results.append(CovResult(
|
||||
**{k: r[k] for k in CovResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['cov_'+k]
|
||||
for k in CovResult._fields
|
||||
if 'cov_'+k in r
|
||||
and r['cov_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
results = fold(CovResult, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else CovResult._sort)),
|
||||
reverse=reverse ^ (not k or k in CovResult._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else CovResult._by)
|
||||
+ ['cov_'+k for k in (
|
||||
fields if fields is not None else CovResult._fields)])
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
writer.writerow(
|
||||
{k: getattr(r, k) for k in (
|
||||
by if by is not None else CovResult._by)}
|
||||
| {'cov_'+k: getattr(r, k) for k in (
|
||||
fields if fields is not None else CovResult._fields)})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('cov_'+k in r and r['cov_'+k].strip()
|
||||
for k in CovResult._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(CovResult(
|
||||
**{k: r[k] for k in CovResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['cov_'+k]
|
||||
for k in CovResult._fields
|
||||
if 'cov_'+k in r
|
||||
and r['cov_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(CovResult, diff_results,
|
||||
by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
if (args.get('annotate')
|
||||
or args.get('lines')
|
||||
or args.get('branches')):
|
||||
# annotate sources
|
||||
annotate(CovResult, results, **args)
|
||||
else:
|
||||
# print table
|
||||
table(CovResult, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by if by is not None else ['function'],
|
||||
fields=fields if fields is not None
|
||||
else ['lines', 'branches'] if not hits
|
||||
else ['calls', 'hits'],
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
# catch lack of coverage
|
||||
if args.get('error_on_lines') and any(
|
||||
r.lines.a < r.lines.b for r in results):
|
||||
sys.exit(2)
|
||||
elif args.get('error_on_branches') and any(
|
||||
r.branches.a < r.branches.b for r in results):
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find coverage info after running tests.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'gcda_paths',
|
||||
nargs='*',
|
||||
help="Input *.gcda files.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-u', '--use',
|
||||
help="Don't parse anything, use this CSV file.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
choices=CovResult._by,
|
||||
help="Group by this field.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
choices=CovResult._fields,
|
||||
help="Show this field.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'-F', '--source',
|
||||
dest='sources',
|
||||
action='append',
|
||||
help="Only consider definitions in this file. Defaults to anything "
|
||||
"in the current directory.")
|
||||
parser.add_argument(
|
||||
'--everything',
|
||||
action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument(
|
||||
'--hits',
|
||||
action='store_true',
|
||||
help="Show total hits instead of coverage.")
|
||||
parser.add_argument(
|
||||
'-A', '--annotate',
|
||||
action='store_true',
|
||||
help="Show source files annotated with coverage info.")
|
||||
parser.add_argument(
|
||||
'-L', '--lines',
|
||||
action='store_true',
|
||||
help="Show uncovered lines.")
|
||||
parser.add_argument(
|
||||
'-B', '--branches',
|
||||
action='store_true',
|
||||
help="Show uncovered branches.")
|
||||
parser.add_argument(
|
||||
'-c', '--context',
|
||||
type=lambda x: int(x, 0),
|
||||
default=3,
|
||||
help="Show n additional lines of context. Defaults to 3.")
|
||||
parser.add_argument(
|
||||
'-W', '--width',
|
||||
type=lambda x: int(x, 0),
|
||||
default=80,
|
||||
help="Assume source is styled with this many columns. Defaults to 80.")
|
||||
parser.add_argument(
|
||||
'--color',
|
||||
choices=['never', 'always', 'auto'],
|
||||
default='auto',
|
||||
help="When to use terminal colors. Defaults to 'auto'.")
|
||||
parser.add_argument(
|
||||
'-e', '--error-on-lines',
|
||||
action='store_true',
|
||||
help="Error if any lines are not covered.")
|
||||
parser.add_argument(
|
||||
'-E', '--error-on-branches',
|
||||
action='store_true',
|
||||
help="Error if any branches are not covered.")
|
||||
parser.add_argument(
|
||||
'--gcov-path',
|
||||
default=GCOV_PATH,
|
||||
type=lambda x: x.split(),
|
||||
help="Path to the gcov executable, may include paths. "
|
||||
"Defaults to %r." % GCOV_PATH)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
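# A brief usage sketch (added here for clarity, not part of the upstream
# script; the build paths are hypothetical, the flags are the ones defined
# by the argparse options above):
#
#   ./scripts/cov.py build/*.gcda -o cov.csv       # collect coverage into a CSV
#   ./scripts/cov.py build/*.gcda -d cov.csv -p    # percent change vs. a previous CSV
#   ./scripts/cov.py build/*.gcda -A               # annotate sources with hit/branch info
#   ./scripts/cov.py build/*.gcda -L -B            # only show uncovered lines/branches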
|
||||
704
components/joltwallet__littlefs/src/littlefs/scripts/data.py
Normal file
|
|
@ -0,0 +1,704 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find data size at the function level. Basically just a big wrapper
|
||||
# around nm with some extra conveniences for comparing builds. Heavily inspired
|
||||
# by Linux's Bloat-O-Meter.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/data.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
NM_PATH = ['nm']
|
||||
NM_TYPES = 'dDbB'
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
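# A few worked examples of how ratio behaves (added for illustration, not
# part of the upstream script): it returns (new-old)/old, saturating at ±∞,
# and treats a missing result as 0:
#
#   Int(6).ratio(Int(4))     -> 0.5     # grew by 50%
#   Int(4).ratio(None)       -> 1.0     # new entry, nothing to compare against
#   Int.ratio(None, Int(4))  -> -1.0    # removed entry
#   Int('∞').ratio(Int(4))   -> +inf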
|
||||
|
||||
# data size results
|
||||
class DataResult(co.namedtuple('DataResult', [
|
||||
'file', 'function',
|
||||
'size'])):
|
||||
_by = ['file', 'function']
|
||||
_fields = ['size']
|
||||
_sort = ['size']
|
||||
_types = {'size': Int}
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, file='', function='', size=0):
|
||||
return super().__new__(cls, file, function,
|
||||
Int(size))
|
||||
|
||||
def __add__(self, other):
|
||||
return DataResult(self.file, self.function,
|
||||
self.size + other.size)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
|
||||
nm_path=NM_PATH,
|
||||
nm_types=NM_TYPES,
|
||||
objdump_path=OBJDUMP_PATH,
|
||||
sources=None,
|
||||
everything=False,
|
||||
**args):
|
||||
size_pattern = re.compile(
|
||||
'^(?P<size>[0-9a-fA-F]+)' +
|
||||
' (?P<type>[%s])' % re.escape(nm_types) +
|
||||
' (?P<func>.+?)$')
|
||||
line_pattern = re.compile(
|
||||
'^\s+(?P<no>[0-9]+)'
|
||||
'(?:\s+(?P<dir>[0-9]+))?'
|
||||
'\s+.*'
|
||||
'\s+(?P<path>[^\s]+)$')
|
||||
info_pattern = re.compile(
|
||||
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
|
||||
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
|
||||
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')
|
||||
|
||||
results = []
|
||||
for path in obj_paths:
|
||||
# guess the source, if we have debug-info we'll replace this later
|
||||
file = re.sub('(\.o)?$', '.c', path, 1)
|
||||
|
||||
# find symbol sizes
|
||||
results_ = []
|
||||
# note nm-path may contain extra args
|
||||
cmd = nm_path + ['--size-sort', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
m = size_pattern.match(line)
|
||||
if m:
|
||||
func = m.group('func')
|
||||
# discard internal functions
|
||||
if not everything and func.startswith('__'):
|
||||
continue
|
||||
results_.append(DataResult(
|
||||
file, func,
|
||||
int(m.group('size'), 16)))
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
|
||||
# try to figure out the source file if we have debug-info
|
||||
dirs = {}
|
||||
files = {}
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=rawline', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# note that files contain references to dirs, which we
|
||||
# dereference as soon as we see them as each file table follows a
|
||||
# dir table
|
||||
m = line_pattern.match(line)
|
||||
if m:
|
||||
if not m.group('dir'):
|
||||
# found a directory entry
|
||||
dirs[int(m.group('no'))] = m.group('path')
|
||||
else:
|
||||
# found a file entry
|
||||
dir = int(m.group('dir'))
|
||||
if dir in dirs:
|
||||
files[int(m.group('no'))] = os.path.join(
|
||||
dirs[dir],
|
||||
m.group('path'))
|
||||
else:
|
||||
files[int(m.group('no'))] = m.group('path')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
# do nothing on error, we don't need objdump to work, source files
|
||||
# may just be inaccurate
|
||||
pass
|
||||
|
||||
defs = {}
|
||||
is_func = False
|
||||
f_name = None
|
||||
f_file = None
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=info', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# state machine here to find definitions
|
||||
m = info_pattern.match(line)
|
||||
if m:
|
||||
if m.group('tag'):
|
||||
if is_func:
|
||||
defs[f_name] = files.get(f_file, '?')
|
||||
is_func = (m.group('tag') == 'DW_TAG_subprogram')
|
||||
elif m.group('name'):
|
||||
f_name = m.group('name')
|
||||
elif m.group('file'):
|
||||
f_file = int(m.group('file'))
|
||||
if is_func:
|
||||
defs[f_name] = files.get(f_file, '?')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
# do nothing on error, we don't need objdump to work, source files
|
||||
# may just be inaccurate
|
||||
pass
|
||||
|
||||
for r in results_:
|
||||
# find best matching debug symbol, this may be slightly different
|
||||
# due to optimizations
|
||||
if defs:
|
||||
# exact match? avoid difflib if we can for speed
|
||||
if r.function in defs:
|
||||
file = defs[r.function]
|
||||
else:
|
||||
_, file = max(
|
||||
defs.items(),
|
||||
key=lambda d: difflib.SequenceMatcher(None,
|
||||
d[0],
|
||||
r.function, False).ratio())
|
||||
else:
|
||||
file = r.file
|
||||
|
||||
# ignore filtered sources
|
||||
if sources is not None:
|
||||
if not any(
|
||||
os.path.abspath(file) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
else:
|
||||
# default to only cwd
|
||||
if not everything and not os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file)]) == os.getcwd():
|
||||
continue
|
||||
|
||||
# simplify path
|
||||
if os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(file)]) == os.getcwd():
|
||||
file = os.path.relpath(file)
|
||||
else:
|
||||
file = os.path.abspath(file)
|
||||
|
||||
results.append(r._replace(file=file))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
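# For illustration (not part of the upstream script), folding merges results
# that share the same 'by' fields ('file' and 'function' by default) via
# DataResult.__add__, so two entries for the same hypothetical symbol:
#
#   fold(DataResult, [DataResult('lfs.c', 'lfs_fn', 4),
#                     DataResult('lfs.c', 'lfs_fn', 6)])
#
# collapse into a single DataResult('lfs.c', 'lfs_fn', size=10).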
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# print our table
|
||||
for line in lines:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], line[0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
**args):
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
results = collect(obj_paths, **args)
|
||||
else:
|
||||
results = []
|
||||
with openio(args['use']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
try:
|
||||
results.append(DataResult(
|
||||
**{k: r[k] for k in DataResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['data_'+k] for k in DataResult._fields
|
||||
if 'data_'+k in r and r['data_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
results = fold(DataResult, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else DataResult._sort)),
|
||||
reverse=reverse ^ (not k or k in DataResult._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else DataResult._by)
|
||||
+ ['data_'+k for k in (
|
||||
fields if fields is not None else DataResult._fields)])
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
writer.writerow(
|
||||
{k: getattr(r, k) for k in (
|
||||
by if by is not None else DataResult._by)}
|
||||
| {'data_'+k: getattr(r, k) for k in (
|
||||
fields if fields is not None else DataResult._fields)})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('data_'+k in r and r['data_'+k].strip()
|
||||
for k in DataResult._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(DataResult(
|
||||
**{k: r[k] for k in DataResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['data_'+k] for k in DataResult._fields
|
||||
if 'data_'+k in r and r['data_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(DataResult, diff_results, by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
table(DataResult, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by if by is not None else ['function'],
|
||||
fields=fields,
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find data size at the function level.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'obj_paths',
|
||||
nargs='*',
|
||||
help="Input *.o files.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-u', '--use',
|
||||
help="Don't parse anything, use this CSV file.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
choices=DataResult._by,
|
||||
help="Group by this field.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
choices=DataResult._fields,
|
||||
help="Show this field.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'-F', '--source',
|
||||
dest='sources',
|
||||
action='append',
|
||||
help="Only consider definitions in this file. Defaults to anything "
|
||||
"in the current directory.")
|
||||
parser.add_argument(
|
||||
'--everything',
|
||||
action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument(
|
||||
'--nm-types',
|
||||
default=NM_TYPES,
|
||||
help="Type of symbols to report, this uses the same single-character "
|
||||
"type-names emitted by nm. Defaults to %r." % NM_TYPES)
|
||||
parser.add_argument(
|
||||
'--nm-path',
|
||||
type=lambda x: x.split(),
|
||||
default=NM_PATH,
|
||||
help="Path to the nm executable, may include flags. "
|
||||
"Defaults to %r." % NM_PATH)
|
||||
parser.add_argument(
|
||||
'--objdump-path',
|
||||
type=lambda x: x.split(),
|
||||
default=OBJDUMP_PATH,
|
||||
help="Path to the objdump executable, may include flags. "
|
||||
"Defaults to %r." % OBJDUMP_PATH)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
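# A brief usage sketch (added for clarity, not part of the upstream script;
# the object files are hypothetical, the flags are the ones defined above):
#
#   ./scripts/data.py lfs.o lfs_util.o -o data.csv     # record data sizes to a CSV
#   ./scripts/data.py lfs.o lfs_util.o -d data.csv     # diff against a previous run
#   ./scripts/data.py lfs.o lfs_util.o -b file         # group results by file
#   ./scripts/data.py lfs.o lfs_util.o -Y -d data.csv  # only the TOTAL row, diffed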
|
||||
1344
components/joltwallet__littlefs/src/littlefs/scripts/perf.py
Normal file
File diff suppressed because it is too large
1276
components/joltwallet__littlefs/src/littlefs/scripts/perfbd.py
Normal file
File diff suppressed because it is too large
1592
components/joltwallet__littlefs/src/littlefs/scripts/plot.py
Normal file
File diff suppressed because it is too large
1262
components/joltwallet__littlefs/src/littlefs/scripts/plotmpl.py
Normal file
File diff suppressed because it is too large
|
|
@ -0,0 +1,478 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Preprocessor that makes asserts easier to debug.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/prettyasserts.py -p LFS_ASSERT lfs.c -o lfs.a.c
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# Copyright (c) 2020, Arm Limited. All rights reserved.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
import re
|
||||
import sys
|
||||
|
||||
# NOTE the use of macros here helps keep a consistent stack depth which
|
||||
# tools may rely on.
|
||||
#
|
||||
# If compilation errors are noisy consider using -ftrack-macro-expansion=0.
|
||||
#
|
||||
|
||||
LIMIT = 16
|
||||
|
||||
CMP = {
|
||||
'==': 'eq',
|
||||
'!=': 'ne',
|
||||
'<=': 'le',
|
||||
'>=': 'ge',
|
||||
'<': 'lt',
|
||||
'>': 'gt',
|
||||
}
|
||||
|
||||
LEXEMES = {
|
||||
'ws': [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
|
||||
'assert': ['assert'],
|
||||
'arrow': ['=>'],
|
||||
'string': [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
|
||||
'paren': [r'\(', r'\)'],
|
||||
'cmp': CMP.keys(),
|
||||
'logic': [r'\&\&', r'\|\|'],
|
||||
'sep': [':', ';', r'\{', r'\}', ','],
|
||||
'op': ['->'], # specifically ops that conflict with cmp
|
||||
}
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def write_header(f, limit=LIMIT):
|
||||
f.writeln("// Generated by %s:" % sys.argv[0])
|
||||
f.writeln("//")
|
||||
f.writeln("// %s" % ' '.join(sys.argv))
|
||||
f.writeln("//")
|
||||
f.writeln()
|
||||
|
||||
f.writeln("#include <stdbool.h>")
|
||||
f.writeln("#include <stdint.h>")
|
||||
f.writeln("#include <inttypes.h>")
|
||||
f.writeln("#include <stdio.h>")
|
||||
f.writeln("#include <string.h>")
|
||||
f.writeln("#include <signal.h>")
|
||||
# give source a chance to define feature macros
|
||||
f.writeln("#undef _FEATURES_H")
|
||||
f.writeln()
|
||||
|
||||
# write print macros
|
||||
f.writeln("__attribute__((unused))")
|
||||
f.writeln("static void __pretty_assert_print_bool(")
|
||||
f.writeln(" const void *v, size_t size) {")
|
||||
f.writeln(" (void)size;")
|
||||
f.writeln(" printf(\"%s\", *(const bool*)v ? \"true\" : \"false\");")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
f.writeln("__attribute__((unused))")
|
||||
f.writeln("static void __pretty_assert_print_int(")
|
||||
f.writeln(" const void *v, size_t size) {")
|
||||
f.writeln(" (void)size;")
|
||||
f.writeln(" printf(\"%\"PRIiMAX, *(const intmax_t*)v);")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
f.writeln("__attribute__((unused))")
|
||||
f.writeln("static void __pretty_assert_print_ptr(")
|
||||
f.writeln(" const void *v, size_t size) {")
|
||||
f.writeln(" (void)size;")
|
||||
f.writeln(" printf(\"%p\", v);")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
f.writeln("__attribute__((unused))")
|
||||
f.writeln("static void __pretty_assert_print_mem(")
|
||||
f.writeln(" const void *v, size_t size) {")
|
||||
f.writeln(" const uint8_t *v_ = v;")
|
||||
f.writeln(" printf(\"\\\"\");")
|
||||
f.writeln(" for (size_t i = 0; i < size && i < %d; i++) {" % limit)
|
||||
f.writeln(" if (v_[i] >= ' ' && v_[i] <= '~') {")
|
||||
f.writeln(" printf(\"%c\", v_[i]);")
|
||||
f.writeln(" } else {")
|
||||
f.writeln(" printf(\"\\\\x%02x\", v_[i]);")
|
||||
f.writeln(" }")
|
||||
f.writeln(" }")
|
||||
f.writeln(" if (size > %d) {" % limit)
|
||||
f.writeln(" printf(\"...\");")
|
||||
f.writeln(" }")
|
||||
f.writeln(" printf(\"\\\"\");")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
f.writeln("__attribute__((unused))")
|
||||
f.writeln("static void __pretty_assert_print_str(")
|
||||
f.writeln(" const void *v, size_t size) {")
|
||||
f.writeln(" __pretty_assert_print_mem(v, size);")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
f.writeln("__attribute__((unused, noinline))")
|
||||
f.writeln("static void __pretty_assert_fail(")
|
||||
f.writeln(" const char *file, int line,")
|
||||
f.writeln(" void (*type_print_cb)(const void*, size_t),")
|
||||
f.writeln(" const char *cmp,")
|
||||
f.writeln(" const void *lh, size_t lsize,")
|
||||
f.writeln(" const void *rh, size_t rsize) {")
|
||||
f.writeln(" printf(\"%s:%d:assert: assert failed with \", file, line);")
|
||||
f.writeln(" type_print_cb(lh, lsize);")
|
||||
f.writeln(" printf(\", expected %s \", cmp);")
|
||||
f.writeln(" type_print_cb(rh, rsize);")
|
||||
f.writeln(" printf(\"\\n\");")
|
||||
f.writeln(" fflush(NULL);")
|
||||
f.writeln(" raise(SIGABRT);")
|
||||
f.writeln("}")
|
||||
f.writeln()
|
||||
|
||||
# write assert macros
|
||||
for op, cmp in sorted(CMP.items()):
|
||||
f.writeln("#define __PRETTY_ASSERT_BOOL_%s(lh, rh) do { \\"
|
||||
% cmp.upper())
|
||||
f.writeln(" bool _lh = !!(lh); \\")
|
||||
f.writeln(" bool _rh = !!(rh); \\")
|
||||
f.writeln(" if (!(_lh %s _rh)) { \\" % op)
|
||||
f.writeln(" __pretty_assert_fail( \\")
|
||||
f.writeln(" __FILE__, __LINE__, \\")
|
||||
f.writeln(" __pretty_assert_print_bool, \"%s\", \\"
|
||||
% cmp)
|
||||
f.writeln(" &_lh, 0, \\")
|
||||
f.writeln(" &_rh, 0); \\")
|
||||
f.writeln(" } \\")
|
||||
f.writeln("} while (0)")
|
||||
for op, cmp in sorted(CMP.items()):
|
||||
f.writeln("#define __PRETTY_ASSERT_INT_%s(lh, rh) do { \\"
|
||||
% cmp.upper())
|
||||
f.writeln(" __typeof__(lh) _lh = lh; \\")
|
||||
f.writeln(" __typeof__(lh) _rh = rh; \\")
|
||||
f.writeln(" if (!(_lh %s _rh)) { \\" % op)
|
||||
f.writeln(" __pretty_assert_fail( \\")
|
||||
f.writeln(" __FILE__, __LINE__, \\")
|
||||
f.writeln(" __pretty_assert_print_int, \"%s\", \\"
|
||||
% cmp)
|
||||
f.writeln(" &(intmax_t){_lh}, 0, \\")
|
||||
f.writeln(" &(intmax_t){_rh}, 0); \\")
|
||||
f.writeln(" } \\")
|
||||
f.writeln("} while (0)")
|
||||
for op, cmp in sorted(CMP.items()):
|
||||
f.writeln("#define __PRETTY_ASSERT_MEM_%s(lh, rh, size) do { \\"
|
||||
% cmp.upper())
|
||||
f.writeln(" const void *_lh = lh; \\")
|
||||
f.writeln(" const void *_rh = rh; \\")
|
||||
f.writeln(" if (!(memcmp(_lh, _rh, size) %s 0)) { \\" % op)
|
||||
f.writeln(" __pretty_assert_fail( \\")
|
||||
f.writeln(" __FILE__, __LINE__, \\")
|
||||
f.writeln(" __pretty_assert_print_mem, \"%s\", \\"
|
||||
% cmp)
|
||||
f.writeln(" _lh, size, \\")
|
||||
f.writeln(" _rh, size); \\")
|
||||
f.writeln(" } \\")
|
||||
f.writeln("} while (0)")
|
||||
for op, cmp in sorted(CMP.items()):
|
||||
f.writeln("#define __PRETTY_ASSERT_STR_%s(lh, rh) do { \\"
|
||||
% cmp.upper())
|
||||
f.writeln(" const char *_lh = lh; \\")
|
||||
f.writeln(" const char *_rh = rh; \\")
|
||||
f.writeln(" if (!(strcmp(_lh, _rh) %s 0)) { \\" % op)
|
||||
f.writeln(" __pretty_assert_fail( \\")
|
||||
f.writeln(" __FILE__, __LINE__, \\")
|
||||
f.writeln(" __pretty_assert_print_str, \"%s\", \\"
|
||||
% cmp)
|
||||
f.writeln(" _lh, strlen(_lh), \\")
|
||||
f.writeln(" _rh, strlen(_rh)); \\")
|
||||
f.writeln(" } \\")
|
||||
f.writeln("} while (0)")
|
||||
for op, cmp in sorted(CMP.items()):
|
||||
# Only EQ and NE are supported when compared to NULL.
|
||||
if cmp not in ['eq', 'ne']:
|
||||
continue
|
||||
f.writeln("#define __PRETTY_ASSERT_PTR_%s(lh, rh) do { \\"
|
||||
% cmp.upper())
|
||||
f.writeln(" const void *_lh = (const void*)(uintptr_t)lh; \\")
|
||||
f.writeln(" const void *_rh = (const void*)(uintptr_t)rh; \\")
|
||||
f.writeln(" if (!(_lh %s _rh)) { \\" % op)
|
||||
f.writeln(" __pretty_assert_fail( \\")
|
||||
f.writeln(" __FILE__, __LINE__, \\")
|
||||
f.writeln(" __pretty_assert_print_ptr, \"%s\", \\"
|
||||
% cmp)
|
||||
f.writeln(" (const void*){_lh}, 0, \\")
|
||||
f.writeln(" (const void*){_rh}, 0); \\")
|
||||
f.writeln(" } \\")
|
||||
f.writeln("} while (0)")
|
||||
f.writeln()
|
||||
f.writeln()
|
||||
|
||||
def mkassert(type, cmp, lh, rh, size=None):
|
||||
if size is not None:
|
||||
return ("__PRETTY_ASSERT_%s_%s(%s, %s, %s)"
|
||||
% (type.upper(), cmp.upper(), lh, rh, size))
|
||||
else:
|
||||
return ("__PRETTY_ASSERT_%s_%s(%s, %s)"
|
||||
% (type.upper(), cmp.upper(), lh, rh))
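# For example (added for clarity, not part of the upstream script), the
# helper above expands to calls of the macros emitted by write_header:
#
#   mkassert('int', 'eq', 'a', 'b')            -> '__PRETTY_ASSERT_INT_EQ(a, b)'
#   mkassert('mem', 'eq', 'buf', '"abc"', '3') -> '__PRETTY_ASSERT_MEM_EQ(buf, "abc", 3)'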
|
||||
|
||||
|
||||
# simple recursive descent parser
|
||||
class ParseFailure(Exception):
|
||||
def __init__(self, expected, found):
|
||||
self.expected = expected
|
||||
self.found = found
|
||||
|
||||
def __str__(self):
|
||||
return "expected %r, found %s..." % (
|
||||
self.expected, repr(self.found)[:70])
|
||||
|
||||
class Parser:
|
||||
def __init__(self, in_f, lexemes=LEXEMES):
|
||||
p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
|
||||
for n, l in lexemes.items())
|
||||
p = re.compile(p, re.DOTALL)
|
||||
data = in_f.read()
|
||||
tokens = []
|
||||
line = 1
|
||||
col = 0
|
||||
while True:
|
||||
m = p.search(data)
|
||||
if m:
|
||||
if m.start() > 0:
|
||||
tokens.append((None, data[:m.start()], line, col))
|
||||
tokens.append((m.lastgroup, m.group(), line, col))
|
||||
data = data[m.end():]
|
||||
else:
|
||||
tokens.append((None, data, line, col))
|
||||
break
|
||||
self.tokens = tokens
|
||||
self.off = 0
|
||||
|
||||
def lookahead(self, *pattern):
|
||||
if self.off < len(self.tokens):
|
||||
token = self.tokens[self.off]
|
||||
if token[0] in pattern or token[1] in pattern:
|
||||
self.m = token[1]
|
||||
return self.m
|
||||
self.m = None
|
||||
return self.m
|
||||
|
||||
def accept(self, *patterns):
|
||||
m = self.lookahead(*patterns)
|
||||
if m is not None:
|
||||
self.off += 1
|
||||
return m
|
||||
|
||||
def expect(self, *patterns):
|
||||
m = self.accept(*patterns)
|
||||
if not m:
|
||||
raise ParseFailure(patterns, self.tokens[self.off:])
|
||||
return m
|
||||
|
||||
def push(self):
|
||||
return self.off
|
||||
|
||||
def pop(self, state):
|
||||
self.off = state
|
||||
|
||||
def p_assert(p):
|
||||
state = p.push()
|
||||
|
||||
# assert(memcmp(a,b,size) cmp 0)?
|
||||
try:
|
||||
p.expect('assert') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
p.expect('memcmp') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
lh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
size = p_expr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
cmp = p.expect('cmp') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('mem', CMP[cmp], lh, rh, size)
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
|
||||
# assert(strcmp(a,b) cmp 0)?
|
||||
try:
|
||||
p.expect('assert') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
p.expect('strcmp') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
lh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
cmp = p.expect('cmp') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('str', CMP[cmp], lh, rh)
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
|
||||
# assert(a cmp b)?
|
||||
try:
|
||||
p.expect('assert') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
lh = p_expr(p) ; p.accept('ws')
|
||||
cmp = p.expect('cmp') ; p.accept('ws')
|
||||
rh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(')')
|
||||
if rh == 'NULL' or lh == 'NULL':
|
||||
return mkassert('ptr', CMP[cmp], lh, rh)
|
||||
return mkassert('int', CMP[cmp], lh, rh)
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
|
||||
# assert(a)?
|
||||
p.expect('assert') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
lh = p_exprs(p) ; p.accept('ws')
|
||||
p.expect(')')
|
||||
return mkassert('bool', 'eq', lh, 'true')
|
||||
|
||||
def p_expr(p):
|
||||
res = []
|
||||
while True:
|
||||
if p.accept('('):
|
||||
res.append(p.m)
|
||||
while True:
|
||||
res.append(p_exprs(p))
|
||||
if p.accept('sep'):
|
||||
res.append(p.m)
|
||||
else:
|
||||
break
|
||||
res.append(p.expect(')'))
|
||||
elif p.lookahead('assert'):
|
||||
state = p.push()
|
||||
try:
|
||||
res.append(p_assert(p))
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
res.append(p.expect('assert'))
|
||||
elif p.accept('string', 'op', 'ws', None):
|
||||
res.append(p.m)
|
||||
else:
|
||||
return ''.join(res)
|
||||
|
||||
def p_exprs(p):
|
||||
res = []
|
||||
while True:
|
||||
res.append(p_expr(p))
|
||||
if p.accept('cmp', 'logic', ','):
|
||||
res.append(p.m)
|
||||
else:
|
||||
return ''.join(res)
|
||||
|
||||
def p_stmt(p):
|
||||
ws = p.accept('ws') or ''
|
||||
|
||||
# memcmp(lh,rh,size) => 0?
|
||||
if p.lookahead('memcmp'):
|
||||
state = p.push()
|
||||
try:
|
||||
p.expect('memcmp') ; p.accept('ws')
|
||||
p.expect('(') ; p.accept('ws')
|
||||
lh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
size = p_expr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
p.expect('=>') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
return ws + mkassert('mem', 'eq', lh, rh, size)
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
|
||||
# strcmp(lh,rh) => 0?
|
||||
if p.lookahead('strcmp'):
|
||||
state = p.push()
|
||||
try:
|
||||
p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
|
||||
lh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(',') ; p.accept('ws')
|
||||
rh = p_expr(p) ; p.accept('ws')
|
||||
p.expect(')') ; p.accept('ws')
|
||||
p.expect('=>') ; p.accept('ws')
|
||||
p.expect('0') ; p.accept('ws')
|
||||
return ws + mkassert('str', 'eq', lh, rh)
|
||||
except ParseFailure:
|
||||
p.pop(state)
|
||||
|
||||
# lh => rh?
|
||||
lh = p_exprs(p)
|
||||
if p.accept('=>'):
|
||||
rh = p_exprs(p)
|
||||
return ws + mkassert('int', 'eq', lh, rh)
|
||||
else:
|
||||
return ws + lh
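# As a concrete illustration (added for clarity, not part of the upstream
# script), p_stmt rewrites the test-specific '=>' shorthand into the pretty
# assert macros, e.g.:
#
#   'memcmp(a, b, 4) => 0'  ->  '__PRETTY_ASSERT_MEM_EQ(a, b, 4)'
#   'x => y'                ->  an '__PRETTY_ASSERT_INT_EQ(x, y)' call
#                               (whitespace from the source is preserved)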
|
||||
|
||||
def main(input=None, output=None, pattern=[], limit=LIMIT):
|
||||
with openio(input or '-', 'r') as in_f:
|
||||
# create parser
|
||||
lexemes = LEXEMES.copy()
|
||||
lexemes['assert'] += pattern
|
||||
p = Parser(in_f, lexemes)
|
||||
|
||||
with openio(output or '-', 'w') as f:
|
||||
def writeln(s=''):
|
||||
f.write(s)
|
||||
f.write('\n')
|
||||
f.writeln = writeln
|
||||
|
||||
# write extra verbose asserts
|
||||
write_header(f, limit=limit)
|
||||
if input is not None:
|
||||
f.writeln("#line %d \"%s\"" % (1, input))
|
||||
|
||||
# parse and write out stmt at a time
|
||||
try:
|
||||
while True:
|
||||
f.write(p_stmt(p))
|
||||
if p.accept('sep'):
|
||||
f.write(p.m)
|
||||
else:
|
||||
break
|
||||
except ParseFailure as e:
|
||||
print('warning: %s' % e)
|
||||
pass
|
||||
|
||||
for i in range(p.off, len(p.tokens)):
|
||||
f.write(p.tokens[i][1])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Preprocessor that makes asserts easier to debug.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'input',
|
||||
help="Input C file.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
required=True,
|
||||
help="Output C file.")
|
||||
parser.add_argument(
|
||||
'-p', '--pattern',
|
||||
action='append',
|
||||
help="Regex patterns to search for starting an assert statement. This"
|
||||
" implicitly includes \"assert\" and \"=>\".")
|
||||
parser.add_argument(
|
||||
'-l', '--limit',
|
||||
type=lambda x: int(x, 0),
|
||||
default=LIMIT,
|
||||
help="Maximum number of characters to display in strcmp and memcmp. "
|
||||
"Defaults to %r." % LIMIT)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import subprocess as sp
|
||||
|
||||
def main(args):
|
||||
with open(args.disk, 'rb') as f:
|
||||
f.seek(args.block * args.block_size)
|
||||
block = (f.read(args.block_size)
|
||||
.ljust(args.block_size, b'\xff'))
|
||||
|
||||
# what did you expect?
|
||||
print("%-8s %-s" % ('off', 'data'))
|
||||
return sp.run(['xxd', '-g1', '-'], input=block).returncode
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Hex dump a specific block in a disk.")
|
||||
parser.add_argument('disk',
|
||||
help="File representing the block device.")
|
||||
parser.add_argument('block_size', type=lambda x: int(x, 0),
|
||||
help="Size of a block in bytes.")
|
||||
parser.add_argument('block', type=lambda x: int(x, 0),
|
||||
help="Address of block to dump.")
|
||||
sys.exit(main(parser.parse_args()))
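# A usage sketch (added for clarity; this script's filename is not visible in
# this diff, so 'readblock.py' below is an assumption):
#
#   ./scripts/readblock.py disk 4096 1   # hex dump block 1 of 'disk', 4096-byte blocks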
|
||||
399
components/joltwallet__littlefs/src/littlefs/scripts/readmdir.py
Normal file
|
|
@ -0,0 +1,399 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import binascii
|
||||
import sys
|
||||
import itertools as it
|
||||
|
||||
TAG_TYPES = {
|
||||
'splice': (0x700, 0x400),
|
||||
'create': (0x7ff, 0x401),
|
||||
'delete': (0x7ff, 0x4ff),
|
||||
'name': (0x700, 0x000),
|
||||
'reg': (0x7ff, 0x001),
|
||||
'dir': (0x7ff, 0x002),
|
||||
'superblock': (0x7ff, 0x0ff),
|
||||
'struct': (0x700, 0x200),
|
||||
'dirstruct': (0x7ff, 0x200),
|
||||
'ctzstruct': (0x7ff, 0x202),
|
||||
'inlinestruct': (0x7ff, 0x201),
|
||||
'userattr': (0x700, 0x300),
|
||||
'tail': (0x700, 0x600),
|
||||
'softtail': (0x7ff, 0x600),
|
||||
'hardtail': (0x7ff, 0x601),
|
||||
'gstate': (0x700, 0x700),
|
||||
'movestate': (0x7ff, 0x7ff),
|
||||
'crc': (0x700, 0x500),
|
||||
'ccrc': (0x780, 0x500),
|
||||
'fcrc': (0x7ff, 0x5ff),
|
||||
}
|
||||
|
||||
class Tag:
|
||||
def __init__(self, *args):
|
||||
if len(args) == 1:
|
||||
self.tag = args[0]
|
||||
elif len(args) == 3:
|
||||
if isinstance(args[0], str):
|
||||
type = TAG_TYPES[args[0]][1]
|
||||
else:
|
||||
type = args[0]
|
||||
|
||||
if isinstance(args[1], str):
|
||||
id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
|
||||
else:
|
||||
id = args[1]
|
||||
|
||||
if isinstance(args[2], str):
|
||||
size = int(args[2], 0) if args[2] not in 'x.' else 0x3ff
|
||||
else:
|
||||
size = args[2]
|
||||
|
||||
self.tag = (type << 20) | (id << 10) | size
|
||||
else:
|
||||
assert False
|
||||
|
||||
@property
|
||||
def isvalid(self):
|
||||
return not bool(self.tag & 0x80000000)
|
||||
|
||||
@property
|
||||
def isattr(self):
|
||||
return not bool(self.tag & 0x40000000)
|
||||
|
||||
@property
|
||||
def iscompactable(self):
|
||||
return bool(self.tag & 0x20000000)
|
||||
|
||||
@property
|
||||
def isunique(self):
|
||||
return not bool(self.tag & 0x10000000)
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def type1(self):
|
||||
return (self.tag & 0x70000000) >> 20
|
||||
|
||||
@property
|
||||
def type3(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return (self.tag & 0x000ffc00) >> 10
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
return (self.tag & 0x000003ff) >> 0
|
||||
|
||||
@property
|
||||
def dsize(self):
|
||||
return 4 + (self.size if self.size != 0x3ff else 0)
|
||||
|
||||
@property
|
||||
def chunk(self):
|
||||
return self.type & 0xff
|
||||
|
||||
@property
|
||||
def schunk(self):
|
||||
return struct.unpack('b', struct.pack('B', self.chunk))[0]
|
||||
|
||||
def is_(self, type):
|
||||
try:
|
||||
if ' ' in type:
|
||||
type1, type3 = type.split()
|
||||
return (self.is_(type1) and
|
||||
(self.type & ~TAG_TYPES[type1][0]) == int(type3, 0))
|
||||
|
||||
return self.type == int(type, 0)
|
||||
|
||||
except (ValueError, KeyError):
|
||||
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
|
||||
|
||||
def mkmask(self):
|
||||
return Tag(
|
||||
0x700 if self.isunique else 0x7ff,
|
||||
0x3ff if self.isattr else 0,
|
||||
0)
|
||||
|
||||
def chid(self, nid):
|
||||
ntag = Tag(self.type, nid, self.size)
|
||||
if hasattr(self, 'off'): ntag.off = self.off
|
||||
if hasattr(self, 'data'): ntag.data = self.data
|
||||
if hasattr(self, 'crc'): ntag.crc = self.crc
|
||||
if hasattr(self, 'erased'): ntag.erased = self.erased
|
||||
return ntag
|
||||
|
||||
def typerepr(self):
|
||||
if (self.is_('ccrc')
|
||||
and getattr(self, 'ccrc', 0xffffffff) != 0xffffffff):
|
||||
crc_status = ' (bad)'
|
||||
elif self.is_('fcrc') and getattr(self, 'erased', False):
|
||||
crc_status = ' (era)'
|
||||
else:
|
||||
crc_status = ''
|
||||
|
||||
reverse_types = {v: k for k, v in TAG_TYPES.items()}
|
||||
for prefix in range(12):
|
||||
mask = 0x7ff & ~((1 << prefix)-1)
|
||||
if (mask, self.type & mask) in reverse_types:
|
||||
type = reverse_types[mask, self.type & mask]
|
||||
if prefix > 0:
|
||||
return '%s %#x%s' % (
|
||||
type, self.type & ((1 << prefix)-1), crc_status)
|
||||
else:
|
||||
return '%s%s' % (type, crc_status)
|
||||
else:
|
||||
return '%02x%s' % (self.type, crc_status)
|
||||
|
||||
def idrepr(self):
|
||||
return repr(self.id) if self.id != 0x3ff else '.'
|
||||
|
||||
def sizerepr(self):
|
||||
return repr(self.size) if self.size != 0x3ff else 'x'
|
||||
|
||||
def __repr__(self):
|
||||
return 'Tag(%r, %d, %d)' % (self.typerepr(), self.id, self.size)
|
||||
|
||||
def __lt__(self, other):
|
||||
return (self.id, self.type) < (other.id, other.type)
|
||||
|
||||
def __bool__(self):
|
||||
return self.isvalid
|
||||
|
||||
def __int__(self):
|
||||
return self.tag
|
||||
|
||||
def __index__(self):
|
||||
return self.tag
|
||||
|
||||
class MetadataPair:
|
||||
def __init__(self, blocks):
|
||||
if len(blocks) > 1:
|
||||
self.pair = [MetadataPair([block]) for block in blocks]
|
||||
self.pair = sorted(self.pair, reverse=True)
|
||||
|
||||
self.data = self.pair[0].data
|
||||
self.rev = self.pair[0].rev
|
||||
self.tags = self.pair[0].tags
|
||||
self.ids = self.pair[0].ids
|
||||
self.log = self.pair[0].log
|
||||
self.all_ = self.pair[0].all_
|
||||
return
|
||||
|
||||
self.pair = [self]
|
||||
self.data = blocks[0]
|
||||
block = self.data
|
||||
|
||||
self.rev, = struct.unpack('<I', block[0:4])
|
||||
crc = binascii.crc32(block[0:4])
|
||||
fcrctag = None
|
||||
fcrcdata = None
|
||||
|
||||
# parse tags
|
||||
corrupt = False
|
||||
tag = Tag(0xffffffff)
|
||||
off = 4
|
||||
self.log = []
|
||||
self.all_ = []
|
||||
while len(block) - off >= 4:
|
||||
ntag, = struct.unpack('>I', block[off:off+4])
|
||||
|
||||
tag = Tag((int(tag) ^ ntag) & 0x7fffffff)
|
||||
tag.off = off + 4
|
||||
tag.data = block[off+4:off+tag.dsize]
|
||||
if tag.is_('ccrc'):
|
||||
crc = binascii.crc32(block[off:off+2*4], crc)
|
||||
else:
|
||||
crc = binascii.crc32(block[off:off+tag.dsize], crc)
|
||||
tag.crc = crc
|
||||
off += tag.dsize
|
||||
|
||||
self.all_.append(tag)
|
||||
|
||||
if tag.is_('fcrc') and len(tag.data) == 8:
|
||||
fcrctag = tag
|
||||
fcrcdata = struct.unpack('<II', tag.data)
|
||||
elif tag.is_('ccrc'):
|
||||
# is valid commit?
|
||||
if crc != 0xffffffff:
|
||||
corrupt = True
|
||||
if not corrupt:
|
||||
self.log = self.all_.copy()
|
||||
# end of commit?
|
||||
if fcrcdata:
|
||||
fcrcsize, fcrc = fcrcdata
|
||||
fcrc_ = 0xffffffff ^ binascii.crc32(
|
||||
block[off:off+fcrcsize])
|
||||
if fcrc_ == fcrc:
|
||||
fcrctag.erased = True
|
||||
corrupt = True
|
||||
|
||||
# reset tag parsing
|
||||
crc = 0
|
||||
tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
|
||||
fcrctag = None
|
||||
fcrcdata = None
|
||||
|
||||
# find active ids
|
||||
self.ids = list(it.takewhile(
|
||||
lambda id: Tag('name', id, 0) in self,
|
||||
it.count()))
|
||||
|
||||
# find most recent tags
|
||||
self.tags = []
|
||||
for tag in self.log:
|
||||
if tag.is_('crc') or tag.is_('splice'):
|
||||
continue
|
||||
elif tag.id == 0x3ff:
|
||||
if tag in self and self[tag] is tag:
|
||||
self.tags.append(tag)
|
||||
else:
|
||||
# id could have changed, I know this is messy and slow
|
||||
# but it works
|
||||
for id in self.ids:
|
||||
ntag = tag.chid(id)
|
||||
if ntag in self and self[ntag] is tag:
|
||||
self.tags.append(ntag)
|
||||
|
||||
self.tags = sorted(self.tags)
|
||||
|
||||
def __bool__(self):
|
||||
return bool(self.log)
|
||||
|
||||
def __lt__(self, other):
|
||||
# corrupt blocks don't count
|
||||
if not self or not other:
|
||||
return bool(other)
|
||||
|
||||
# use sequence arithmetic to avoid overflow
|
||||
return not ((other.rev - self.rev) & 0x80000000)
|
||||
|
||||
def __contains__(self, args):
|
||||
try:
|
||||
self[args]
|
||||
return True
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
def __getitem__(self, args):
|
||||
if isinstance(args, tuple):
|
||||
gmask, gtag = args
|
||||
else:
|
||||
gmask, gtag = args.mkmask(), args
|
||||
|
||||
gdiff = 0
|
||||
for tag in reversed(self.log):
|
||||
if (gmask.id != 0 and tag.is_('splice') and
|
||||
tag.id <= gtag.id - gdiff):
|
||||
if tag.is_('create') and tag.id == gtag.id - gdiff:
|
||||
# creation point
|
||||
break
|
||||
|
||||
gdiff += tag.schunk
|
||||
|
||||
if ((int(gmask) & int(tag)) ==
|
||||
(int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
|
||||
if tag.size == 0x3ff:
|
||||
# deleted
|
||||
break
|
||||
|
||||
return tag
|
||||
|
||||
raise KeyError(gmask, gtag)
|
||||
|
||||
def _dump_tags(self, tags, f=sys.stdout, truncate=True):
|
||||
f.write("%-8s %-8s %-13s %4s %4s" % (
|
||||
'off', 'tag', 'type', 'id', 'len'))
|
||||
if truncate:
|
||||
f.write(' data (truncated)')
|
||||
f.write('\n')
|
||||
|
||||
for tag in tags:
|
||||
f.write("%08x: %08x %-14s %3s %4s" % (
|
||||
tag.off, tag,
|
||||
tag.typerepr(), tag.idrepr(), tag.sizerepr()))
|
||||
if truncate:
|
||||
f.write(" %-23s %-8s\n" % (
|
||||
' '.join('%02x' % c for c in tag.data[:8]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[:8]))))
|
||||
else:
|
||||
f.write("\n")
|
||||
for i in range(0, len(tag.data), 16):
|
||||
f.write(" %08x: %-47s %-16s\n" % (
|
||||
tag.off+i,
|
||||
' '.join('%02x' % c for c in tag.data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[i:i+16]))))
|
||||
|
||||
def dump_tags(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.tags, f=f, truncate=truncate)
|
||||
|
||||
def dump_log(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.log, f=f, truncate=truncate)
|
||||
|
||||
def dump_all(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.all_, f=f, truncate=truncate)
|
||||
|
||||
def main(args):
|
||||
blocks = []
|
||||
with open(args.disk, 'rb') as f:
|
||||
for block in [args.block1, args.block2]:
|
||||
if block is None:
|
||||
continue
|
||||
f.seek(block * args.block_size)
|
||||
blocks.append(f.read(args.block_size)
|
||||
.ljust(args.block_size, b'\xff'))
|
||||
|
||||
# find most recent pair
|
||||
mdir = MetadataPair(blocks)
|
||||
|
||||
try:
|
||||
mdir.tail = mdir[Tag('tail', 0, 0)]
|
||||
if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
|
||||
mdir.tail = None
|
||||
except KeyError:
|
||||
mdir.tail = None
|
||||
|
||||
print("mdir {%s} rev %d%s%s%s" % (
|
||||
', '.join('%#x' % b
|
||||
for b in [args.block1, args.block2]
|
||||
if b is not None),
|
||||
mdir.rev,
|
||||
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
|
||||
if len(mdir.pair) > 1 else '',
|
||||
' (corrupted!)' if not mdir else '',
|
||||
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
|
||||
if mdir.tail else ''))
|
||||
if args.all:
|
||||
mdir.dump_all(truncate=not args.no_truncate)
|
||||
elif args.log:
|
||||
mdir.dump_log(truncate=not args.no_truncate)
|
||||
else:
|
||||
mdir.dump_tags(truncate=not args.no_truncate)
|
||||
|
||||
return 0 if mdir else 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Dump useful info about metadata pairs in littlefs.")
|
||||
parser.add_argument('disk',
|
||||
help="File representing the block device.")
|
||||
parser.add_argument('block_size', type=lambda x: int(x, 0),
|
||||
help="Size of a block in bytes.")
|
||||
parser.add_argument('block1', type=lambda x: int(x, 0),
|
||||
help="First block address for finding the metadata pair.")
|
||||
parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0),
|
||||
help="Second block address for finding the metadata pair.")
|
||||
parser.add_argument('-l', '--log', action='store_true',
|
||||
help="Show tags in log.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all tags in log, included tags in corrupted commits.")
|
||||
parser.add_argument('-T', '--no-truncate', action='store_true',
|
||||
help="Don't truncate large amounts of data.")
|
||||
sys.exit(main(parser.parse_args()))
|
||||
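readmdir.py treats every metadata tag as one packed 32-bit word, (type << 20) | (id << 10) | size, with an 11-bit type, a 10-bit id, and a 10-bit size, which is exactly what the Tag properties above decode. A minimal standalone sketch of that packing, using made-up field values:

# Illustrative sketch only; mirrors Tag's bit layout, the values below are made up.
def pack_tag(type, id, size):
    return (type << 20) | (id << 10) | size    # 11-bit type, 10-bit id, 10-bit size

def unpack_tag(tag):
    return ((tag & 0x7ff00000) >> 20,          # type
            (tag & 0x000ffc00) >> 10,          # id
            (tag & 0x000003ff) >> 0)           # size

tag = pack_tag(0x401, 0, 8)                    # a 'create' tag for id 0, size 8
assert unpack_tag(tag) == (0x401, 0, 8)
print('%08x -> type %#x, id %d, size %d' % ((tag,) + unpack_tag(tag)))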
183
components/joltwallet__littlefs/src/littlefs/scripts/readtree.py
Normal file
|
|
@@ -0,0 +1,183 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import sys
|
||||
import json
|
||||
import io
|
||||
import itertools as it
|
||||
from readmdir import Tag, MetadataPair
|
||||
|
||||
def main(args):
|
||||
superblock = None
|
||||
gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
|
||||
dirs = []
|
||||
mdirs = []
|
||||
corrupted = []
|
||||
cycle = False
|
||||
with open(args.disk, 'rb') as f:
|
||||
tail = (args.block1, args.block2)
|
||||
hard = False
|
||||
while True:
|
||||
for m in it.chain((m for d in dirs for m in d), mdirs):
|
||||
if set(m.blocks) == set(tail):
|
||||
# cycle detected
|
||||
cycle = m.blocks
|
||||
if cycle:
|
||||
break
|
||||
|
||||
# load mdir
|
||||
data = []
|
||||
blocks = {}
|
||||
for block in tail:
|
||||
f.seek(block * args.block_size)
|
||||
data.append(f.read(args.block_size)
|
||||
.ljust(args.block_size, b'\xff'))
|
||||
blocks[id(data[-1])] = block
|
||||
|
||||
mdir = MetadataPair(data)
|
||||
mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)
|
||||
|
||||
# fetch some key metadata as we scan
|
||||
try:
|
||||
mdir.tail = mdir[Tag('tail', 0, 0)]
|
||||
if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
|
||||
mdir.tail = None
|
||||
except KeyError:
|
||||
mdir.tail = None
|
||||
|
||||
# have superblock?
|
||||
try:
|
||||
nsuperblock = mdir[
|
||||
Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
|
||||
superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# have gstate?
|
||||
try:
|
||||
ngstate = mdir[Tag('movestate', 0, 0)]
|
||||
gstate = bytes((a or 0) ^ (b or 0)
|
||||
for a,b in it.zip_longest(gstate, ngstate.data))
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# corrupted?
|
||||
if not mdir:
|
||||
corrupted.append(mdir)
|
||||
|
||||
# add to directories
|
||||
mdirs.append(mdir)
|
||||
if mdir.tail is None or not mdir.tail.is_('hardtail'):
|
||||
dirs.append(mdirs)
|
||||
mdirs = []
|
||||
|
||||
if mdir.tail is None:
|
||||
break
|
||||
|
||||
tail = struct.unpack('<II', mdir.tail.data)
|
||||
hard = mdir.tail.is_('hardtail')
|
||||
|
||||
# find paths
|
||||
dirtable = {}
|
||||
for dir in dirs:
|
||||
dirtable[frozenset(dir[0].blocks)] = dir
|
||||
|
||||
pending = [("/", dirs[0])]
|
||||
while pending:
|
||||
path, dir = pending.pop(0)
|
||||
for mdir in dir:
|
||||
for tag in mdir.tags:
|
||||
if tag.is_('dir'):
|
||||
try:
|
||||
npath = tag.data.decode('utf8')
|
||||
dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
|
||||
nblocks = struct.unpack('<II', dirstruct.data)
|
||||
nmdir = dirtable[frozenset(nblocks)]
|
||||
pending.append(((path + '/' + npath), nmdir))
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
dir[0].path = path.replace('//', '/')
|
||||
|
||||
# print littlefs + version info
|
||||
version = ('?', '?')
|
||||
if superblock:
|
||||
version = tuple(reversed(
|
||||
struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
|
||||
print("%-47s%s" % ("littlefs v%s.%s" % version,
|
||||
"data (truncated, if it fits)"
|
||||
if not any([args.no_truncate, args.log, args.all]) else ""))
|
||||
|
||||
# print gstate
|
||||
print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
|
||||
tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
|
||||
blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
|
||||
if tag.size or not tag.isvalid:
|
||||
print(" orphans >=%d" % max(tag.size, 1))
|
||||
if tag.type:
|
||||
print(" move dir {%#x, %#x} id %d" % (
|
||||
blocks[0], blocks[1], tag.id))
|
||||
|
||||
# print mdir info
|
||||
for i, dir in enumerate(dirs):
|
||||
print("dir %s" % (json.dumps(dir[0].path)
|
||||
if hasattr(dir[0], 'path') else '(orphan)'))
|
||||
|
||||
for j, mdir in enumerate(dir):
|
||||
print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
|
||||
mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
|
||||
' (corrupted!)' if not mdir else '',
|
||||
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
|
||||
if mdir.tail else ''))
|
||||
|
||||
f = io.StringIO()
|
||||
if args.log:
|
||||
mdir.dump_log(f, truncate=not args.no_truncate)
|
||||
elif args.all:
|
||||
mdir.dump_all(f, truncate=not args.no_truncate)
|
||||
else:
|
||||
mdir.dump_tags(f, truncate=not args.no_truncate)
|
||||
|
||||
lines = list(filter(None, f.getvalue().split('\n')))
|
||||
for k, line in enumerate(lines):
|
||||
print("%s %s" % (
|
||||
' ' if j == len(dir)-1 else
|
||||
'v' if k == len(lines)-1 else
|
||||
'|',
|
||||
line))
|
||||
|
||||
errcode = 0
|
||||
for mdir in corrupted:
|
||||
errcode = errcode or 1
|
||||
print("*** corrupted mdir {%#x, %#x}! ***" % (
|
||||
mdir.blocks[0], mdir.blocks[1]))
|
||||
|
||||
if cycle:
|
||||
errcode = errcode or 2
|
||||
print("*** cycle detected {%#x, %#x}! ***" % (
|
||||
cycle[0], cycle[1]))
|
||||
|
||||
return errcode
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Dump semantic info about the metadata tree in littlefs")
|
||||
parser.add_argument('disk',
|
||||
help="File representing the block device.")
|
||||
parser.add_argument('block_size', type=lambda x: int(x, 0),
|
||||
help="Size of a block in bytes.")
|
||||
parser.add_argument('block1', nargs='?', default=0,
|
||||
type=lambda x: int(x, 0),
|
||||
help="Optional first block address for finding the superblock.")
|
||||
parser.add_argument('block2', nargs='?', default=1,
|
||||
type=lambda x: int(x, 0),
|
||||
help="Optional second block address for finding the superblock.")
|
||||
parser.add_argument('-l', '--log', action='store_true',
|
||||
help="Show tags in log.")
|
||||
parser.add_argument('-a', '--all', action='store_true',
|
||||
help="Show all tags in log, included tags in corrupted commits.")
|
||||
parser.add_argument('-T', '--no-truncate', action='store_true',
|
||||
help="Show the full contents of files/attrs/tags.")
|
||||
sys.exit(main(parser.parse_args()))
|
||||
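Both readmdir.py and readtree.py end up doing the same thing with the raw device: read one or two blocks, pad them to the block size with 0xff, and hand them to MetadataPair, which keeps the newer of the two revisions. A minimal usage sketch along those lines; the image name disk.img and the 4096-byte block size are assumptions, and it expects to run next to readmdir.py:

# Illustrative sketch only; 'disk.img' and the 4096-byte block size are assumptions.
from readmdir import MetadataPair

BLOCK_SIZE = 4096
blocks = []
with open('disk.img', 'rb') as f:
    for block in (0, 1):                               # the superblock pair lives in blocks 0 and 1
        f.seek(block * BLOCK_SIZE)
        blocks.append(f.read(BLOCK_SIZE).ljust(BLOCK_SIZE, b'\xff'))

mdir = MetadataPair(blocks)                            # picks the newer of the two revisions
print('rev %d, ids %r' % (mdir.rev, mdir.ids))
mdir.dump_tags()                                       # same output as readmdir.py's default mode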
735
components/joltwallet__littlefs/src/littlefs/scripts/stack.py
Normal file
|
|
@@ -0,0 +1,735 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find stack usage at the function level. Will detect recursion and
|
||||
# report as infinite stack usage.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/stack.py lfs.ci lfs_util.ci -Slimit
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
|
||||
|
||||
# size results
|
||||
class StackResult(co.namedtuple('StackResult', [
|
||||
'file', 'function', 'frame', 'limit', 'children'])):
|
||||
_by = ['file', 'function']
|
||||
_fields = ['frame', 'limit']
|
||||
_sort = ['limit', 'frame']
|
||||
_types = {'frame': Int, 'limit': Int}
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, file='', function='',
|
||||
frame=0, limit=0, children=set()):
|
||||
return super().__new__(cls, file, function,
|
||||
Int(frame), Int(limit),
|
||||
children)
|
||||
|
||||
def __add__(self, other):
|
||||
return StackResult(self.file, self.function,
|
||||
self.frame + other.frame,
|
||||
max(self.limit, other.limit),
|
||||
self.children | other.children)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def collect(ci_paths, *,
|
||||
sources=None,
|
||||
everything=False,
|
||||
**args):
|
||||
# parse the vcg format
|
||||
k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
|
||||
v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
|
||||
def parse_vcg(rest):
|
||||
def parse_vcg(rest):
|
||||
node = []
|
||||
while True:
|
||||
rest = rest.lstrip()
|
||||
m_ = k_pattern.match(rest)
|
||||
if not m_:
|
||||
return (node, rest)
|
||||
k, rest = m_.group(1), rest[m_.end(0):]
|
||||
|
||||
rest = rest.lstrip()
|
||||
if rest.startswith('{'):
|
||||
v, rest = parse_vcg(rest[1:])
|
||||
assert rest[0] == '}', "unexpected %r" % rest[0:1]
|
||||
rest = rest[1:]
|
||||
node.append((k, v))
|
||||
else:
|
||||
m_ = v_pattern.match(rest)
|
||||
assert m_, "unexpected %r" % rest[0:1]
|
||||
v, rest = m_.group(1) or m_.group(2), rest[m_.end(0):]
|
||||
node.append((k, v))
|
||||
|
||||
node, rest = parse_vcg(rest)
|
||||
assert rest == '', "unexpected %r" % rest[0:1]
|
||||
return node
|
||||
|
||||
# collect into functions
|
||||
callgraph = co.defaultdict(lambda: (None, None, 0, set()))
|
||||
f_pattern = re.compile(
|
||||
r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
|
||||
for path in ci_paths:
|
||||
with open(path) as f:
|
||||
vcg = parse_vcg(f.read())
|
||||
for k, graph in vcg:
|
||||
if k != 'graph':
|
||||
continue
|
||||
for k, info in graph:
|
||||
if k == 'node':
|
||||
info = dict(info)
|
||||
m_ = f_pattern.match(info['label'])
|
||||
if m_:
|
||||
function, file, size, type = m_.groups()
|
||||
if (not args.get('quiet')
|
||||
and 'static' not in type
|
||||
and 'bounded' not in type):
|
||||
print("warning: "
|
||||
"found non-static stack for %s (%s, %s)" % (
|
||||
function, type, size))
|
||||
_, _, _, targets = callgraph[info['title']]
|
||||
callgraph[info['title']] = (
|
||||
file, function, int(size), targets)
|
||||
elif k == 'edge':
|
||||
info = dict(info)
|
||||
_, _, _, targets = callgraph[info['sourcename']]
|
||||
targets.add(info['targetname'])
|
||||
else:
|
||||
continue
|
||||
|
||||
callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
|
||||
for source, (s_file, s_function, frame, targets) in callgraph.items():
|
||||
# discard internal functions
|
||||
if not everything and s_function.startswith('__'):
|
||||
continue
|
||||
# ignore filtered sources
|
||||
if sources is not None:
|
||||
if not any(
|
||||
os.path.abspath(s_file) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
else:
|
||||
# default to only cwd
|
||||
if not everything and not os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(s_file)]) == os.getcwd():
|
||||
continue
|
||||
|
||||
# simplify path
|
||||
if os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(s_file)]) == os.getcwd():
|
||||
s_file = os.path.relpath(s_file)
|
||||
else:
|
||||
s_file = os.path.abspath(s_file)
|
||||
|
||||
callgraph_[source] = (s_file, s_function, frame, targets)
|
||||
callgraph = callgraph_
|
||||
|
||||
if not everything:
|
||||
callgraph_ = co.defaultdict(lambda: (None, None, 0, set()))
|
||||
for source, (s_file, s_function, frame, targets) in callgraph.items():
|
||||
# discard filtered sources
|
||||
if sources is not None and not any(
|
||||
os.path.abspath(s_file) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
# discard internal functions
|
||||
if s_function.startswith('__'):
|
||||
continue
|
||||
callgraph_[source] = (s_file, s_function, frame, targets)
|
||||
callgraph = callgraph_
|
||||
|
||||
# find maximum stack size recursively, this requires also detecting cycles
|
||||
# (in case of recursion)
|
||||
def find_limit(source, seen=None):
|
||||
seen = seen or set()
|
||||
if source not in callgraph:
|
||||
return 0
|
||||
_, _, frame, targets = callgraph[source]
|
||||
|
||||
limit = 0
|
||||
for target in targets:
|
||||
if target in seen:
|
||||
# found a cycle
|
||||
return m.inf
|
||||
limit_ = find_limit(target, seen | {target})
|
||||
limit = max(limit, limit_)
|
||||
|
||||
return frame + limit
|
||||
|
||||
def find_children(targets):
|
||||
children = set()
|
||||
for target in targets:
|
||||
if target in callgraph:
|
||||
t_file, t_function, _, _ = callgraph[target]
|
||||
children.add((t_file, t_function))
|
||||
return children
|
||||
|
||||
# build results
|
||||
results = []
|
||||
for source, (s_file, s_function, frame, targets) in callgraph.items():
|
||||
limit = find_limit(source)
|
||||
children = find_children(targets)
|
||||
results.append(StackResult(s_file, s_function, frame, limit, children))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
tree=False,
|
||||
depth=1,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# adjust the name width based on the expected call depth, though
|
||||
# note this doesn't really work with unbounded recursion
|
||||
if not summary and not m.isinf(depth):
|
||||
widths[0] += 4*(depth-1)
|
||||
|
||||
# print the tree recursively
|
||||
if not tree:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], lines[0][0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], lines[0][1:-1])),
|
||||
lines[0][-1]))
|
||||
|
||||
if not summary:
|
||||
line_table = {n: l for n, l in zip(names, lines[1:-1])}
|
||||
|
||||
def recurse(names_, depth_, prefixes=('', '', '', '')):
|
||||
for i, name in enumerate(names_):
|
||||
if name not in line_table:
|
||||
continue
|
||||
line = line_table[name]
|
||||
is_last = (i == len(names_)-1)
|
||||
|
||||
print('%s%-*s ' % (
|
||||
prefixes[0+is_last],
|
||||
widths[0] - (
|
||||
len(prefixes[0+is_last])
|
||||
if not m.isinf(depth) else 0),
|
||||
line[0]),
|
||||
end='')
|
||||
if not tree:
|
||||
print(' %s%s' % (
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]),
|
||||
end='')
|
||||
print()
|
||||
|
||||
# recurse?
|
||||
if name in table and depth_ > 1:
|
||||
children = {
|
||||
','.join(str(getattr(Result(*c), k) or '') for k in by)
|
||||
for c in table[name].children}
|
||||
recurse(
|
||||
# note we're maintaining sort order
|
||||
[n for n in names if n in children],
|
||||
depth_-1,
|
||||
(prefixes[2+is_last] + "|-> ",
|
||||
prefixes[2+is_last] + "'-> ",
|
||||
prefixes[2+is_last] + "| ",
|
||||
prefixes[2+is_last] + " "))
|
||||
|
||||
recurse(names, depth)
|
||||
|
||||
if not tree:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], lines[-1][0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], lines[-1][1:-1])),
|
||||
lines[-1][-1]))
|
||||
|
||||
|
||||
def main(ci_paths,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
**args):
|
||||
# it doesn't really make sense to not have a depth with tree,
|
||||
# so assume depth=inf if tree by default
|
||||
if args.get('depth') is None:
|
||||
args['depth'] = m.inf if args['tree'] else 1
|
||||
elif args.get('depth') == 0:
|
||||
args['depth'] = m.inf
|
||||
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
results = collect(ci_paths, **args)
|
||||
else:
|
||||
results = []
|
||||
with openio(args['use']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('stack_'+k in r and r['stack_'+k].strip()
|
||||
for k in StackResult._fields):
|
||||
continue
|
||||
try:
|
||||
results.append(StackResult(
|
||||
**{k: r[k] for k in StackResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['stack_'+k] for k in StackResult._fields
|
||||
if 'stack_'+k in r and r['stack_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
results = fold(StackResult, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else StackResult._sort)),
|
||||
reverse=reverse ^ (not k or k in StackResult._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else StackResult._by)
|
||||
+ ['stack_'+k for k in (
|
||||
fields if fields is not None else StackResult._fields)])
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
writer.writerow(
|
||||
{k: getattr(r, k) for k in (
|
||||
by if by is not None else StackResult._by)}
|
||||
| {'stack_'+k: getattr(r, k) for k in (
|
||||
fields if fields is not None else StackResult._fields)})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('stack_'+k in r and r['stack_'+k].strip()
|
||||
for k in StackResult._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(StackResult(
|
||||
**{k: r[k] for k in StackResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['stack_'+k] for k in StackResult._fields
|
||||
if 'stack_'+k in r and r['stack_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(StackResult, diff_results, by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
table(StackResult, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by if by is not None else ['function'],
|
||||
fields=fields,
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
# error on recursion
|
||||
if args.get('error_on_recursion') and any(
|
||||
m.isinf(float(r.limit)) for r in results):
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find stack usage at the function level.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'ci_paths',
|
||||
nargs='*',
|
||||
help="Input *.ci files.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-u', '--use',
|
||||
help="Don't parse anything, use this CSV file.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
choices=StackResult._by,
|
||||
help="Group by this field.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
choices=StackResult._fields,
|
||||
help="Show this field.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'-F', '--source',
|
||||
dest='sources',
|
||||
action='append',
|
||||
help="Only consider definitions in this file. Defaults to anything "
|
||||
"in the current directory.")
|
||||
parser.add_argument(
|
||||
'--everything',
|
||||
action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument(
|
||||
'--tree',
|
||||
action='store_true',
|
||||
help="Only show the function call tree.")
|
||||
parser.add_argument(
|
||||
'-Z', '--depth',
|
||||
nargs='?',
|
||||
type=lambda x: int(x, 0),
|
||||
const=0,
|
||||
help="Depth of function calls to show. 0 shows all calls but may not "
|
||||
"terminate!")
|
||||
parser.add_argument(
|
||||
'-e', '--error-on-recursion',
|
||||
action='store_true',
|
||||
help="Error if any functions are recursive.")
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
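The heart of stack.py is find_limit: a depth-first walk that adds each function's frame to the worst-case limit of its callees and returns infinity as soon as a call path revisits a function. A small self-contained sketch of that walk over a made-up call graph:

# Illustrative sketch only; the call graph below is made up.
import math as m

graph = {                      # function -> (frame bytes, callees)
    'a': (16, {'b', 'c'}),
    'b': (32, {'c'}),
    'c': (8,  set()),
    'r': (4,  {'r'}),          # self-recursive
}

def find_limit(fn, seen=frozenset()):
    if fn not in graph:
        return 0
    frame, callees = graph[fn]
    limit = 0
    for callee in callees:
        if callee in seen:
            return m.inf       # cycle found, so the stack is unbounded
        limit = max(limit, find_limit(callee, seen | {callee}))
    return frame + limit

assert find_limit('a') == 16 + 32 + 8
assert m.isinf(find_limit('r'))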
652
components/joltwallet__littlefs/src/littlefs/scripts/structs.py
Normal file
|
|
@@ -0,0 +1,652 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to find struct sizes.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/structs.py lfs.o lfs_util.o -Ssize
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import difflib
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess as sp
|
||||
|
||||
|
||||
OBJDUMP_PATH = ['objdump']
|
||||
|
||||
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
|
||||
|
||||
# struct size results
|
||||
class StructResult(co.namedtuple('StructResult', ['file', 'struct', 'size'])):
|
||||
_by = ['file', 'struct']
|
||||
_fields = ['size']
|
||||
_sort = ['size']
|
||||
_types = {'size': Int}
|
||||
|
||||
__slots__ = ()
|
||||
def __new__(cls, file='', struct='', size=0):
|
||||
return super().__new__(cls, file, struct,
|
||||
Int(size))
|
||||
|
||||
def __add__(self, other):
|
||||
return StructResult(self.file, self.struct,
|
||||
self.size + other.size)
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def collect(obj_paths, *,
|
||||
objdump_path=OBJDUMP_PATH,
|
||||
sources=None,
|
||||
everything=False,
|
||||
internal=False,
|
||||
**args):
|
||||
line_pattern = re.compile(
|
||||
'^\s+(?P<no>[0-9]+)'
|
||||
'(?:\s+(?P<dir>[0-9]+))?'
|
||||
'\s+.*'
|
||||
'\s+(?P<path>[^\s]+)$')
|
||||
info_pattern = re.compile(
|
||||
'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
|
||||
'|.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
|
||||
'|.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*'
|
||||
'|.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
|
||||
|
||||
results = []
|
||||
for path in obj_paths:
|
||||
# find files, we want to filter by structs in .h files
|
||||
dirs = {}
|
||||
files = {}
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=rawline', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# note that files contain references to dirs, which we
|
||||
# dereference as soon as we see them as each file table follows a
|
||||
# dir table
|
||||
m = line_pattern.match(line)
|
||||
if m:
|
||||
if not m.group('dir'):
|
||||
# found a directory entry
|
||||
dirs[int(m.group('no'))] = m.group('path')
|
||||
else:
|
||||
# found a file entry
|
||||
dir = int(m.group('dir'))
|
||||
if dir in dirs:
|
||||
files[int(m.group('no'))] = os.path.join(
|
||||
dirs[dir],
|
||||
m.group('path'))
|
||||
else:
|
||||
files[int(m.group('no'))] = m.group('path')
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
# collect structs as we parse dwarf info
|
||||
results_ = []
|
||||
is_struct = False
|
||||
s_name = None
|
||||
s_file = None
|
||||
s_size = None
|
||||
# note objdump-path may contain extra args
|
||||
cmd = objdump_path + ['--dwarf=info', path]
|
||||
if args.get('verbose'):
|
||||
print(' '.join(shlex.quote(c) for c in cmd))
|
||||
proc = sp.Popen(cmd,
|
||||
stdout=sp.PIPE,
|
||||
stderr=sp.PIPE if not args.get('verbose') else None,
|
||||
universal_newlines=True,
|
||||
errors='replace',
|
||||
close_fds=False)
|
||||
for line in proc.stdout:
|
||||
# state machine here to find structs
|
||||
m = info_pattern.match(line)
|
||||
if m:
|
||||
if m.group('tag'):
|
||||
if is_struct:
|
||||
file = files.get(s_file, '?')
|
||||
results_.append(StructResult(file, s_name, s_size))
|
||||
is_struct = (m.group('tag') == 'DW_TAG_structure_type')
|
||||
elif m.group('name'):
|
||||
s_name = m.group('name')
|
||||
elif m.group('file'):
|
||||
s_file = int(m.group('file'))
|
||||
elif m.group('size'):
|
||||
s_size = int(m.group('size'))
|
||||
if is_struct:
|
||||
file = files.get(s_file, '?')
|
||||
results_.append(StructResult(file, s_name, s_size))
|
||||
proc.wait()
|
||||
if proc.returncode != 0:
|
||||
if not args.get('verbose'):
|
||||
for line in proc.stderr:
|
||||
sys.stdout.write(line)
|
||||
sys.exit(-1)
|
||||
|
||||
for r in results_:
|
||||
# ignore filtered sources
|
||||
if sources is not None:
|
||||
if not any(
|
||||
os.path.abspath(r.file) == os.path.abspath(s)
|
||||
for s in sources):
|
||||
continue
|
||||
else:
|
||||
# default to only cwd
|
||||
if not everything and not os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(r.file)]) == os.getcwd():
|
||||
continue
|
||||
|
||||
# limit to .h files unless --internal
|
||||
if not internal and not r.file.endswith('.h'):
|
||||
continue
|
||||
|
||||
# simplify path
|
||||
if os.path.commonpath([
|
||||
os.getcwd(),
|
||||
os.path.abspath(r.file)]) == os.getcwd():
|
||||
file = os.path.relpath(r.file)
|
||||
else:
|
||||
file = os.path.abspath(r.file)
|
||||
|
||||
results.append(r._replace(file=file))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# print our table
|
||||
for line in lines:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], line[0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]))
|
||||
|
||||
|
||||
def main(obj_paths, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
**args):
|
||||
# find sizes
|
||||
if not args.get('use', None):
|
||||
results = collect(obj_paths, **args)
|
||||
else:
|
||||
results = []
|
||||
with openio(args['use']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('struct_'+k in r and r['struct_'+k].strip()
|
||||
for k in StructResult._fields):
|
||||
continue
|
||||
try:
|
||||
results.append(StructResult(
|
||||
**{k: r[k] for k in StructResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['struct_'+k]
|
||||
for k in StructResult._fields
|
||||
if 'struct_'+k in r
|
||||
and r['struct_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
results = fold(StructResult, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else StructResult._sort)),
|
||||
reverse=reverse ^ (not k or k in StructResult._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f,
|
||||
(by if by is not None else StructResult._by)
|
||||
+ ['struct_'+k for k in (
|
||||
fields if fields is not None else StructResult._fields)])
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
writer.writerow(
|
||||
{k: getattr(r, k) for k in (
|
||||
by if by is not None else StructResult._by)}
|
||||
| {'struct_'+k: getattr(r, k) for k in (
|
||||
fields if fields is not None else StructResult._fields)})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
if not any('struct_'+k in r and r['struct_'+k].strip()
|
||||
for k in StructResult._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(StructResult(
|
||||
**{k: r[k] for k in StructResult._by
|
||||
if k in r and r[k].strip()},
|
||||
**{k: r['struct_'+k]
|
||||
for k in StructResult._fields
|
||||
if 'struct_'+k in r
|
||||
and r['struct_'+k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(StructResult, diff_results, by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
table(StructResult, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by if by is not None else ['struct'],
|
||||
fields=fields,
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Find struct sizes.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'obj_paths',
|
||||
nargs='*',
|
||||
help="Input *.o files.")
|
||||
parser.add_argument(
|
||||
'-v', '--verbose',
|
||||
action='store_true',
|
||||
help="Output commands that run behind the scenes.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-u', '--use',
|
||||
help="Don't parse anything, use this CSV file.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
choices=StructResult._by,
|
||||
help="Group by this field.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
choices=StructResult._fields,
|
||||
help="Show this field.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'-F', '--source',
|
||||
dest='sources',
|
||||
action='append',
|
||||
help="Only consider definitions in this file. Defaults to anything "
|
||||
"in the current directory.")
|
||||
parser.add_argument(
|
||||
'--everything',
|
||||
action='store_true',
|
||||
help="Include builtin and libc specific symbols.")
|
||||
parser.add_argument(
|
||||
'--internal',
|
||||
action='store_true',
|
||||
help="Also show structs in .c files.")
|
||||
parser.add_argument(
|
||||
'--objdump-path',
|
||||
type=lambda x: x.split(),
|
||||
default=OBJDUMP_PATH,
|
||||
help="Path to the objdump executable, may include flags. "
|
||||
"Defaults to %r." % OBJDUMP_PATH)
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
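stack.py, structs.py, and summary.py all share the same fold() shape: group results by a key tuple, then merge each group with the result type's __add__, which for StructResult simply sums sizes. A tiny standalone sketch with made-up entries:

# Illustrative sketch only; the file/struct names and sizes below are made up.
import collections as co

results = [('lfs.h', 'lfs_config', 92),
           ('lfs.h', 'lfs_config', 4),
           ('lfs.h', 'lfs_info',  268)]

folding = co.OrderedDict()                       # keeps first-seen order, like fold()
for file, struct, size in results:
    folding.setdefault((file, struct), []).append(size)

folded = [(file, struct, sum(sizes))
          for (file, struct), sizes in folding.items()]
print(folded)    # [('lfs.h', 'lfs_config', 96), ('lfs.h', 'lfs_info', 268)]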
829
components/joltwallet__littlefs/src/littlefs/scripts/summary.py
Normal file
|
|
@ -0,0 +1,829 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Script to summarize the outputs of other scripts. Operates on CSV files.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/code.py lfs.o lfs_util.o -q -o lfs.code.csv
|
||||
# ./scripts/data.py lfs.o lfs_util.o -q -o lfs.data.csv
|
||||
# ./scripts/summary.py lfs.code.csv lfs.data.csv -q -o lfs.csv
|
||||
# ./scripts/summary.py -Y lfs.csv -f code=code_size,data=data_size
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import csv
|
||||
import functools as ft
|
||||
import itertools as it
|
||||
import math as m
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
# supported merge operations
|
||||
#
|
||||
# this is a terrible way to express these
|
||||
#
|
||||
OPS = {
|
||||
'sum': lambda xs: sum(xs[1:], start=xs[0]),
|
||||
'prod': lambda xs: m.prod(xs[1:], start=xs[0]),
|
||||
'min': min,
|
||||
'max': max,
|
||||
'mean': lambda xs: Float(sum(float(x) for x in xs) / len(xs)),
|
||||
'stddev': lambda xs: (
|
||||
lambda mean: Float(
|
||||
m.sqrt(sum((float(x) - mean)**2 for x in xs) / len(xs)))
|
||||
)(sum(float(x) for x in xs) / len(xs)),
|
||||
'gmean': lambda xs: Float(m.prod(float(x) for x in xs)**(1/len(xs))),
|
||||
'gstddev': lambda xs: (
|
||||
lambda gmean: Float(
|
||||
m.exp(m.sqrt(sum(m.log(float(x)/gmean)**2 for x in xs) / len(xs)))
|
||||
if gmean else m.inf)
|
||||
)(m.prod(float(x) for x in xs)**(1/len(xs))),
|
||||
}
|
||||
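# e.g. OPS['gmean']([Float(2), Float(8)]) gives Float(4.0), while OPS['sum']
# just folds the values with +; 'sum' is also the default merge applied below
# when no merge flag (--sum, --prod, --min, ...) names a field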
|
||||
|
||||
# integer fields
|
||||
class Int(co.namedtuple('Int', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0):
|
||||
if isinstance(x, Int):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = int(x, 0)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, int) or m.isinf(x), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return str(self.x)
|
||||
|
||||
def __int__(self):
|
||||
assert not m.isinf(self.x)
|
||||
return self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = '%7s' % '-'
|
||||
def table(self):
|
||||
return '%7s' % (self,)
|
||||
|
||||
diff_none = '%7s' % '-'
|
||||
diff_table = table
|
||||
|
||||
def diff_diff(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
diff = new - old
|
||||
if diff == +m.inf:
|
||||
return '%7s' % '+∞'
|
||||
elif diff == -m.inf:
|
||||
return '%7s' % '-∞'
|
||||
else:
|
||||
return '%+7d' % diff
|
||||
|
||||
def ratio(self, other):
|
||||
new = self.x if self else 0
|
||||
old = other.x if other else 0
|
||||
if m.isinf(new) and m.isinf(old):
|
||||
return 0.0
|
||||
elif m.isinf(new):
|
||||
return +m.inf
|
||||
elif m.isinf(old):
|
||||
return -m.inf
|
||||
elif not old and not new:
|
||||
return 0.0
|
||||
elif not old:
|
||||
return 1.0
|
||||
else:
|
||||
return (new-old) / old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.x + other.x)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.x - other.x)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.x * other.x)
|
||||
|
||||
# float fields
|
||||
class Float(co.namedtuple('Float', 'x')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, x=0.0):
|
||||
if isinstance(x, Float):
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
try:
|
||||
x = float(x)
|
||||
except ValueError:
|
||||
# also accept +-∞ and +-inf
|
||||
if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
|
||||
x = m.inf
|
||||
elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
|
||||
x = -m.inf
|
||||
else:
|
||||
raise
|
||||
assert isinstance(x, float), x
|
||||
return super().__new__(cls, x)
|
||||
|
||||
def __str__(self):
|
||||
if self.x == m.inf:
|
||||
return '∞'
|
||||
elif self.x == -m.inf:
|
||||
return '-∞'
|
||||
else:
|
||||
return '%.1f' % self.x
|
||||
|
||||
def __float__(self):
|
||||
return float(self.x)
|
||||
|
||||
none = Int.none
|
||||
table = Int.table
|
||||
diff_none = Int.diff_none
|
||||
diff_table = Int.diff_table
|
||||
diff_diff = Int.diff_diff
|
||||
ratio = Int.ratio
|
||||
__add__ = Int.__add__
|
||||
__sub__ = Int.__sub__
|
||||
__mul__ = Int.__mul__
|
||||
|
||||
# fractional fields, a/b
|
||||
class Frac(co.namedtuple('Frac', 'a,b')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, a=0, b=None):
|
||||
if isinstance(a, Frac) and b is None:
|
||||
return a
|
||||
if isinstance(a, str) and b is None:
|
||||
a, b = a.split('/', 1)
|
||||
if b is None:
|
||||
b = a
|
||||
return super().__new__(cls, Int(a), Int(b))
|
||||
|
||||
def __str__(self):
|
||||
return '%s/%s' % (self.a, self.b)
|
||||
|
||||
def __float__(self):
|
||||
return float(self.a)
|
||||
|
||||
none = '%11s %7s' % ('-', '-')
|
||||
def table(self):
|
||||
t = self.a.x/self.b.x if self.b.x else 1.0
|
||||
return '%11s %7s' % (
|
||||
self,
|
||||
'∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%.1f%%' % (100*t))
|
||||
|
||||
diff_none = '%11s' % '-'
|
||||
def diff_table(self):
|
||||
return '%11s' % (self,)
|
||||
|
||||
def diff_diff(self, other):
|
||||
new_a, new_b = self if self else (Int(0), Int(0))
|
||||
old_a, old_b = other if other else (Int(0), Int(0))
|
||||
return '%11s' % ('%s/%s' % (
|
||||
new_a.diff_diff(old_a).strip(),
|
||||
new_b.diff_diff(old_b).strip()))
|
||||
|
||||
def ratio(self, other):
|
||||
new_a, new_b = self if self else (Int(0), Int(0))
|
||||
old_a, old_b = other if other else (Int(0), Int(0))
|
||||
new = new_a.x/new_b.x if new_b.x else 1.0
|
||||
old = old_a.x/old_b.x if old_b.x else 1.0
|
||||
return new - old
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.a + other.a, self.b + other.b)
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(self.a - other.a, self.b - other.b)
|
||||
|
||||
def __mul__(self, other):
|
||||
return self.__class__(self.a * other.a, self.b + other.b)
|
||||
|
||||
def __lt__(self, other):
|
||||
self_t = self.a.x/self.b.x if self.b.x else 1.0
|
||||
other_t = other.a.x/other.b.x if other.b.x else 1.0
|
||||
return (self_t, self.a.x) < (other_t, other.a.x)
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.__class__.__lt__(other, self)
|
||||
|
||||
def __le__(self, other):
|
||||
return not self.__gt__(other)
|
||||
|
||||
def __ge__(self, other):
|
||||
return not self.__lt__(other)
|
||||
|
||||
# available types
|
||||
TYPES = co.OrderedDict([
|
||||
('int', Int),
|
||||
('float', Float),
|
||||
('frac', Frac)
|
||||
])
|
||||
|
||||
|
||||
def infer(results, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
types={},
|
||||
ops={},
|
||||
renames=[],
|
||||
**_):
|
||||
# if fields not specified, try to guess from data
|
||||
if fields is None:
|
||||
fields = co.OrderedDict()
|
||||
for r in results:
|
||||
for k, v in r.items():
|
||||
if (by is None or k not in by) and v.strip():
|
||||
types_ = []
|
||||
for t in fields.get(k, TYPES.values()):
|
||||
try:
|
||||
t(v)
|
||||
types_.append(t)
|
||||
except ValueError:
|
||||
pass
|
||||
fields[k] = types_
|
||||
fields = list(k for k, v in fields.items() if v)
|
||||
|
||||
# deduplicate fields
|
||||
fields = list(co.OrderedDict.fromkeys(fields).keys())
|
||||
|
||||
# if by not specified, guess it's anything not in fields and not a
|
||||
# source of a rename
|
||||
if by is None:
|
||||
by = co.OrderedDict()
|
||||
for r in results:
|
||||
# also ignore None keys, these are introduced by csv.DictReader
|
||||
# when header + row mismatch
|
||||
by.update((k, True) for k in r.keys()
|
||||
if k is not None
|
||||
and k not in fields
|
||||
and not any(k == old_k for _, old_k in renames))
|
||||
by = list(by.keys())
|
||||
|
||||
# deduplicate fields
|
||||
by = list(co.OrderedDict.fromkeys(by).keys())
|
||||
|
||||
# find best type for all fields
|
||||
types_ = {}
|
||||
for k in fields:
|
||||
if k in types:
|
||||
types_[k] = types[k]
|
||||
else:
|
||||
for t in TYPES.values():
|
||||
for r in results:
|
||||
if k in r and r[k].strip():
|
||||
try:
|
||||
t(r[k])
|
||||
except ValueError:
|
||||
break
|
||||
else:
|
||||
types_[k] = t
|
||||
break
|
||||
else:
|
||||
print("error: no type matches field %r?" % k)
|
||||
sys.exit(-1)
|
||||
types = types_
|
||||
|
||||
# does folding change the type?
|
||||
types_ = {}
|
||||
for k, t in types.items():
|
||||
types_[k] = ops.get(k, OPS['sum'])([t()]).__class__
|
||||
|
||||
|
||||
# create result class
|
||||
def __new__(cls, **r):
|
||||
return cls.__mro__[1].__new__(cls,
|
||||
**{k: r.get(k, '') for k in by},
|
||||
**{k: r[k] if k in r and isinstance(r[k], list)
|
||||
else [types[k](r[k])] if k in r
|
||||
else []
|
||||
for k in fields})
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(
|
||||
**{k: getattr(self, k) for k in by},
|
||||
**{k: object.__getattribute__(self, k)
|
||||
+ object.__getattribute__(other, k)
|
||||
for k in fields})
|
||||
|
||||
def __getattribute__(self, k):
|
||||
if k in fields:
|
||||
if object.__getattribute__(self, k):
|
||||
return ops.get(k, OPS['sum'])(object.__getattribute__(self, k))
|
||||
else:
|
||||
return None
|
||||
return object.__getattribute__(self, k)
|
||||
|
||||
return type('Result', (co.namedtuple('Result', by + fields),), {
|
||||
'__slots__': (),
|
||||
'__new__': __new__,
|
||||
'__add__': __add__,
|
||||
'__getattribute__': __getattribute__,
|
||||
'_by': by,
|
||||
'_fields': fields,
|
||||
'_sort': fields,
|
||||
'_types': types_,
|
||||
})
|
||||
|
||||
|
||||
def fold(Result, results, *,
|
||||
by=None,
|
||||
defines=None,
|
||||
**_):
|
||||
if by is None:
|
||||
by = Result._by
|
||||
|
||||
for k in it.chain(by or [], (k for k, _ in defines or [])):
|
||||
if k not in Result._by and k not in Result._fields:
|
||||
print("error: could not find field %r?" % k)
|
||||
sys.exit(-1)
|
||||
|
||||
# filter by matching defines
|
||||
if defines is not None:
|
||||
results_ = []
|
||||
for r in results:
|
||||
if all(getattr(r, k) in vs for k, vs in defines):
|
||||
results_.append(r)
|
||||
results = results_
|
||||
|
||||
# organize results into conflicts
|
||||
folding = co.OrderedDict()
|
||||
for r in results:
|
||||
name = tuple(getattr(r, k) for k in by)
|
||||
if name not in folding:
|
||||
folding[name] = []
|
||||
folding[name].append(r)
|
||||
|
||||
# merge conflicts
|
||||
folded = []
|
||||
for name, rs in folding.items():
|
||||
folded.append(sum(rs[1:], start=rs[0]))
|
||||
|
||||
return folded
|
||||
|
||||
def table(Result, results, diff_results=None, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
sort=None,
|
||||
summary=False,
|
||||
all=False,
|
||||
percent=False,
|
||||
**_):
|
||||
all_, all = all, __builtins__.all
|
||||
|
||||
if by is None:
|
||||
by = Result._by
|
||||
if fields is None:
|
||||
fields = Result._fields
|
||||
types = Result._types
|
||||
|
||||
# fold again
|
||||
results = fold(Result, results, by=by)
|
||||
if diff_results is not None:
|
||||
diff_results = fold(Result, diff_results, by=by)
|
||||
|
||||
# organize by name
|
||||
table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in results}
|
||||
diff_table = {
|
||||
','.join(str(getattr(r, k) or '') for k in by): r
|
||||
for r in diff_results or []}
|
||||
names = list(table.keys() | diff_table.keys())
|
||||
|
||||
# sort again, now with diff info, note that python's sort is stable
|
||||
names.sort()
|
||||
if diff_results is not None:
|
||||
names.sort(key=lambda n: tuple(
|
||||
types[k].ratio(
|
||||
getattr(table.get(n), k, None),
|
||||
getattr(diff_table.get(n), k, None))
|
||||
for k in fields),
|
||||
reverse=True)
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
names.sort(
|
||||
key=lambda n: tuple(
|
||||
(getattr(table[n], k),)
|
||||
if getattr(table.get(n), k, None) is not None else ()
|
||||
for k in ([k] if k else [
|
||||
k for k in Result._sort if k in fields])),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
|
||||
# build up our lines
|
||||
lines = []
|
||||
|
||||
# header
|
||||
header = []
|
||||
header.append('%s%s' % (
|
||||
','.join(by),
|
||||
' (%d added, %d removed)' % (
|
||||
sum(1 for n in table if n not in diff_table),
|
||||
sum(1 for n in diff_table if n not in table))
|
||||
if diff_results is not None and not percent else '')
|
||||
if not summary else '')
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
header.append(k)
|
||||
else:
|
||||
for k in fields:
|
||||
header.append('o'+k)
|
||||
for k in fields:
|
||||
header.append('n'+k)
|
||||
for k in fields:
|
||||
header.append('d'+k)
|
||||
header.append('')
|
||||
lines.append(header)
|
||||
|
||||
def table_entry(name, r, diff_r=None, ratios=[]):
|
||||
entry = []
|
||||
entry.append(name)
|
||||
if diff_results is None:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].none)
|
||||
elif percent:
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
else:
|
||||
for k in fields:
|
||||
entry.append(getattr(diff_r, k).diff_table()
|
||||
if getattr(diff_r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(getattr(r, k).diff_table()
|
||||
if getattr(r, k, None) is not None
|
||||
else types[k].diff_none)
|
||||
for k in fields:
|
||||
entry.append(types[k].diff_diff(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None)))
|
||||
if diff_results is None:
|
||||
entry.append('')
|
||||
elif percent:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios))
|
||||
else:
|
||||
entry.append(' (%s)' % ', '.join(
|
||||
'+∞%' if t == +m.inf
|
||||
else '-∞%' if t == -m.inf
|
||||
else '%+.1f%%' % (100*t)
|
||||
for t in ratios
|
||||
if t)
|
||||
if any(ratios) else '')
|
||||
return entry
|
||||
|
||||
# entries
|
||||
if not summary:
|
||||
for name in names:
|
||||
r = table.get(name)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = diff_table.get(name)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
if not all_ and not any(ratios):
|
||||
continue
|
||||
lines.append(table_entry(name, r, diff_r, ratios))
|
||||
|
||||
# total
|
||||
r = next(iter(fold(Result, results, by=[])), None)
|
||||
if diff_results is None:
|
||||
diff_r = None
|
||||
ratios = None
|
||||
else:
|
||||
diff_r = next(iter(fold(Result, diff_results, by=[])), None)
|
||||
ratios = [
|
||||
types[k].ratio(
|
||||
getattr(r, k, None),
|
||||
getattr(diff_r, k, None))
|
||||
for k in fields]
|
||||
lines.append(table_entry('TOTAL', r, diff_r, ratios))
|
||||
|
||||
# find the best widths, note that column 0 contains the names and column -1
|
||||
# the ratios, so those are handled a bit differently
|
||||
widths = [
|
||||
((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
|
||||
for w, i in zip(
|
||||
it.chain([23], it.repeat(7)),
|
||||
range(len(lines[0])-1))]
|
||||
|
||||
# print our table
|
||||
for line in lines:
|
||||
print('%-*s %s%s' % (
|
||||
widths[0], line[0],
|
||||
' '.join('%*s' % (w, x)
|
||||
for w, x in zip(widths[1:], line[1:-1])),
|
||||
line[-1]))
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def main(csv_paths, *,
|
||||
by=None,
|
||||
fields=None,
|
||||
defines=None,
|
||||
sort=None,
|
||||
**args):
|
||||
# separate out renames
|
||||
renames = list(it.chain.from_iterable(
|
||||
((k, v) for v in vs)
|
||||
for k, vs in it.chain(by or [], fields or [])))
|
||||
if by is not None:
|
||||
by = [k for k, _ in by]
|
||||
if fields is not None:
|
||||
fields = [k for k, _ in fields]
|
||||
|
||||
# figure out types
|
||||
types = {}
|
||||
for t in TYPES.keys():
|
||||
for k in args.get(t, []):
|
||||
if k in types:
|
||||
print("error: conflicting type for field %r?" % k)
|
||||
sys.exit(-1)
|
||||
types[k] = TYPES[t]
|
||||
# rename types?
|
||||
if renames:
|
||||
types_ = {}
|
||||
for new_k, old_k in renames:
|
||||
if old_k in types:
|
||||
types_[new_k] = types[old_k]
|
||||
types.update(types_)
|
||||
|
||||
# figure out merge operations
|
||||
ops = {}
|
||||
for o in OPS.keys():
|
||||
for k in args.get(o, []):
|
||||
if k in ops:
|
||||
print("error: conflicting op for field %r?" % k)
|
||||
sys.exit(-1)
|
||||
ops[k] = OPS[o]
|
||||
# rename ops?
|
||||
if renames:
|
||||
ops_ = {}
|
||||
for new_k, old_k in renames:
|
||||
if old_k in ops:
|
||||
ops_[new_k] = ops[old_k]
|
||||
ops.update(ops_)
|
||||
|
||||
# find CSV files
|
||||
results = []
|
||||
for path in csv_paths:
|
||||
try:
|
||||
with openio(path) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
# rename fields?
|
||||
if renames:
|
||||
# make a copy so renames can overlap
|
||||
r_ = {}
|
||||
for new_k, old_k in renames:
|
||||
if old_k in r:
|
||||
r_[new_k] = r[old_k]
|
||||
r.update(r_)
|
||||
|
||||
results.append(r)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# homogenize
|
||||
Result = infer(results,
|
||||
by=by,
|
||||
fields=fields,
|
||||
types=types,
|
||||
ops=ops,
|
||||
renames=renames)
|
||||
results_ = []
|
||||
for r in results:
|
||||
if not any(k in r and r[k].strip()
|
||||
for k in Result._fields):
|
||||
continue
|
||||
try:
|
||||
results_.append(Result(**{
|
||||
k: r[k] for k in Result._by + Result._fields
|
||||
if k in r and r[k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
results = results_
|
||||
|
||||
# fold
|
||||
results = fold(Result, results, by=by, defines=defines)
|
||||
|
||||
# sort, note that python's sort is stable
|
||||
results.sort()
|
||||
if sort:
|
||||
for k, reverse in reversed(sort):
|
||||
results.sort(
|
||||
key=lambda r: tuple(
|
||||
(getattr(r, k),) if getattr(r, k) is not None else ()
|
||||
for k in ([k] if k else Result._sort)),
|
||||
reverse=reverse ^ (not k or k in Result._fields))
|
||||
|
||||
# write results to CSV
|
||||
if args.get('output'):
|
||||
with openio(args['output'], 'w') as f:
|
||||
writer = csv.DictWriter(f, Result._by + Result._fields)
|
||||
writer.writeheader()
|
||||
for r in results:
|
||||
# note we need to go through getattr to resolve lazy fields
|
||||
writer.writerow({
|
||||
k: getattr(r, k) for k in Result._by + Result._fields})
|
||||
|
||||
# find previous results?
|
||||
if args.get('diff'):
|
||||
diff_results = []
|
||||
try:
|
||||
with openio(args['diff']) as f:
|
||||
reader = csv.DictReader(f, restval='')
|
||||
for r in reader:
|
||||
# rename fields?
|
||||
if renames:
|
||||
# make a copy so renames can overlap
|
||||
r_ = {}
|
||||
for new_k, old_k in renames:
|
||||
if old_k in r:
|
||||
r_[new_k] = r[old_k]
|
||||
r.update(r_)
|
||||
|
||||
if not any(k in r and r[k].strip()
|
||||
for k in Result._fields):
|
||||
continue
|
||||
try:
|
||||
diff_results.append(Result(**{
|
||||
k: r[k] for k in Result._by + Result._fields
|
||||
if k in r and r[k].strip()}))
|
||||
except TypeError:
|
||||
pass
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
# fold
|
||||
diff_results = fold(Result, diff_results, by=by, defines=defines)
|
||||
|
||||
# print table
|
||||
if not args.get('quiet'):
|
||||
table(Result, results,
|
||||
diff_results if args.get('diff') else None,
|
||||
by=by,
|
||||
fields=fields,
|
||||
sort=sort,
|
||||
**args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
import sys
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Summarize measurements in CSV files.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'csv_paths',
|
||||
nargs='*',
|
||||
help="Input *.csv files.")
|
||||
parser.add_argument(
|
||||
'-q', '--quiet',
|
||||
action='store_true',
|
||||
help="Don't show anything, useful with -o.")
|
||||
parser.add_argument(
|
||||
'-o', '--output',
|
||||
help="Specify CSV file to store results.")
|
||||
parser.add_argument(
|
||||
'-d', '--diff',
|
||||
help="Specify CSV file to diff against.")
|
||||
parser.add_argument(
|
||||
'-a', '--all',
|
||||
action='store_true',
|
||||
help="Show all, not just the ones that changed.")
|
||||
parser.add_argument(
|
||||
'-p', '--percent',
|
||||
action='store_true',
|
||||
help="Only show percentage change, not a full diff.")
|
||||
parser.add_argument(
|
||||
'-b', '--by',
|
||||
action='append',
|
||||
type=lambda x: (
|
||||
lambda k,v=None: (k, v.split(',') if v is not None else ())
|
||||
)(*x.split('=', 1)),
|
||||
help="Group by this field. Can rename fields with new_name=old_name.")
|
||||
parser.add_argument(
|
||||
'-f', '--field',
|
||||
dest='fields',
|
||||
action='append',
|
||||
type=lambda x: (
|
||||
lambda k,v=None: (k, v.split(',') if v is not None else ())
|
||||
)(*x.split('=', 1)),
|
||||
help="Show this field. Can rename fields with new_name=old_name.")
|
||||
parser.add_argument(
|
||||
'-D', '--define',
|
||||
dest='defines',
|
||||
action='append',
|
||||
type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)),
|
||||
help="Only include results where this field is this value. May include "
|
||||
"comma-separated options.")
|
||||
class AppendSort(argparse.Action):
|
||||
def __call__(self, parser, namespace, value, option):
|
||||
if namespace.sort is None:
|
||||
namespace.sort = []
|
||||
namespace.sort.append((value, True if option == '-S' else False))
|
||||
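# each -s/-S occurrence appends (field, reverse) to namespace.sort; combined
# with Python's stable sort and the reversed() loop in main(), the first flag
# on the command line ends up as the primary sort key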
parser.add_argument(
|
||||
'-s', '--sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field.")
|
||||
parser.add_argument(
|
||||
'-S', '--reverse-sort',
|
||||
nargs='?',
|
||||
action=AppendSort,
|
||||
help="Sort by this field, but backwards.")
|
||||
parser.add_argument(
|
||||
'-Y', '--summary',
|
||||
action='store_true',
|
||||
help="Only show the total.")
|
||||
parser.add_argument(
|
||||
'--int',
|
||||
action='append',
|
||||
help="Treat these fields as ints.")
|
||||
parser.add_argument(
|
||||
'--float',
|
||||
action='append',
|
||||
help="Treat these fields as floats.")
|
||||
parser.add_argument(
|
||||
'--frac',
|
||||
action='append',
|
||||
help="Treat these fields as fractions.")
|
||||
parser.add_argument(
|
||||
'--sum',
|
||||
action='append',
|
||||
help="Add these fields (the default).")
|
||||
parser.add_argument(
|
||||
'--prod',
|
||||
action='append',
|
||||
help="Multiply these fields.")
|
||||
parser.add_argument(
|
||||
'--min',
|
||||
action='append',
|
||||
help="Take the minimum of these fields.")
|
||||
parser.add_argument(
|
||||
'--max',
|
||||
action='append',
|
||||
help="Take the maximum of these fields.")
|
||||
parser.add_argument(
|
||||
'--mean',
|
||||
action='append',
|
||||
help="Average these fields.")
|
||||
parser.add_argument(
|
||||
'--stddev',
|
||||
action='append',
|
||||
help="Find the standard deviation of these fields.")
|
||||
parser.add_argument(
|
||||
'--gmean',
|
||||
action='append',
|
||||
help="Find the geometric mean of these fields.")
|
||||
parser.add_argument(
|
||||
'--gstddev',
|
||||
action='append',
|
||||
help="Find the geometric standard deviation of these fields.")
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
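summary.py's infer() guesses a type for every CSV column by trying each of Int, Float, and Frac on the column's non-empty values and keeping the first type that parses all of them. A standalone sketch of the same idea, using plain Python types instead of the script's wrappers (infer_type is a hypothetical helper):

def infer_type(values, candidates=(int, float, str)):
    # keep the first candidate that parses every non-empty value
    for t in candidates:
        try:
            for v in values:
                if v.strip():
                    t(v)
            return t
        except ValueError:
            continue
    return str

# e.g. infer_type(['1', '2', '']) is int, infer_type(['1.5', '2']) is float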
177
components/joltwallet__littlefs/src/littlefs/scripts/tailpipe.py
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Efficiently displays the last n lines of a file/pipe.
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/tailpipe.py trace -n5
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import io
|
||||
import os
|
||||
import select
|
||||
import shutil
|
||||
import sys
|
||||
import threading as th
|
||||
import time
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
class LinesIO:
|
||||
def __init__(self, maxlen=None):
|
||||
self.maxlen = maxlen
|
||||
self.lines = co.deque(maxlen=maxlen)
|
||||
self.tail = io.StringIO()
|
||||
|
||||
# trigger automatic sizing
|
||||
if maxlen == 0:
|
||||
self.resize(0)
|
||||
|
||||
def write(self, s):
|
||||
# note using split here ensures the trailing string has no newline
|
||||
lines = s.split('\n')
|
||||
|
||||
if len(lines) > 1 and self.tail.getvalue():
|
||||
self.tail.write(lines[0])
|
||||
lines[0] = self.tail.getvalue()
|
||||
self.tail = io.StringIO()
|
||||
|
||||
self.lines.extend(lines[:-1])
|
||||
|
||||
if lines[-1]:
|
||||
self.tail.write(lines[-1])
|
||||
|
||||
def resize(self, maxlen):
|
||||
self.maxlen = maxlen
|
||||
if maxlen == 0:
|
||||
maxlen = shutil.get_terminal_size((80, 5))[1]
|
||||
if maxlen != self.lines.maxlen:
|
||||
self.lines = co.deque(self.lines, maxlen=maxlen)
|
||||
|
||||
canvas_lines = 1
|
||||
def draw(self):
|
||||
# did terminal size change?
|
||||
if self.maxlen == 0:
|
||||
self.resize(0)
|
||||
|
||||
# first thing first, give ourself a canvas
|
||||
while LinesIO.canvas_lines < len(self.lines):
|
||||
sys.stdout.write('\n')
|
||||
LinesIO.canvas_lines += 1
|
||||
|
||||
# clear the bottom of the canvas if we shrink
|
||||
shrink = LinesIO.canvas_lines - len(self.lines)
|
||||
if shrink > 0:
|
||||
for i in range(shrink):
|
||||
sys.stdout.write('\r')
|
||||
if shrink-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dA' % (shrink-1-i))
|
||||
sys.stdout.write('\x1b[K')
|
||||
if shrink-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dB' % (shrink-1-i))
|
||||
sys.stdout.write('\x1b[%dA' % shrink)
|
||||
LinesIO.canvas_lines = len(self.lines)
|
||||
|
||||
for i, line in enumerate(self.lines):
|
||||
# move cursor, clear line, disable/reenable line wrapping
|
||||
sys.stdout.write('\r')
|
||||
if len(self.lines)-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
|
||||
sys.stdout.write('\x1b[K')
|
||||
sys.stdout.write('\x1b[?7l')
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.write('\x1b[?7h')
|
||||
if len(self.lines)-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def main(path='-', *, lines=5, cat=False, sleep=None, keep_open=False):
|
||||
if cat:
|
||||
ring = sys.stdout
|
||||
else:
|
||||
ring = LinesIO(lines)
|
||||
|
||||
# if sleep print in background thread to avoid getting stuck in a read call
|
||||
event = th.Event()
|
||||
lock = th.Lock()
|
||||
if not cat:
|
||||
done = False
|
||||
def background():
|
||||
while not done:
|
||||
event.wait()
|
||||
event.clear()
|
||||
with lock:
|
||||
ring.draw()
|
||||
time.sleep(sleep or 0.01)
|
||||
th.Thread(target=background, daemon=True).start()
|
||||
|
||||
try:
|
||||
while True:
|
||||
with openio(path) as f:
|
||||
for line in f:
|
||||
with lock:
|
||||
ring.write(line)
|
||||
event.set()
|
||||
|
||||
if not keep_open:
|
||||
break
|
||||
# don't just flood open calls
|
||||
time.sleep(sleep or 0.1)
|
||||
except FileNotFoundError as e:
|
||||
print("error: file not found %r" % path)
|
||||
sys.exit(-1)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
if not cat:
|
||||
done = True
|
||||
lock.acquire() # avoids https://bugs.python.org/issue42717
|
||||
sys.stdout.write('\n')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Efficiently displays the last n lines of a file/pipe.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'path',
|
||||
nargs='?',
|
||||
help="Path to read from.")
|
||||
parser.add_argument(
|
||||
'-n', '--lines',
|
||||
nargs='?',
|
||||
type=lambda x: int(x, 0),
|
||||
const=0,
|
||||
help="Show this many lines of history. 0 uses the terminal height. "
|
||||
"Defaults to 5.")
|
||||
parser.add_argument(
|
||||
'-z', '--cat',
|
||||
action='store_true',
|
||||
help="Pipe directly to stdout.")
|
||||
parser.add_argument(
|
||||
'-s', '--sleep',
|
||||
type=float,
|
||||
help="Seconds to sleep between reads. Defaults to 0.01.")
|
||||
parser.add_argument(
|
||||
'-k', '--keep-open',
|
||||
action='store_true',
|
||||
help="Reopen the pipe on EOF, useful when multiple "
|
||||
"processes are writing.")
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
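tailpipe.py's LinesIO keeps only the last n lines by splitting each write on '\n', pushing the complete lines into a bounded deque, and carrying any unterminated remainder over to the next write. A small sketch of that buffering (TailBuffer is a hypothetical class, simplified to a plain string tail):

import collections as co

class TailBuffer:
    def __init__(self, maxlen=5):
        self.lines = co.deque(maxlen=maxlen)
        self.tail = ''

    def write(self, s):
        # everything before the final '\n' is a complete line
        parts = (self.tail + s).split('\n')
        self.lines.extend(parts[:-1])
        self.tail = parts[-1]

buf = TailBuffer(maxlen=2)
buf.write('a\nb\nc\nd')
print(list(buf.lines), repr(buf.tail))  # ['b', 'c'] 'd'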
|
|
@ -0,0 +1,73 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# tee, but for pipes
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/tee.py in_pipe out_pipe1 out_pipe2
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
|
||||
import io
|
||||
import time
|
||||
import sys
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def main(in_path, out_paths, *, keep_open=False):
|
||||
out_pipes = [openio(p, 'wb', 0) for p in out_paths]
|
||||
try:
|
||||
with openio(in_path, 'rb', 0) as f:
|
||||
while True:
|
||||
buf = f.read(io.DEFAULT_BUFFER_SIZE)
|
||||
if not buf:
|
||||
if not keep_open:
|
||||
break
|
||||
# don't just flood reads
|
||||
time.sleep(0.1)
|
||||
continue
|
||||
|
||||
for p in out_pipes:
|
||||
try:
|
||||
p.write(buf)
|
||||
except BrokenPipeError:
|
||||
pass
|
||||
except FileNotFoundError as e:
|
||||
print("error: file not found %r" % in_path)
|
||||
sys.exit(-1)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="tee, but for pipes.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'in_path',
|
||||
help="Path to read from.")
|
||||
parser.add_argument(
|
||||
'out_paths',
|
||||
nargs='+',
|
||||
help="Path to write to.")
|
||||
parser.add_argument(
|
||||
'-k', '--keep-open',
|
||||
action='store_true',
|
||||
help="Reopen the pipe on EOF, useful when multiple "
|
||||
"processes are writing.")
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_intermixed_args()).items()
|
||||
if v is not None}))
|
||||
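tee.py's main loop just copies fixed-size chunks from the input pipe to every output pipe, swallowing BrokenPipeError so one closed reader does not stop the others. A self-contained sketch of that fan-out using in-memory streams instead of named pipes (fan_out is a hypothetical helper; the script itself opens FIFOs via openio):

import io

def fan_out(src, sinks, bufsize=io.DEFAULT_BUFFER_SIZE):
    while True:
        buf = src.read(bufsize)
        if not buf:
            break
        for p in sinks:
            try:
                p.write(buf)
            except BrokenPipeError:
                pass  # a dead reader shouldn't kill the other outputs

src = io.BytesIO(b'hello pipes\n')
outs = [io.BytesIO(), io.BytesIO()]
fan_out(src, outs)
print([o.getvalue() for o in outs])  # both get b'hello pipes\n'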
1487
components/joltwallet__littlefs/src/littlefs/scripts/test.py
Normal file
File diff suppressed because it is too large
1002
components/joltwallet__littlefs/src/littlefs/scripts/tracebd.py
Normal file
File diff suppressed because it is too large
265
components/joltwallet__littlefs/src/littlefs/scripts/watch.py
Normal file
|
|
@ -0,0 +1,265 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Traditional watch command, but with higher resolution updates and a bit
|
||||
# different options/output format
|
||||
#
|
||||
# Example:
|
||||
# ./scripts/watch.py -s0.1 date
|
||||
#
|
||||
# Copyright (c) 2022, The littlefs authors.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import collections as co
|
||||
import errno
|
||||
import fcntl
|
||||
import io
|
||||
import os
|
||||
import pty
|
||||
import re
|
||||
import shutil
|
||||
import struct
|
||||
import subprocess as sp
|
||||
import sys
|
||||
import termios
|
||||
import time
|
||||
|
||||
try:
|
||||
import inotify_simple
|
||||
except ModuleNotFoundError:
|
||||
inotify_simple = None
|
||||
|
||||
|
||||
def openio(path, mode='r', buffering=-1):
|
||||
# allow '-' for stdin/stdout
|
||||
if path == '-':
|
||||
if mode == 'r':
|
||||
return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
|
||||
else:
|
||||
return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
|
||||
else:
|
||||
return open(path, mode, buffering)
|
||||
|
||||
def inotifywait(paths):
|
||||
# wait for interesting events
|
||||
inotify = inotify_simple.INotify()
|
||||
flags = (inotify_simple.flags.ATTRIB
|
||||
| inotify_simple.flags.CREATE
|
||||
| inotify_simple.flags.DELETE
|
||||
| inotify_simple.flags.DELETE_SELF
|
||||
| inotify_simple.flags.MODIFY
|
||||
| inotify_simple.flags.MOVED_FROM
|
||||
| inotify_simple.flags.MOVED_TO
|
||||
| inotify_simple.flags.MOVE_SELF)
|
||||
|
||||
# recurse into directories
|
||||
for path in paths:
|
||||
if os.path.isdir(path):
|
||||
for dir, _, files in os.walk(path):
|
||||
inotify.add_watch(dir, flags)
|
||||
for f in files:
|
||||
inotify.add_watch(os.path.join(dir, f), flags)
|
||||
else:
|
||||
inotify.add_watch(path, flags)
|
||||
|
||||
# wait for event
|
||||
inotify.read()
|
||||
|
||||
class LinesIO:
|
||||
def __init__(self, maxlen=None):
|
||||
self.maxlen = maxlen
|
||||
self.lines = co.deque(maxlen=maxlen)
|
||||
self.tail = io.StringIO()
|
||||
|
||||
# trigger automatic sizing
|
||||
if maxlen == 0:
|
||||
self.resize(0)
|
||||
|
||||
def write(self, s):
|
||||
# note using split here ensures the trailing string has no newline
|
||||
lines = s.split('\n')
|
||||
|
||||
if len(lines) > 1 and self.tail.getvalue():
|
||||
self.tail.write(lines[0])
|
||||
lines[0] = self.tail.getvalue()
|
||||
self.tail = io.StringIO()
|
||||
|
||||
self.lines.extend(lines[:-1])
|
||||
|
||||
if lines[-1]:
|
||||
self.tail.write(lines[-1])
|
||||
|
||||
def resize(self, maxlen):
|
||||
self.maxlen = maxlen
|
||||
if maxlen == 0:
|
||||
maxlen = shutil.get_terminal_size((80, 5))[1]
|
||||
if maxlen != self.lines.maxlen:
|
||||
self.lines = co.deque(self.lines, maxlen=maxlen)
|
||||
|
||||
canvas_lines = 1
|
||||
def draw(self):
|
||||
# did terminal size change?
|
||||
if self.maxlen == 0:
|
||||
self.resize(0)
|
||||
|
||||
# first thing first, give ourself a canvas
|
||||
while LinesIO.canvas_lines < len(self.lines):
|
||||
sys.stdout.write('\n')
|
||||
LinesIO.canvas_lines += 1
|
||||
|
||||
# clear the bottom of the canvas if we shrink
|
||||
shrink = LinesIO.canvas_lines - len(self.lines)
|
||||
if shrink > 0:
|
||||
for i in range(shrink):
|
||||
sys.stdout.write('\r')
|
||||
if shrink-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dA' % (shrink-1-i))
|
||||
sys.stdout.write('\x1b[K')
|
||||
if shrink-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dB' % (shrink-1-i))
|
||||
sys.stdout.write('\x1b[%dA' % shrink)
|
||||
LinesIO.canvas_lines = len(self.lines)
|
||||
|
||||
for i, line in enumerate(self.lines):
|
||||
# move cursor, clear line, disable/reenable line wrapping
|
||||
sys.stdout.write('\r')
|
||||
if len(self.lines)-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dA' % (len(self.lines)-1-i))
|
||||
sys.stdout.write('\x1b[K')
|
||||
sys.stdout.write('\x1b[?7l')
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.write('\x1b[?7h')
|
||||
if len(self.lines)-1-i > 0:
|
||||
sys.stdout.write('\x1b[%dB' % (len(self.lines)-1-i))
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def main(command, *,
|
||||
lines=0,
|
||||
cat=False,
|
||||
sleep=None,
|
||||
keep_open=False,
|
||||
keep_open_paths=None,
|
||||
exit_on_error=False):
|
||||
returncode = 0
|
||||
try:
|
||||
while True:
|
||||
# reset ring each run
|
||||
if cat:
|
||||
ring = sys.stdout
|
||||
else:
|
||||
ring = LinesIO(lines)
|
||||
|
||||
try:
|
||||
# run the command under a pseudoterminal
|
||||
mpty, spty = pty.openpty()
|
||||
|
||||
# forward terminal size
|
||||
w, h = shutil.get_terminal_size((80, 5))
|
||||
if lines:
|
||||
h = lines
|
||||
fcntl.ioctl(spty, termios.TIOCSWINSZ,
|
||||
struct.pack('HHHH', h, w, 0, 0))
|
||||
|
||||
proc = sp.Popen(command,
|
||||
stdout=spty,
|
||||
stderr=spty,
|
||||
close_fds=False)
|
||||
os.close(spty)
|
||||
mpty = os.fdopen(mpty, 'r', 1)
|
||||
|
||||
while True:
|
||||
try:
|
||||
line = mpty.readline()
|
||||
except OSError as e:
|
||||
if e.errno != errno.EIO:
|
||||
raise
|
||||
break
|
||||
if not line:
|
||||
break
|
||||
|
||||
ring.write(line)
|
||||
if not cat:
|
||||
ring.draw()
|
||||
|
||||
mpty.close()
|
||||
proc.wait()
|
||||
if exit_on_error and proc.returncode != 0:
|
||||
returncode = proc.returncode
|
||||
break
|
||||
except OSError as e:
|
||||
if e.errno != errno.ETXTBSY:
|
||||
raise
|
||||
pass
|
||||
|
||||
# try to inotifywait
|
||||
if keep_open and inotify_simple is not None:
|
||||
if keep_open_paths:
|
||||
paths = set(keep_open_paths)
|
||||
else:
|
||||
# guess inotify paths from command
|
||||
paths = set()
|
||||
for p in command:
|
||||
for p in {
|
||||
p,
|
||||
re.sub('^-.', '', p),
|
||||
re.sub('^--[^=]+=', '', p)}:
|
||||
if p and os.path.exists(p):
|
||||
paths.add(p)
|
||||
ptime = time.time()
|
||||
inotifywait(paths)
|
||||
# sleep for a minimum amount of time, this helps issues around
|
||||
# rapidly updating files
|
||||
time.sleep(max(0, (sleep or 0.1) - (time.time()-ptime)))
|
||||
else:
|
||||
time.sleep(sleep or 0.1)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
if not cat:
|
||||
sys.stdout.write('\n')
|
||||
sys.exit(returncode)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Traditional watch command, but with higher resolution "
|
||||
"updates and a bit different options/output format.",
|
||||
allow_abbrev=False)
|
||||
parser.add_argument(
|
||||
'command',
|
||||
nargs=argparse.REMAINDER,
|
||||
help="Command to run.")
|
||||
parser.add_argument(
|
||||
'-n', '--lines',
|
||||
nargs='?',
|
||||
type=lambda x: int(x, 0),
|
||||
const=0,
|
||||
help="Show this many lines of history. 0 uses the terminal height. "
|
||||
"Defaults to 0.")
|
||||
parser.add_argument(
|
||||
'-z', '--cat',
|
||||
action='store_true',
|
||||
help="Pipe directly to stdout.")
|
||||
parser.add_argument(
|
||||
'-s', '--sleep',
|
||||
type=float,
|
||||
help="Seconds to sleep between runs. Defaults to 0.1.")
|
||||
parser.add_argument(
|
||||
'-k', '--keep-open',
|
||||
action='store_true',
|
||||
help="Try to use inotify to wait for changes.")
|
||||
parser.add_argument(
|
||||
'-K', '--keep-open-path',
|
||||
dest='keep_open_paths',
|
||||
action='append',
|
||||
help="Use this path for inotify. Defaults to guessing.")
|
||||
parser.add_argument(
|
||||
'-e', '--exit-on-error',
|
||||
action='store_true',
|
||||
help="Exit on error.")
|
||||
sys.exit(main(**{k: v
|
||||
for k, v in vars(parser.parse_args()).items()
|
||||
if v is not None}))
|
||||
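watch.py runs the command under a pseudoterminal so the child keeps line-buffered, colored output, and treats EIO on the master side as end-of-output. A POSIX-only sketch of that capture loop (run_under_pty is a hypothetical helper, not part of the script):

import errno
import os
import pty
import subprocess as sp

def run_under_pty(command):
    # spawn the child with both stdout and stderr attached to the pty slave
    mpty, spty = pty.openpty()
    proc = sp.Popen(command, stdout=spty, stderr=spty, close_fds=False)
    os.close(spty)
    with os.fdopen(mpty, 'r', 1) as f:
        while True:
            try:
                line = f.readline()
            except OSError as e:
                if e.errno != errno.EIO:
                    raise
                break  # Linux reports a closed pty as EIO
            if not line:
                break
            yield line
    proc.wait()

# e.g. for line in run_under_pty(['echo', 'hi']): print(line, end='')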
|
|
@ -0,0 +1,772 @@
|
|||
# allocator tests
|
||||
# note for these to work there are a number of constraints on the device geometry
|
||||
if = 'BLOCK_CYCLES == -1'
|
||||
|
||||
# parallel allocation test
|
||||
[cases.test_alloc_parallel]
|
||||
defines.FILES = 3
|
||||
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
|
||||
defines.GC = [false, true]
|
||||
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
const char *names[] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
if (GC) {
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
}
|
||||
size_t size = strlen(names[n]);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &files[n], names[n], size) => size;
|
||||
}
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_file_close(&lfs, &files[n]) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
for (lfs_size_t i = 0; i < SIZE; i += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# serial allocation test
|
||||
[cases.test_alloc_serial]
|
||||
defines.FILES = 3
|
||||
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
|
||||
defines.GC = [false, true]
|
||||
defines.COMPACT_THRESH = ['-1', '0', 'BLOCK_SIZE/2']
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
const char *names[] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, names[n], size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
if (GC) {
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# parallel allocation reuse test
|
||||
[cases.test_alloc_parallel_reuse]
|
||||
defines.FILES = 3
|
||||
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
|
||||
defines.CYCLES = [1, 10]
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
const char *names[] = {"bacon", "eggs", "pancakes"};
|
||||
lfs_file_t files[FILES];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_open(&lfs, &files[n], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
size_t size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &files[n], names[n], size) => size;
|
||||
}
|
||||
}
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_file_close(&lfs, &files[n]) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_remove(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
# serial allocation reuse test
|
||||
[cases.test_alloc_serial_reuse]
|
||||
defines.FILES = 3
|
||||
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-6)) / FILES)'
|
||||
defines.CYCLES = [1, 10]
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
const char *names[] = {"bacon", "eggs", "pancakes"};
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
|
||||
for (int c = 0; c < CYCLES; c++) {
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_mkdir(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, names[n], size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
size_t size = strlen(names[n]);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, names[n], size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int n = 0; n < FILES; n++) {
|
||||
char path[1024];
|
||||
sprintf(path, "breakfast/%s", names[n]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_remove(&lfs, "breakfast") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
# exhaustion test
|
||||
[cases.test_alloc_exhaustion]
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size_t size = strlen("exhaustion");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "exhaustion", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_ssize_t res;
|
||||
while (true) {
|
||||
res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (res < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
res => size;
|
||||
}
|
||||
res => LFS_ERR_NOSPC;
|
||||
|
||||
// note that lfs_fs_gc should not error here
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "exhaustion", size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# exhaustion wraparound test
|
||||
[cases.test_alloc_exhaustion_wraparound]
|
||||
defines.SIZE = '(((BLOCK_SIZE-8)*(BLOCK_COUNT-4)) / 3)'
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size_t size = strlen("buffering");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "buffering", size);
|
||||
for (int i = 0; i < SIZE; i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, "padding") => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size = strlen("exhaustion");
|
||||
memcpy(buffer, "exhaustion", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_ssize_t res;
|
||||
while (true) {
|
||||
res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (res < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
res => size;
|
||||
}
|
||||
res => LFS_ERR_NOSPC;
|
||||
|
||||
// note that lfs_fs_gc should not error here
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_RDONLY);
|
||||
size = strlen("exhaustion");
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "exhaustion", size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# dir exhaustion test
|
||||
[cases.test_alloc_dir_exhaustion]
|
||||
defines.INFER_BC = [false, true]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
struct lfs_config cfg_ = *cfg;
|
||||
if (INFER_BC) {
|
||||
cfg_.block_count = 0;
|
||||
}
|
||||
lfs_mount(&lfs, &cfg_) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
size_t size = strlen("blahblahblahblah");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
int count = 0;
|
||||
int err;
|
||||
while (true) {
|
||||
err = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (err < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
count += 1;
|
||||
}
|
||||
err => LFS_ERR_NOSPC;
|
||||
// note that lfs_fs_gc should not error here
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
|
||||
// see if dir fits with max file size
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
|
||||
// see if dir fits with > max file size
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count+1; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# what if we have a bad block during an allocation scan?
|
||||
[cases.test_alloc_bad_blocks]
|
||||
in = "lfs.c"
|
||||
defines.ERASE_CYCLES = 0xffffffff
|
||||
defines.BADBLOCK_BEHAVIOR = 'LFS_EMUBD_BADBLOCK_READERROR'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// first fill to exhaustion to find available space
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "waka");
|
||||
size_t size = strlen("waka");
|
||||
lfs_size_t filesize = 0;
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
filesize += size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// now fill all but a couple of blocks of the filesystem with data
|
||||
filesize -= 3*BLOCK_SIZE;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
for (lfs_size_t i = 0; i < filesize/size; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// also save head of file so we can error during lookahead scan
|
||||
lfs_block_t fileblock = file.ctz.head;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// remount to force an alloc scan
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// but mark the head of our file as a "bad block", this will force our
|
||||
// scan to bail early
|
||||
lfs_emubd_setwear(cfg, fileblock, 0xffffffff) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_CORRUPT);
|
||||
if (res == LFS_ERR_CORRUPT) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// now reverse the "bad block" and try to write the file again until we
|
||||
// run out of space
|
||||
lfs_emubd_setwear(cfg, fileblock, 0) => 0;
|
||||
lfs_file_open(&lfs, &file, "ghost", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
strcpy((char*)buffer, "chomp");
|
||||
size = strlen("chomp");
|
||||
while (true) {
|
||||
lfs_ssize_t res = lfs_file_write(&lfs, &file, buffer, size);
|
||||
assert(res == (lfs_ssize_t)size || res == LFS_ERR_NOSPC);
|
||||
if (res == LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
// note that lfs_fs_gc should not error here
|
||||
lfs_fs_gc(&lfs) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// check that the disk isn't hurt
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "pacman", LFS_O_RDONLY) => 0;
|
||||
strcpy((char*)buffer, "waka");
|
||||
size = strlen("waka");
|
||||
for (lfs_size_t i = 0; i < filesize/size; i++) {
|
||||
uint8_t rbuffer[4];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
# I don't like the tests below. They're fragile and depend _heavily_
|
||||
# on the geometry of the block device. But they are valuable. Eventually they
|
||||
# should be removed and replaced with generalized tests.
|
||||
|
||||
# chained dir exhaustion test
|
||||
[cases.test_alloc_chained_dir_exhaustion]
|
||||
if = 'ERASE_SIZE == 512'
|
||||
defines.ERASE_COUNT = 1024
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// find out max file size
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
size_t size = strlen("blahblahblahblah");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
int count = 0;
|
||||
int err;
|
||||
while (true) {
|
||||
err = lfs_file_write(&lfs, &file, buffer, size);
|
||||
if (err < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
count += 1;
|
||||
}
|
||||
err => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_remove(&lfs, "exhaustion") => 0;
|
||||
lfs_remove(&lfs, "exhaustiondir") => 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
// see that chained dir fails
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
for (int i = 0; i < count+1; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "dirwithanexhaustivelylongnameforpadding%d", i);
|
||||
lfs_mkdir(&lfs, path) => 0;
|
||||
}
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir") => LFS_ERR_NOSPC;
|
||||
|
||||
// shorten file to try a second chained dir
|
||||
while (true) {
|
||||
err = lfs_mkdir(&lfs, "exhaustiondir");
|
||||
if (err != LFS_ERR_NOSPC) {
|
||||
break;
|
||||
}
|
||||
|
||||
lfs_ssize_t filesize = lfs_file_size(&lfs, &file);
|
||||
filesize > 0 => true;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, filesize - size) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
err => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "exhaustiondir2") => LFS_ERR_NOSPC;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# split dir test
|
||||
[cases.test_alloc_split_dir]
|
||||
if = 'ERASE_SIZE == 512'
|
||||
defines.ERASE_COUNT = 1024
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// create one block hole for half a directory
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "bump", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
uint8_t buffer[1024];
for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
    memcpy(&buffer[i], "hi", 2);
}
lfs_file_write(&lfs, &file, buffer, cfg->block_size) => cfg->block_size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion", LFS_O_WRONLY | LFS_O_CREAT);
|
||||
size_t size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < (cfg->block_count-4)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// open hole
|
||||
lfs_remove(&lfs, "bump") => 0;
|
||||
|
||||
lfs_mkdir(&lfs, "splitdir") => 0;
|
||||
lfs_file_open(&lfs, &file, "splitdir/bump",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (lfs_size_t i = 0; i < cfg->block_size; i += 2) {
|
||||
memcpy(&buffer[i], "hi", 2);
|
||||
}
|
||||
lfs_file_write(&lfs, &file, buffer, 2*cfg->block_size) => LFS_ERR_NOSPC;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# outdated lookahead test
|
||||
[cases.test_alloc_outdated_lookahead]
|
||||
if = 'ERASE_SIZE == 512'
|
||||
defines.ERASE_COUNT = 1024
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size_t size = strlen("blahblahblahblah");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// rewrite one file
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// rewrite the second file, this requires that the lookahead does not
// reuse its old population
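// (the blocks freed by rewriting exhaustion1 above must be seen as free
// even though the lookahead buffer was populated before they were freed)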
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# outdated lookahead and split dir test
|
||||
[cases.test_alloc_outdated_lookahead_split_dir]
|
||||
if = 'ERASE_SIZE == 512'
|
||||
defines.ERASE_COUNT = 1024
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// fill completely with two files
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size_t size = strlen("blahblahblahblah");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "exhaustion2",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2+1)/2)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// remount to force reset of lookahead
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// rewrite one file with a hole of one block
|
||||
lfs_file_open(&lfs, &file, "exhaustion1",
|
||||
LFS_O_WRONLY | LFS_O_TRUNC) => 0;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
size = strlen("blahblahblahblah");
|
||||
memcpy(buffer, "blahblahblahblah", size);
|
||||
for (lfs_size_t i = 0;
|
||||
i < ((cfg->block_count-2)/2 - 1)*(cfg->block_size-8);
|
||||
i += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// try to allocate a directory, should fail!
|
||||
lfs_mkdir(&lfs, "split") => LFS_ERR_NOSPC;
|
||||
|
||||
// file should not fail
|
||||
lfs_file_open(&lfs, &file, "notasplit",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
|
@@ -0,0 +1,316 @@
|
|||
[cases.test_attrs_get_set]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
uint8_t buffer[1024];
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_setattr(&lfs, "hello", 'A', "aaaa", 4) => 0;
|
||||
lfs_setattr(&lfs, "hello", 'B', "bbbbbb", 6) => 0;
|
||||
lfs_setattr(&lfs, "hello", 'C', "ccccc", 5) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "", 0) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_removeattr(&lfs, "hello", 'B') => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "dddddd", 6) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'B', "eee", 3) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 3;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "hello", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
|
||||
lfs_setattr(&lfs, "hello", 'B', "fffffffff", 9) => 0;
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 6) => 9;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+10, 5) => 5;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_getattr(&lfs, "hello", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "hello", 'B', buffer+4, 9) => 9;
|
||||
lfs_getattr(&lfs, "hello", 'C', buffer+13, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_attrs_get_set_root]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
uint8_t buffer[1024];
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_setattr(&lfs, "/", 'A', "aaaa", 4) => 0;
|
||||
lfs_setattr(&lfs, "/", 'B', "bbbbbb", 6) => 0;
|
||||
lfs_setattr(&lfs, "/", 'C', "ccccc", 5) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "", 0) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 0;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_removeattr(&lfs, "/", 'B') => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => LFS_ERR_NOATTR;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "dddddd", 6) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 6;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'B', "eee", 3) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 3;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
lfs_setattr(&lfs, "/", 'A', buffer, LFS_ATTR_MAX+1) => LFS_ERR_NOSPC;
|
||||
lfs_setattr(&lfs, "/", 'B', "fffffffff", 9) => 0;
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 6) => 9;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+10, 5) => 5;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
lfs_getattr(&lfs, "/", 'A', buffer, 4) => 4;
|
||||
lfs_getattr(&lfs, "/", 'B', buffer+4, 9) => 9;
|
||||
lfs_getattr(&lfs, "/", 'C', buffer+13, 5) => 5;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_attrs_get_set_file]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
uint8_t buffer[1024];
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs1[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 6},
|
||||
{'C', buffer+10, 5},
|
||||
};
|
||||
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer, "aaaa", 4);
|
||||
memcpy(buffer+4, "bbbbbb", 6);
|
||||
memcpy(buffer+10, "ccccc", 5);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "bbbbbb", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 0;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "\0\0\0\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer+4, "dddddd", 6);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "dddddd", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[1].size = 3;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
memcpy(buffer+4, "eee", 3);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memset(buffer, 0, 15);
|
||||
attrs1[1].size = 6;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "eee\0\0\0", 6) => 0;
|
||||
memcmp(buffer+10, "ccccc", 5) => 0;
|
||||
|
||||
attrs1[0].size = LFS_ATTR_MAX+1;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1)
|
||||
=> LFS_ERR_NOSPC;
|
||||
|
||||
struct lfs_attr attrs2[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 9},
|
||||
{'C', buffer+13, 5},
|
||||
};
|
||||
struct lfs_file_config cfg2 = {.attrs=attrs2, .attr_count=3};
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDWR, &cfg2) => 0;
|
||||
memcpy(buffer+4, "fffffffff", 9);
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
attrs1[0].size = 4;
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg1) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs3[] = {
|
||||
{'A', buffer, 4},
|
||||
{'B', buffer+4, 9},
|
||||
{'C', buffer+13, 5},
|
||||
};
|
||||
struct lfs_file_config cfg3 = {.attrs=attrs3, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_RDONLY, &cfg3) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
memcmp(buffer, "aaaa", 4) => 0;
|
||||
memcmp(buffer+4, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+13, "ccccc", 5) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_RDONLY) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => strlen("hello");
|
||||
memcmp(buffer, "hello", strlen("hello")) => 0;
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_attrs_deferred_file]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "hello") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "hello/hello", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello", strlen("hello")) => strlen("hello");
|
||||
lfs_file_close(&lfs, &file);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_setattr(&lfs, "hello/hello", 'B', "fffffffff", 9) => 0;
|
||||
lfs_setattr(&lfs, "hello/hello", 'C', "ccccc", 5) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
struct lfs_attr attrs1[] = {
|
||||
{'B', "gggg", 4},
|
||||
{'C', "", 0},
|
||||
{'D', "hhhh", 4},
|
||||
};
|
||||
struct lfs_file_config cfg1 = {.attrs=attrs1, .attr_count=3};
|
||||
|
||||
lfs_file_opencfg(&lfs, &file, "hello/hello", LFS_O_WRONLY, &cfg1) => 0;
|
||||
|
||||
lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 9;
|
||||
lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 5;
|
||||
lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => LFS_ERR_NOATTR;
|
||||
memcmp(buffer, "fffffffff", 9) => 0;
|
||||
memcmp(buffer+9, "ccccc\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+18, "\0\0\0\0\0\0\0\0\0", 9) => 0;
|
||||
|
||||
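// syncing commits the attrs attached via lfs_file_config, so the on-disk
// values only change below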
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_getattr(&lfs, "hello/hello", 'B', buffer, 9) => 4;
|
||||
lfs_getattr(&lfs, "hello/hello", 'C', buffer+9, 9) => 0;
|
||||
lfs_getattr(&lfs, "hello/hello", 'D', buffer+18, 9) => 4;
|
||||
memcmp(buffer, "gggg\0\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+9, "\0\0\0\0\0\0\0\0\0", 9) => 0;
|
||||
memcmp(buffer+18, "hhhh\0\0\0\0\0", 9) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
|
@@ -0,0 +1,260 @@
|
|||
# bad blocks with block cycles should be tested in test_relocations
|
||||
if = '(int32_t)BLOCK_CYCLES == -1'
|
||||
|
||||
[cases.test_badblocks_single]
|
||||
defines.ERASE_COUNT = 256 # small bd so test runs faster
|
||||
defines.ERASE_CYCLES = 0xffffffff
|
||||
defines.ERASE_VALUE = [0x00, 0xff, -1]
|
||||
defines.BADBLOCK_BEHAVIOR = [
|
||||
'LFS_EMUBD_BADBLOCK_PROGERROR',
|
||||
'LFS_EMUBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_EMUBD_BADBLOCK_READERROR',
|
||||
'LFS_EMUBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_EMUBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
defines.NAMEMULT = 64
|
||||
defines.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t badblock = 2; badblock < BLOCK_COUNT; badblock++) {
|
||||
lfs_emubd_setwear(cfg, badblock-1, 0) => 0;
|
||||
lfs_emubd_setwear(cfg, badblock, 0xffffffff) => 0;
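// (with ERASE_CYCLES = 0xffffffff, setting the wear to 0xffffffff makes the
// emulated block device treat this block as bad per BADBLOCK_BEHAVIOR; the
// previous iteration's block is restored by resetting its wear to 0)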
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
lfs_size_t size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
int size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
'''
|
||||
|
||||
[cases.test_badblocks_region_corruption] # (causes cascading failures)
|
||||
defines.ERASE_COUNT = 256 # small bd so test runs faster
|
||||
defines.ERASE_CYCLES = 0xffffffff
|
||||
defines.ERASE_VALUE = [0x00, 0xff, -1]
|
||||
defines.BADBLOCK_BEHAVIOR = [
|
||||
'LFS_EMUBD_BADBLOCK_PROGERROR',
|
||||
'LFS_EMUBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_EMUBD_BADBLOCK_READERROR',
|
||||
'LFS_EMUBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_EMUBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
defines.NAMEMULT = 64
|
||||
defines.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
|
||||
lfs_emubd_setwear(cfg, i+2, 0xffffffff) => 0;
|
||||
}
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
lfs_size_t size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
lfs_size_t size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_badblocks_alternating_corruption] # (causes cascading failures)
|
||||
defines.ERASE_COUNT = 256 # small bd so test runs faster
|
||||
defines.ERASE_CYCLES = 0xffffffff
|
||||
defines.ERASE_VALUE = [0x00, 0xff, -1]
|
||||
defines.BADBLOCK_BEHAVIOR = [
|
||||
'LFS_EMUBD_BADBLOCK_PROGERROR',
|
||||
'LFS_EMUBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_EMUBD_BADBLOCK_READERROR',
|
||||
'LFS_EMUBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_EMUBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
defines.NAMEMULT = 64
|
||||
defines.FILEMULT = 1
|
||||
code = '''
|
||||
for (lfs_block_t i = 0; i < (BLOCK_COUNT-2)/2; i++) {
|
||||
lfs_emubd_setwear(cfg, (2*i) + 2, 0xffffffff) => 0;
|
||||
}
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
lfs_mkdir(&lfs, (char*)buffer) => 0;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer,
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
lfs_size_t size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
uint8_t buffer[1024];
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j] = '0'+i;
|
||||
}
|
||||
buffer[NAMEMULT] = '\0';
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, (char*)buffer, &info) => 0;
|
||||
info.type => LFS_TYPE_DIR;
|
||||
|
||||
buffer[NAMEMULT] = '/';
|
||||
for (int j = 0; j < NAMEMULT; j++) {
|
||||
buffer[j+NAMEMULT+1] = '0'+i;
|
||||
}
|
||||
buffer[2*NAMEMULT+1] = '\0';
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, (char*)buffer, LFS_O_RDONLY) => 0;
|
||||
|
||||
lfs_size_t size = NAMEMULT;
|
||||
for (int j = 0; j < i*FILEMULT; j++) {
|
||||
uint8_t rbuffer[1024];
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(buffer, rbuffer, size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# other corner cases
|
||||
[cases.test_badblocks_superblocks] # (corrupt 1 or 0)
|
||||
defines.ERASE_CYCLES = 0xffffffff
|
||||
defines.ERASE_VALUE = [0x00, 0xff, -1]
|
||||
defines.BADBLOCK_BEHAVIOR = [
|
||||
'LFS_EMUBD_BADBLOCK_PROGERROR',
|
||||
'LFS_EMUBD_BADBLOCK_ERASEERROR',
|
||||
'LFS_EMUBD_BADBLOCK_READERROR',
|
||||
'LFS_EMUBD_BADBLOCK_PROGNOOP',
|
||||
'LFS_EMUBD_BADBLOCK_ERASENOOP',
|
||||
]
|
||||
code = '''
|
||||
lfs_emubd_setwear(cfg, 0, 0xffffffff) => 0;
|
||||
lfs_emubd_setwear(cfg, 1, 0xffffffff) => 0;
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => LFS_ERR_NOSPC;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
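// (with both superblock blocks 0 and 1 bad, format has nowhere to place the
// superblock and reports LFS_ERR_NOSPC, and mount finds no valid superblock
// and reports LFS_ERR_CORRUPT)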
|
||||
'''
|
||||
248
components/joltwallet__littlefs/src/littlefs/tests/test_bd.toml
Normal file
|
|
@@ -0,0 +1,248 @@
|
|||
# These tests don't really test littlefs at all; they are here only to make
# sure the underlying block device is working.
#
# Note that we use 251, a prime, in places to avoid aliasing with powers of 2.
#
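# (Each written byte is (block + offset) % 251; since 251 shares no factors
# with the power-of-two read, prog, and block sizes, the pattern never lines
# up with a block or cache boundary, so stale or misplaced data is caught as
# a mismatch.)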
|
||||
|
||||
[cases.test_bd_one_block]
|
||||
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
|
||||
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
|
||||
code = '''
|
||||
uint8_t buffer[lfs_max(READ, PROG)];
|
||||
|
||||
// write data
|
||||
cfg->erase(cfg, 0) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, 0, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read data
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, 0, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (i+j) % 251);
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
[cases.test_bd_two_block]
|
||||
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
|
||||
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
|
||||
code = '''
|
||||
uint8_t buffer[lfs_max(READ, PROG)];
|
||||
lfs_block_t block;
|
||||
|
||||
// write block 0
|
||||
block = 0;
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read block 0
|
||||
block = 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
// write block 1
|
||||
block = 1;
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read block 1
|
||||
block = 1;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
// read block 0 again
|
||||
block = 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
[cases.test_bd_last_block]
|
||||
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
|
||||
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
|
||||
code = '''
|
||||
uint8_t buffer[lfs_max(READ, PROG)];
|
||||
lfs_block_t block;
|
||||
|
||||
// write block 0
|
||||
block = 0;
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read block 0
|
||||
block = 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
// write block n-1
|
||||
block = cfg->block_count-1;
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read block n-1
|
||||
block = cfg->block_count-1;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
// read block 0 again
|
||||
block = 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
[cases.test_bd_powers_of_two]
|
||||
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
|
||||
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
|
||||
code = '''
|
||||
uint8_t buffer[lfs_max(READ, PROG)];
|
||||
|
||||
// write/read every power of 2
|
||||
lfs_block_t block = 1;
|
||||
while (block < cfg->block_count) {
|
||||
// write
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
block *= 2;
|
||||
}
|
||||
|
||||
// read every power of 2 again
|
||||
block = 1;
|
||||
while (block < cfg->block_count) {
|
||||
// read
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
block *= 2;
|
||||
}
|
||||
'''
|
||||
|
||||
[cases.test_bd_fibonacci]
|
||||
defines.READ = ['READ_SIZE', 'BLOCK_SIZE']
|
||||
defines.PROG = ['PROG_SIZE', 'BLOCK_SIZE']
|
||||
code = '''
|
||||
uint8_t buffer[lfs_max(READ, PROG)];
|
||||
|
||||
// write/read every fibonacci number on our device
|
||||
lfs_block_t block = 1;
|
||||
lfs_block_t block_ = 1;
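// (block and block_ hold consecutive Fibonacci numbers; each iteration
// advances the pair, so we touch a sparse, non-power-of-two set of blocks)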
|
||||
while (block < cfg->block_count) {
|
||||
// write
|
||||
cfg->erase(cfg, block) => 0;
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += PROG) {
|
||||
for (lfs_off_t j = 0; j < PROG; j++) {
|
||||
buffer[j] = (block+i+j) % 251;
|
||||
}
|
||||
cfg->prog(cfg, block, i, buffer, PROG) => 0;
|
||||
}
|
||||
|
||||
// read
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
lfs_block_t nblock = block + block_;
|
||||
block_ = block;
|
||||
block = nblock;
|
||||
}
|
||||
|
||||
// read every fibonacci number again
|
||||
block = 1;
|
||||
block_ = 1;
|
||||
while (block < cfg->block_count) {
|
||||
// read
|
||||
for (lfs_off_t i = 0; i < cfg->block_size; i += READ) {
|
||||
cfg->read(cfg, block, i, buffer, READ) => 0;
|
||||
|
||||
for (lfs_off_t j = 0; j < READ; j++) {
|
||||
LFS_ASSERT(buffer[j] == (block+i+j) % 251);
|
||||
}
|
||||
}
|
||||
|
||||
lfs_block_t nblock = block + block_;
|
||||
block_ = block;
|
||||
block = nblock;
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
|
||||
|
||||
1453
components/joltwallet__littlefs/src/littlefs/tests/test_compat.toml
Normal file
File diff suppressed because it is too large
1120
components/joltwallet__littlefs/src/littlefs/tests/test_dirs.toml
Normal file
File diff suppressed because it is too large
|
|
@@ -0,0 +1,642 @@
|
|||
# These tests are for some specific corner cases with neighboring inline files.
# Note that these tests are intended for 512 byte inline sizes. They should
# still pass with other inline sizes, but wouldn't be exercising these corner
# cases.
|
||||
|
||||
defines.CACHE_SIZE = 512
|
||||
if = 'CACHE_SIZE % PROG_SIZE == 0 && CACHE_SIZE == 512'
|
||||
|
||||
[cases.test_entries_grow]
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// write hi0 20
|
||||
char path[1024];
|
||||
lfs_size_t size;
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_entries_shrink]
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// write hi0 20
|
||||
char path[1024];
|
||||
lfs_size_t size;
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 20
|
||||
sprintf(path, "hi0"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 20
|
||||
sprintf(path, "hi2"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 20
|
||||
sprintf(path, "hi3"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_entries_spill]
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
char path[1024];
|
||||
lfs_size_t size;
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_entries_push_spill]
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
char path[1024];
|
||||
lfs_size_t size;
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// read hi0 200
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi1 200
|
||||
sprintf(path, "hi1"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// read hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
memcmp(rbuffer, wbuffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_entries_push_spill_two]
|
||||
code = '''
|
||||
uint8_t wbuffer[1024];
|
||||
uint8_t rbuffer[1024];
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// write hi0 200
|
||||
char path[1024];
|
||||
lfs_size_t size;
|
||||
sprintf(path, "hi0"); size = 200;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi1 20
|
||||
sprintf(path, "hi1"); size = 20;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi2 200
|
||||
sprintf(path, "hi2"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi3 200
|
||||
sprintf(path, "hi3"); size = 200;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
memset(wbuffer, 'c', size);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// write hi4 200
sprintf(path, "hi4"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;

// read hi1 20
sprintf(path, "hi1"); size = 20;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;

// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi4 200
sprintf(path, "hi4"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;

lfs_unmount(&lfs) => 0;
'''

[cases.test_entries_drop]
code = '''
uint8_t wbuffer[1024];
uint8_t rbuffer[1024];

lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;

// write hi0 200
char path[1024];
lfs_size_t size;
sprintf(path, "hi0"); size = 200;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi1 200
sprintf(path, "hi1"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;
// write hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_size(&lfs, &file) => size;
lfs_file_close(&lfs, &file) => 0;

lfs_remove(&lfs, "hi1") => 0;
struct lfs_info info;
lfs_stat(&lfs, "hi1", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi2 200
sprintf(path, "hi2"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;

lfs_remove(&lfs, "hi2") => 0;
lfs_stat(&lfs, "hi2", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
// read hi3 200
sprintf(path, "hi3"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;

lfs_remove(&lfs, "hi3") => 0;
lfs_stat(&lfs, "hi3", &info) => LFS_ERR_NOENT;
// read hi0 200
sprintf(path, "hi0"); size = 200;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => size;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;

lfs_remove(&lfs, "hi0") => 0;
lfs_stat(&lfs, "hi0", &info) => LFS_ERR_NOENT;

lfs_unmount(&lfs) => 0;
'''

[cases.test_entries_create_too_big]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

lfs_mount(&lfs, cfg) => 0;
char path[1024];
memset(path, 'm', 200);
path[200] = '\0';

lfs_size_t size = 400;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;

size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_entries_resize_too_big]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

lfs_mount(&lfs, cfg) => 0;
char path[1024];
memset(path, 'm', 200);
path[200] = '\0';

lfs_size_t size = 40;
lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
uint8_t wbuffer[1024];
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;

size = 40;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
uint8_t rbuffer[1024];
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;

size = 400;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
memset(wbuffer, 'c', size);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;

size = 400;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
memcmp(rbuffer, wbuffer, size) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''
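A note on the notation used throughout these test files: the arrow "=>" is not C. The littlefs test runner rewrites "lhs => rhs" into a check that the left-hand expression evaluates to the right-hand value before the case is compiled. A rough, hypothetical sketch of an equivalent expansion (the macro below is illustrative, not the runner's actual code):

#include <assert.h>

// Hypothetical stand-in for the runner's rewrite of `lhs => rhs`.
#define CHECK_EQ(lhs, rhs) assert((lhs) == (rhs))

// So a test line such as
//     lfs_file_write(&lfs, &file, wbuffer, size) => size;
// behaves like
//     CHECK_EQ(lfs_file_write(&lfs, &file, wbuffer, size), size);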
@ -0,0 +1,306 @@
# Tests for recovering from conditions which shouldn't happen during
# normal operation of littlefs

# invalid pointer tests (outside of block_count)

[cases.test_evil_invalid_tail_pointer]
defines.TAIL_TYPE = ['LFS_TYPE_HARDTAIL', 'LFS_TYPE_SOFTTAIL']
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// change tail-pointer to invalid pointers
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''

[cases.test_evil_invalid_dir_pointer]
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a dir
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "dir_here") => 0;
lfs_unmount(&lfs) => 0;

// change the dir pointer to be invalid
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our directory
uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("dir_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_DIR, 1, strlen("dir_here"));
assert(memcmp((char*)buffer, "dir_here", strlen("dir_here")) == 0);
// change dir pointer
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, 8),
(lfs_block_t[2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0}})) => 0;
lfs_deinit(&lfs) => 0;

// test that accessing our bad dir fails; note there's a number
// of ways to access the dir, some can fail, but some don't
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "dir_here", &info) => 0;
assert(strcmp(info.name, "dir_here") == 0);
assert(info.type == LFS_TYPE_DIR);

lfs_dir_t dir;
lfs_dir_open(&lfs, &dir, "dir_here") => LFS_ERR_CORRUPT;
lfs_stat(&lfs, "dir_here/file_here", &info) => LFS_ERR_CORRUPT;
lfs_dir_open(&lfs, &dir, "dir_here/dir_here") => LFS_ERR_CORRUPT;
lfs_file_t file;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_RDONLY) => LFS_ERR_CORRUPT;
lfs_file_open(&lfs, &file, "dir_here/file_here",
LFS_O_WRONLY | LFS_O_CREAT) => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''

[cases.test_evil_invalid_file_pointer]
in = "lfs.c"
defines.SIZE = [10, 1000, 100000] # faked file size
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a file
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// change the file pointer to be invalid
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file
uint8_t buffer[1024];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
// change file pointer
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz)),
&(struct lfs_ctz){0xcccccccc, lfs_tole32(SIZE)}})) => 0;
lfs_deinit(&lfs) => 0;

// test that accessing our bad file fails; note there's a number
// of ways to access the file, some can fail, but some don't
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);

lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
lfs_file_close(&lfs, &file) => 0;

// any allocs that traverse CTZ must unfortunately fail
if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''

[cases.test_evil_invalid_ctz_pointer] # invalid pointer in CTZ skip-list test
defines.SIZE = ['2*BLOCK_SIZE', '3*BLOCK_SIZE', '4*BLOCK_SIZE']
in = "lfs.c"
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// make a file
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "file_here",
LFS_O_WRONLY | LFS_O_CREAT) => 0;
for (int i = 0; i < SIZE; i++) {
char c = 'c';
lfs_file_write(&lfs, &file, &c, 1) => 1;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
// change pointer in CTZ skip-list to be invalid
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
// make sure id 1 == our file and get our CTZ structure
uint8_t buffer[4*BLOCK_SIZE];
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 1, strlen("file_here")), buffer)
=> LFS_MKTAG(LFS_TYPE_REG, 1, strlen("file_here"));
assert(memcmp((char*)buffer, "file_here", strlen("file_here")) == 0);
struct lfs_ctz ctz;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x700, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_STRUCT, 1, sizeof(struct lfs_ctz)), &ctz)
=> LFS_MKTAG(LFS_TYPE_CTZSTRUCT, 1, sizeof(struct lfs_ctz));
lfs_ctz_fromle32(&ctz);
// rewrite block to contain bad pointer
uint8_t bbuffer[BLOCK_SIZE];
cfg->read(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
uint32_t bad = lfs_tole32(0xcccccccc);
memcpy(&bbuffer[0], &bad, sizeof(bad));
memcpy(&bbuffer[4], &bad, sizeof(bad));
cfg->erase(cfg, ctz.head) => 0;
cfg->prog(cfg, ctz.head, 0, bbuffer, BLOCK_SIZE) => 0;
lfs_deinit(&lfs) => 0;

// test that accessing our bad file fails; note there's a number
// of ways to access the file, some can fail, but some don't
lfs_mount(&lfs, cfg) => 0;
struct lfs_info info;
lfs_stat(&lfs, "file_here", &info) => 0;
assert(strcmp(info.name, "file_here") == 0);
assert(info.type == LFS_TYPE_REG);
assert(info.size == SIZE);

lfs_file_open(&lfs, &file, "file_here", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, SIZE) => LFS_ERR_CORRUPT;
lfs_file_close(&lfs, &file) => 0;

// any allocs that traverse CTZ must unfortunately fail
if (SIZE > 2*BLOCK_SIZE) {
lfs_mkdir(&lfs, "dir_here") => LFS_ERR_CORRUPT;
}
lfs_unmount(&lfs) => 0;
'''


[cases.test_evil_invalid_gstate_pointer]
defines.INVALSET = [0x3, 0x1, 0x2]
in = "lfs.c"
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// create an invalid gstate
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_fs_prepmove(&lfs, 1, (lfs_block_t [2]){
(INVALSET & 0x1) ? 0xcccccccc : 0,
(INVALSET & 0x2) ? 0xcccccccc : 0});
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
// mount may not fail, but our first alloc should fail when
// we try to fix the gstate
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "should_fail") => LFS_ERR_CORRUPT;
lfs_unmount(&lfs) => 0;
'''

# cycle detection/recovery tests

[cases.test_evil_mdir_loop] # metadata-pair threaded-list loop test
in = "lfs.c"
code = '''
// create littlefs
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// change tail-pointer to point to ourself
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){0, 1}})) => 0;
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''

[cases.test_evil_mdir_loop2] # metadata-pair threaded-list 2-length loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

// find child
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to root
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8),
(lfs_block_t[2]){0, 1}})) => 0;
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''

[cases.test_evil_mdir_loop_child] # metadata-pair threaded-list 1-length child loop test
in = "lfs.c"
code = '''
// create littlefs with child dir
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "child") => 0;
lfs_unmount(&lfs) => 0;

// find child
lfs_init(&lfs, cfg) => 0;
lfs_mdir_t mdir;
lfs_block_t pair[2];
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
lfs_dir_get(&lfs, &mdir,
LFS_MKTAG(0x7ff, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair)), pair)
=> LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 1, sizeof(pair));
lfs_pair_fromle32(pair);
// change tail-pointer to point to ourself
lfs_dir_fetch(&lfs, &mdir, pair) => 0;
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_HARDTAIL, 0x3ff, 8), pair})) => 0;
lfs_deinit(&lfs) => 0;

// test that mount fails gracefully
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
'''
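The cases above exercise recovery from deliberately corrupted metadata: each one formats a filesystem, uses internal functions such as lfs_init, lfs_dir_fetch, and lfs_dir_commit (made reachable by the in = "lfs.c" setting, which appears to compile the case in the context of lfs.c) to write a bogus tail, directory, file, CTZ, or gstate pointer such as 0xcccccccc, and then checks that mount or the first operation that touches the damage returns LFS_ERR_CORRUPT rather than crashing. The INVALSET bitmask selects which half of the two-block metadata pair receives the bad address. A minimal sketch of the kind of range check that makes such pointers detectable, assuming only that valid block addresses must lie below the configured block count (the names here are illustrative, not littlefs internals):

#include <stdbool.h>
#include <stdint.h>

// Illustrative only: a metadata pair is plausible only if both block
// addresses fall inside the block device; 0xcccccccc, as written by the
// tests above, fails this check for any realistic block_count.
static bool pair_in_range(const uint32_t pair[2], uint32_t block_count) {
    return pair[0] < block_count && pair[1] < block_count;
}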
@ -0,0 +1,505 @@
# test running a filesystem to exhaustion
[cases.test_exhaustion_normal]
defines.ERASE_CYCLES = 10
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
defines.FILES = 10
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;

uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}

lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;

cycle += 1;
}

exhausted:
// should still be readable
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
struct lfs_info info;
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;

LFS_WARN("completed %d cycles", cycle);
'''

# test running a filesystem to exhaustion
# which also requires expanding superblocks
[cases.test_exhaustion_superblocks]
defines.ERASE_CYCLES = 10
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.BADBLOCK_BEHAVIOR = [
'LFS_EMUBD_BADBLOCK_PROGERROR',
'LFS_EMUBD_BADBLOCK_ERASEERROR',
'LFS_EMUBD_BADBLOCK_READERROR',
'LFS_EMUBD_BADBLOCK_PROGNOOP',
'LFS_EMUBD_BADBLOCK_ERASENOOP',
]
defines.FILES = 10
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}

lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;

cycle += 1;
}

exhausted:
// should still be readable
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;

LFS_WARN("completed %d cycles", cycle);
'''

# These are a sort of high-level litmus test for wear-leveling. One definition
# of wear-leveling is that increasing a block device's space translates directly
# into increasing the block device's lifetime. This is something we can actually
# check for.

# wear-level test running a filesystem to exhaustion
[cases.test_exhuastion_wear_leveling]
defines.ERASE_CYCLES = 20
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};

for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
lfs_emubd_setwear(cfg, b,
(b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}

lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;

uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}

lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;

cycle += 1;
}

exhausted:
// should still be readable
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;

run_cycles[run] = cycle;
LFS_WARN("completed %d blocks %d cycles",
run_block_count[run], run_cycles[run]);
}

// check we increased the lifetime by 2x with ~10% error
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''

# wear-level test + expanding superblock
[cases.test_exhaustion_wear_leveling_superblocks]
defines.ERASE_CYCLES = 20
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = 'ERASE_CYCLES / 2'
defines.FILES = 10
code = '''
uint32_t run_cycles[2];
const uint32_t run_block_count[2] = {BLOCK_COUNT/2, BLOCK_COUNT};

for (int run = 0; run < 2; run++) {
for (lfs_block_t b = 0; b < BLOCK_COUNT; b++) {
lfs_emubd_setwear(cfg, b,
(b < run_block_count[run]) ? 0 : ERASE_CYCLES) => 0;
}

lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

uint32_t cycle = 0;
while (true) {
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << ((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}

lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;

cycle += 1;
}

exhausted:
// should still be readable
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;

run_cycles[run] = cycle;
LFS_WARN("completed %d blocks %d cycles",
run_block_count[run], run_cycles[run]);
}

// check we increased the lifetime by 2x with ~10% error
LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]);
'''

# test that we wear blocks roughly evenly
[cases.test_exhaustion_wear_distribution]
defines.ERASE_CYCLES = 0xffffffff
defines.ERASE_COUNT = 256 # small bd so test runs faster
defines.BLOCK_CYCLES = [5, 4, 3, 2, 1]
defines.CYCLES = 100
defines.FILES = 10
if = 'BLOCK_CYCLES < CYCLES/10'
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_mkdir(&lfs, "roadrunner") => 0;
lfs_unmount(&lfs) => 0;

uint32_t cycle = 0;
while (cycle < CYCLES) {
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// choose name, roughly random seed, and random 2^n size
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;

for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
lfs_ssize_t res = lfs_file_write(&lfs, &file, &c, 1);
assert(res == 1 || res == LFS_ERR_NOSPC);
if (res == LFS_ERR_NOSPC) {
int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

int err = lfs_file_close(&lfs, &file);
assert(err == 0 || err == LFS_ERR_NOSPC);
if (err == LFS_ERR_NOSPC) {
lfs_unmount(&lfs) => 0;
goto exhausted;
}
}

for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
sprintf(path, "roadrunner/test%d", i);
uint32_t prng = cycle * i;
lfs_size_t size = 1 << 4; //((TEST_PRNG(&prng) % 10)+2);

lfs_file_t file;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
for (lfs_size_t j = 0; j < size; j++) {
char c = 'a' + (TEST_PRNG(&prng) % 26);
char r;
lfs_file_read(&lfs, &file, &r, 1) => 1;
assert(r == c);
}

lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;

cycle += 1;
}

exhausted:
// should still be readable
lfs_mount(&lfs, cfg) => 0;
for (uint32_t i = 0; i < FILES; i++) {
// check for errors
char path[1024];
struct lfs_info info;
sprintf(path, "roadrunner/test%d", i);
lfs_stat(&lfs, path, &info) => 0;
}
lfs_unmount(&lfs) => 0;

LFS_WARN("completed %d cycles", cycle);

// check the wear on our block device
lfs_emubd_wear_t minwear = -1;
lfs_emubd_wear_t totalwear = 0;
lfs_emubd_wear_t maxwear = 0;
// skip 0 and 1 as superblock movement is intentionally avoided
for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
printf("%08x: wear %d\n", b, wear);
assert(wear >= 0);
if (wear < minwear) {
minwear = wear;
}
if (wear > maxwear) {
maxwear = wear;
}
totalwear += wear;
}
lfs_emubd_wear_t avgwear = totalwear / BLOCK_COUNT;
LFS_WARN("max wear: %d cycles", maxwear);
LFS_WARN("avg wear: %d cycles", totalwear / (int)BLOCK_COUNT);
LFS_WARN("min wear: %d cycles", minwear);

// find standard deviation^2
lfs_emubd_wear_t dev2 = 0;
for (lfs_block_t b = 2; b < BLOCK_COUNT; b++) {
lfs_emubd_wear_t wear = lfs_emubd_wear(cfg, b);
assert(wear >= 0);
lfs_emubd_swear_t diff = wear - avgwear;
dev2 += diff*diff;
}
dev2 /= totalwear;
LFS_WARN("std dev^2: %d", dev2);
assert(dev2 < 8);
'''
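The two wear-leveling cases above run the same workload twice, first with half of the blocks pre-worn to their ERASE_CYCLES limit and then with the whole device available, and record how many write cycles complete before space runs out. The acceptance check LFS_ASSERT(run_cycles[1]*110/100 > 2*run_cycles[0]) demands that doubling the usable blocks roughly doubles the lifetime, with about 10% slack for integer math and noise. A standalone restatement of just that check (the example numbers are assumed, not measured values):

#include <assert.h>
#include <stdint.h>

// Doubling the usable block count should at least double the completed
// cycles, within ~10% tolerance, mirroring the LFS_ASSERT above.
static void check_lifetime_scaling(uint32_t half_bd_cycles, uint32_t full_bd_cycles) {
    assert(full_bd_cycles * 110 / 100 > 2 * half_bd_cycles);
}

int main(void) {
    check_lifetime_scaling(9, 18);  // 18*110/100 = 19 > 2*9 = 18, so this passes
    return 0;
}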
@ -0,0 +1,539 @@

[cases.test_files_simple]
defines.INLINE_MAX = [0, -1, 8]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "hello",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
lfs_size_t size = strlen("Hello World!")+1;
uint8_t buffer[1024];
strcpy((char*)buffer, "Hello World!");
lfs_file_write(&lfs, &file, buffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "hello", LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, buffer, size) => size;
assert(strcmp((char*)buffer, "Hello World!") == 0);
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_large]
defines.SIZE = [32, 8192, 262144, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 33, 1, 1023]
defines.INLINE_MAX = [0, -1, 8]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// write
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t prng = 1;
uint8_t buffer[1024];
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_rewrite]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
defines.INLINE_MAX = [0, -1, 8]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// write
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// rewrite
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY) => 0;
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => lfs_max(SIZE1, SIZE2);
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
if (SIZE1 > SIZE2) {
prng = 1;
for (lfs_size_t b = 0; b < SIZE2; b++) {
TEST_PRNG(&prng);
}
for (lfs_size_t i = SIZE2; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_append]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
defines.INLINE_MAX = [0, -1, 8]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// write
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// append
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_APPEND) => 0;
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1 + SIZE2;
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_truncate]
defines.SIZE1 = [32, 8192, 131072, 0, 7, 8193]
defines.SIZE2 = [32, 8192, 131072, 0, 7, 8193]
defines.CHUNKSIZE = [31, 16, 1]
defines.INLINE_MAX = [0, -1, 8]
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;

// write
lfs_mount(&lfs, cfg) => 0;
lfs_file_t file;
uint8_t buffer[1024];
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE1;
prng = 1;
for (lfs_size_t i = 0; i < SIZE1; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE1-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// truncate
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_TRUNC) => 0;
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

// read
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE2;
prng = 2;
for (lfs_size_t i = 0; i < SIZE2; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE2-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_reentrant_write]
defines.SIZE = [32, 0, 7, 2049]
defines.CHUNKSIZE = [31, 16, 65]
defines.INLINE_MAX = [0, -1, 8]
reentrant = true
defines.POWERLOSS_BEHAVIOR = [
'LFS_EMUBD_POWERLOSS_NOOP',
'LFS_EMUBD_POWERLOSS_OOO',
]
code = '''
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}

lfs_file_t file;
uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// can only be 0 (new file) or full size
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size == 0 || size == SIZE);
lfs_file_close(&lfs, &file) => 0;
}

// write
lfs_file_open(&lfs, &file, "avacado", LFS_O_WRONLY | LFS_O_CREAT) => 0;
uint32_t prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
}
lfs_file_close(&lfs, &file) => 0;

// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_reentrant_write_sync]
defines = [
# append (O(n))
{MODE='LFS_O_APPEND',
SIZE=[32, 0, 7, 2049],
CHUNKSIZE=[31, 16, 65],
INLINE_MAX=[0, -1, 8]},
# truncate (O(n^2))
{MODE='LFS_O_TRUNC',
SIZE=[32, 0, 7, 200],
CHUNKSIZE=[31, 16, 65],
INLINE_MAX=[0, -1, 8]},
# rewrite (O(n^2))
{MODE=0,
SIZE=[32, 0, 7, 200],
CHUNKSIZE=[31, 16, 65],
INLINE_MAX=[0, -1, 8]},
]
reentrant = true
code = '''
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}

lfs_file_t file;
uint8_t buffer[1024];
err = lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY);
assert(err == LFS_ERR_NOENT || err == 0);
if (err == 0) {
// with syncs we could be any size, but it at least must be valid data
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
uint32_t prng = 1;
for (lfs_size_t i = 0; i < size; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, size-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_close(&lfs, &file) => 0;
}

// write
lfs_file_open(&lfs, &file, "avacado",
LFS_O_WRONLY | LFS_O_CREAT | MODE) => 0;
lfs_size_t size = lfs_file_size(&lfs, &file);
assert(size <= SIZE);
uint32_t prng = 1;
lfs_size_t skip = (MODE == LFS_O_APPEND) ? size : 0;
for (lfs_size_t b = 0; b < skip; b++) {
TEST_PRNG(&prng);
}
for (lfs_size_t i = skip; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
for (lfs_size_t b = 0; b < chunk; b++) {
buffer[b] = TEST_PRNG(&prng) & 0xff;
}
lfs_file_write(&lfs, &file, buffer, chunk) => chunk;
lfs_file_sync(&lfs, &file) => 0;
}
lfs_file_close(&lfs, &file) => 0;

// read
lfs_file_open(&lfs, &file, "avacado", LFS_O_RDONLY) => 0;
lfs_file_size(&lfs, &file) => SIZE;
prng = 1;
for (lfs_size_t i = 0; i < SIZE; i += CHUNKSIZE) {
lfs_size_t chunk = lfs_min(CHUNKSIZE, SIZE-i);
lfs_file_read(&lfs, &file, buffer, chunk) => chunk;
for (lfs_size_t b = 0; b < chunk; b++) {
assert(buffer[b] == (TEST_PRNG(&prng) & 0xff));
}
}
lfs_file_read(&lfs, &file, buffer, CHUNKSIZE) => 0;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_many]
defines.N = 300
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;

char rbuffer[1024];
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_many_power_cycle]
defines.N = 300
code = '''
lfs_t lfs;
lfs_format(&lfs, cfg) => 0;
// create N files of 7 bytes
lfs_mount(&lfs, cfg) => 0;
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
lfs_file_open(&lfs, &file, path,
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
char wbuffer[1024];
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
lfs_file_write(&lfs, &file, wbuffer, size) => size;
lfs_file_close(&lfs, &file) => 0;
lfs_unmount(&lfs) => 0;

char rbuffer[1024];
lfs_mount(&lfs, cfg) => 0;
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''

[cases.test_files_many_power_loss]
defines.N = 300
reentrant = true
defines.POWERLOSS_BEHAVIOR = [
'LFS_EMUBD_POWERLOSS_NOOP',
'LFS_EMUBD_POWERLOSS_OOO',
]
code = '''
lfs_t lfs;
int err = lfs_mount(&lfs, cfg);
if (err) {
lfs_format(&lfs, cfg) => 0;
lfs_mount(&lfs, cfg) => 0;
}
// create N files of 7 bytes
for (int i = 0; i < N; i++) {
lfs_file_t file;
char path[1024];
sprintf(path, "file_%03d", i);
err = lfs_file_open(&lfs, &file, path, LFS_O_WRONLY | LFS_O_CREAT);
char wbuffer[1024];
lfs_size_t size = 7;
sprintf(wbuffer, "Hi %03d", i);
if ((lfs_size_t)lfs_file_size(&lfs, &file) != size) {
lfs_file_write(&lfs, &file, wbuffer, size) => size;
}
lfs_file_close(&lfs, &file) => 0;

char rbuffer[1024];
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
lfs_file_read(&lfs, &file, rbuffer, size) => size;
assert(strcmp(rbuffer, wbuffer) == 0);
lfs_file_close(&lfs, &file) => 0;
}
lfs_unmount(&lfs) => 0;
'''
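None of the file tests above keep the full payload in RAM; both the write pass and the read-back pass regenerate the data stream from the same PRNG seed (TEST_PRNG with prng = 1, 2, ...), so any corruption shows up as a byte mismatch while the buffers stay at 1 KiB. A self-contained sketch of that write/verify pattern, using a plain xorshift32 generator as a stand-in, since TEST_PRNG itself comes from the test framework and its exact algorithm is not shown here:

#include <assert.h>
#include <stdint.h>

// Stand-in generator (xorshift32); the real TEST_PRNG may differ.
static uint32_t prng_next(uint32_t *state) {
    uint32_t x = *state;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    return *state = x;
}

int main(void) {
    uint8_t stored[64];

    // "write": produce a deterministic byte stream from seed 1
    uint32_t prng = 1;
    for (int i = 0; i < 64; i++) {
        stored[i] = prng_next(&prng) & 0xff;
    }

    // "read back": re-seed and verify byte by byte, as the tests do
    prng = 1;
    for (int i = 0; i < 64; i++) {
        assert(stored[i] == (prng_next(&prng) & 0xff));
    }
    return 0;
}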
@ -0,0 +1,274 @@
|
||||
[cases.test_interspersed_files]
|
||||
defines.SIZE = [10, 100]
|
||||
defines.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
struct lfs_info info;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
|
||||
assert(buffer[0] == alphas[j]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_interspersed_remove_files]
|
||||
defines.SIZE = [10, 100]
|
||||
defines.FILES = [4, 10, 26]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &alphas[j], 1) => 1;
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "zzz", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_write(&lfs, &file, (const void*)"~", 1) => 1;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
struct lfs_info info;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "zzz") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == FILES);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "zzz", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < FILES; i++) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, 1) => 1;
|
||||
assert(buffer[0] == '~');
|
||||
}
|
||||
lfs_file_close(&lfs, &file);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_interspersed_remove_inconveniently]
|
||||
defines.SIZE = [10, 100]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t files[3];
|
||||
lfs_file_open(&lfs, &files[0], "e", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_open(&lfs, &files[1], "f", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
lfs_file_open(&lfs, &files[2], "g", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
for (int i = 0; i < SIZE/2; i++) {
|
||||
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
|
||||
}
|
||||
|
||||
lfs_remove(&lfs, "f") => 0;
|
||||
|
||||
for (int i = 0; i < SIZE/2; i++) {
|
||||
lfs_file_write(&lfs, &files[0], (const void*)"e", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[1], (const void*)"f", 1) => 1;
|
||||
lfs_file_write(&lfs, &files[2], (const void*)"g", 1) => 1;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &files[0]);
|
||||
lfs_file_close(&lfs, &files[1]);
|
||||
lfs_file_close(&lfs, &files[2]);
|
||||
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
struct lfs_info info;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "e") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "g") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &files[0], "e", LFS_O_RDONLY) => 0;
|
||||
lfs_file_open(&lfs, &files[1], "g", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &files[0], buffer, 1) => 1;
|
||||
assert(buffer[0] == 'e');
|
||||
lfs_file_read(&lfs, &files[1], buffer, 1) => 1;
|
||||
assert(buffer[0] == 'g');
|
||||
}
|
||||
lfs_file_close(&lfs, &files[0]);
|
||||
lfs_file_close(&lfs, &files[1]);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
[cases.test_interspersed_reentrant_files]
|
||||
defines.SIZE = [10, 100]
|
||||
defines.FILES = [4, 10, 26]
|
||||
reentrant = true
|
||||
defines.POWERLOSS_BEHAVIOR = [
|
||||
'LFS_EMUBD_POWERLOSS_NOOP',
|
||||
'LFS_EMUBD_POWERLOSS_OOO',
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_file_t files[FILES];
|
||||
const char alphas[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < SIZE; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_ssize_t size = lfs_file_size(&lfs, &files[j]);
|
||||
assert(size >= 0);
|
||||
if ((int)size <= i) {
|
||||
lfs_file_write(&lfs, &files[j], &alphas[j], 1) => 1;
|
||||
lfs_file_sync(&lfs, &files[j]) => 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "/") => 0;
|
||||
struct lfs_info info;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, ".") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, "..") == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
assert(strcmp(info.name, path) == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
assert(info.size == SIZE);
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
char path[1024];
|
||||
sprintf(path, "%c", alphas[j]);
|
||||
lfs_file_open(&lfs, &files[j], path, LFS_O_RDONLY) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &files[j], buffer, 1) => 1;
|
||||
assert(buffer[0] == alphas[j]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int j = 0; j < FILES; j++) {
|
||||
lfs_file_close(&lfs, &files[j]);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
1905
components/joltwallet__littlefs/src/littlefs/tests/test_move.toml
Normal file
File diff suppressed because it is too large

@ -0,0 +1,340 @@
[cases.test_orphans_normal]
|
||||
in = "lfs.c"
|
||||
if = 'PROG_SIZE <= 0x3fe' # only works with one crc per commit
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "parent") => 0;
|
||||
lfs_mkdir(&lfs, "parent/orphan") => 0;
|
||||
lfs_mkdir(&lfs, "parent/child") => 0;
|
||||
lfs_remove(&lfs, "parent/orphan") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// corrupt the child's most recent commit, this should be the update
|
||||
// to the linked-list entry, which should orphan the orphan. Note this
|
||||
// makes a lot of assumptions about the remove operation.
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "parent/child") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
uint8_t buffer[BLOCK_SIZE];
|
||||
cfg->read(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
|
||||
int off = BLOCK_SIZE-1;
|
||||
while (off >= 0 && buffer[off] == ERASE_VALUE) {
|
||||
off -= 1;
|
||||
}
|
||||
memset(&buffer[off-3], BLOCK_SIZE, 3);
|
||||
cfg->erase(cfg, block) => 0;
|
||||
cfg->prog(cfg, block, 0, buffer, BLOCK_SIZE) => 0;
|
||||
cfg->sync(cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
// this mkdir should both create a dir and deorphan, so size
|
||||
// should be unchanged
|
||||
lfs_mkdir(&lfs, "parent/otherchild") => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_stat(&lfs, "parent/orphan", &info) => LFS_ERR_NOENT;
|
||||
lfs_stat(&lfs, "parent/child", &info) => 0;
|
||||
lfs_stat(&lfs, "parent/otherchild", &info) => 0;
|
||||
lfs_fs_size(&lfs) => 8;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# test that we only run deorphan once per power-cycle
[cases.test_orphans_no_orphans]
|
||||
in = 'lfs.c'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// mark the filesystem as having orphans
|
||||
lfs_fs_preporphans(&lfs, +1) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
|
||||
|
||||
// we should have orphans at this state
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mount
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should detect orphans
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
// force consistency
|
||||
lfs_fs_forceconsistency(&lfs) => 0;
|
||||
// we should no longer have orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_orphans_one_orphan]
|
||||
in = 'lfs.c'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// create an orphan
|
||||
lfs_mdir_t orphan;
|
||||
lfs_alloc_ckpoint(&lfs);
|
||||
lfs_dir_alloc(&lfs, &orphan) => 0;
|
||||
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
|
||||
|
||||
// append our orphan and mark the filesystem as having orphans
|
||||
lfs_fs_preporphans(&lfs, +1) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_pair_tole32(orphan.pair);
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
|
||||
|
||||
// we should have orphans at this state
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mount
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should detect orphans
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
// force consistency
|
||||
lfs_fs_forceconsistency(&lfs) => 0;
|
||||
// we should no longer have orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# test that we can persist gstate with lfs_fs_mkconsistent
[cases.test_orphans_mkconsistent_no_orphans]
|
||||
in = 'lfs.c'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// mark the filesystem as having orphans
|
||||
lfs_fs_preporphans(&lfs, +1) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_dir_commit(&lfs, &mdir, NULL, 0) => 0;
|
||||
|
||||
// we should have orphans at this state
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mount
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should detect orphans
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
// force consistency
|
||||
lfs_fs_mkconsistent(&lfs) => 0;
|
||||
// we should no longer have orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
|
||||
// remount
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should still have no orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
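
# Editor's illustrative sketch (not upstream): lfs_fs_mkconsistent is also
# safe to call when there is nothing to fix, so an application can invoke it
# unconditionally (for example before a read-only phase). Assumes the same
# harness and internal helpers as the cases above.
[cases.test_orphans_mkconsistent_already_consistent_sketch]
in = 'lfs.c'
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;

    // nothing is pending, so this is expected to be a no-op
    assert(!lfs_gstate_hasorphans(&lfs.gstate));
    lfs_fs_mkconsistent(&lfs) => 0;
    assert(!lfs_gstate_hasorphans(&lfs.gstate));

    lfs_unmount(&lfs) => 0;
'''
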
[cases.test_orphans_mkconsistent_one_orphan]
|
||||
in = 'lfs.c'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// create an orphan
|
||||
lfs_mdir_t orphan;
|
||||
lfs_alloc_ckpoint(&lfs);
|
||||
lfs_dir_alloc(&lfs, &orphan) => 0;
|
||||
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
|
||||
|
||||
// append our orphan and mark the filesystem as having orphans
|
||||
lfs_fs_preporphans(&lfs, +1) => 0;
|
||||
lfs_mdir_t mdir;
|
||||
lfs_dir_fetch(&lfs, &mdir, (lfs_block_t[2]){0, 1}) => 0;
|
||||
lfs_pair_tole32(orphan.pair);
|
||||
lfs_dir_commit(&lfs, &mdir, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), orphan.pair})) => 0;
|
||||
|
||||
// we should have orphans at this state
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mount
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should detect orphans
|
||||
assert(lfs_gstate_hasorphans(&lfs.gstate));
|
||||
// force consistency
|
||||
lfs_fs_mkconsistent(&lfs) => 0;
|
||||
// we should no longer have orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
|
||||
// remount
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// we should still have no orphans
|
||||
assert(!lfs_gstate_hasorphans(&lfs.gstate));
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# reentrant testing for orphans, basically just spam mkdir/remove
[cases.test_orphans_reentrant]
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20},
|
||||
{FILES=26, DEPTH=1, CYCLES=20},
|
||||
{FILES=3, DEPTH=3, CYCLES=20},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (int d = DEPTH-1; d >= 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# non-reentrant testing for orphans, this is the same as reentrant
# testing, but we test way more states than we could under powerloss
[cases.test_orphans_nonreentrant]
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=2000},
|
||||
{FILES=26, DEPTH=1, CYCLES=2000},
|
||||
{FILES=3, DEPTH=3, CYCLES=2000},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (int d = DEPTH-1; d >= 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
7398
components/joltwallet__littlefs/src/littlefs/tests/test_paths.toml
Normal file
File diff suppressed because it is too large

@ -0,0 +1,185 @@

# There are already a number of tests that test general operations under
# power-loss (see the reentrant attribute). These tests are for explicitly
# testing specific corner cases.

# only a revision count
[cases.test_powerloss_only_rev]
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "notebook") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "notebook/paper",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
char buffer[256];
|
||||
strcpy(buffer, "hello");
|
||||
lfs_size_t size = strlen("hello");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
char rbuffer[256];
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// get pair/rev count
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "notebook") => 0;
|
||||
lfs_block_t pair[2] = {dir.m.pair[0], dir.m.pair[1]};
|
||||
uint32_t rev = dir.m.rev;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// write just the revision count
|
||||
uint8_t bbuffer[BLOCK_SIZE];
|
||||
cfg->read(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
|
||||
|
||||
memcpy(bbuffer, &(uint32_t){lfs_tole32(rev+1)}, sizeof(uint32_t));
|
||||
|
||||
cfg->erase(cfg, pair[1]) => 0;
|
||||
cfg->prog(cfg, pair[1], 0, bbuffer, BLOCK_SIZE) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// can read?
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// can write?
|
||||
lfs_file_open(&lfs, &file, "notebook/paper",
|
||||
LFS_O_WRONLY | LFS_O_APPEND) => 0;
|
||||
strcpy(buffer, "goodbye");
|
||||
size = strlen("goodbye");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
strcpy(buffer, "hello");
|
||||
size = strlen("hello");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
strcpy(buffer, "goodbye");
|
||||
size = strlen("goodbye");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# partial prog, may not be byte in order!
[cases.test_powerloss_partial_prog]
|
||||
if = '''
|
||||
PROG_SIZE < BLOCK_SIZE
|
||||
&& (DISK_VERSION == 0 || DISK_VERSION >= 0x00020001)
|
||||
'''
|
||||
defines.BYTE_OFF = ["0", "PROG_SIZE-1", "PROG_SIZE/2"]
|
||||
defines.BYTE_VALUE = [0x33, 0xcc]
|
||||
in = "lfs.c"
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_mkdir(&lfs, "notebook") => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "notebook/paper",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
char buffer[256];
|
||||
strcpy(buffer, "hello");
|
||||
lfs_size_t size = strlen("hello");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
char rbuffer[256];
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// imitate a partial prog, value should not matter, if littlefs
|
||||
// doesn't notice the partial prog testbd will assert
|
||||
|
||||
// get offset to next prog
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_dir_t dir;
|
||||
lfs_dir_open(&lfs, &dir, "notebook") => 0;
|
||||
lfs_block_t block = dir.m.pair[0];
|
||||
lfs_off_t off = dir.m.off;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// tweak byte
|
||||
uint8_t bbuffer[BLOCK_SIZE];
|
||||
cfg->read(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
|
||||
|
||||
bbuffer[off + BYTE_OFF] = BYTE_VALUE;
|
||||
|
||||
cfg->erase(cfg, block) => 0;
|
||||
cfg->prog(cfg, block, 0, bbuffer, BLOCK_SIZE) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
// can read?
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
// can write?
|
||||
lfs_file_open(&lfs, &file, "notebook/paper",
|
||||
LFS_O_WRONLY | LFS_O_APPEND) => 0;
|
||||
strcpy(buffer, "goodbye");
|
||||
size = strlen("goodbye");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "notebook/paper", LFS_O_RDONLY) => 0;
|
||||
strcpy(buffer, "hello");
|
||||
size = strlen("hello");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
strcpy(buffer, "goodbye");
|
||||
size = strlen("goodbye");
|
||||
for (int i = 0; i < 5; i++) {
|
||||
lfs_file_read(&lfs, &file, rbuffer, size) => size;
|
||||
assert(memcmp(rbuffer, buffer, size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
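
# Editor's illustrative sketch (not upstream): both power-loss corner cases
# above build on the same application-level pattern, shown here in isolation:
# append a record, then lfs_file_sync it, so that everything synced so far
# survives an interrupted update. Assumes the same harness as above.
[cases.test_powerloss_sync_pattern_sketch]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;

    lfs_file_t file;
    lfs_file_open(&lfs, &file, "journal",
            LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
    lfs_size_t size = strlen("record");
    for (int i = 0; i < 5; i++) {
        // each record is only considered durable after the sync returns
        lfs_file_write(&lfs, &file, "record", size) => size;
        lfs_file_sync(&lfs, &file) => 0;
    }
    lfs_file_close(&lfs, &file) => 0;

    // the synced records are visible after a clean remount
    lfs_unmount(&lfs) => 0;
    lfs_mount(&lfs, cfg) => 0;
    struct lfs_info info;
    lfs_stat(&lfs, "journal", &info) => 0;
    assert(info.size == 5*size);
    lfs_unmount(&lfs) => 0;
'''
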
@ -0,0 +1,511 @@

# specific corner cases worth explicitly testing for
[cases.test_relocations_dangling_split_dir]
|
||||
defines.ITERATIONS = 20
|
||||
defines.COUNT = 10
|
||||
defines.BLOCK_CYCLES = [8, 1]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
// fill up filesystem so only ~16 blocks are left
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
uint8_t buffer[512];
|
||||
memset(buffer, 0, 512);
|
||||
while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
|
||||
lfs_file_write(&lfs, &file, buffer, 512) => 512;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// make a child dir to use in bounded space
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (unsigned j = 0; j < ITERATIONS; j++) {
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_t dir;
|
||||
struct lfs_info info;
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
if (j == (unsigned)ITERATIONS-1) {
|
||||
break;
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_dir_t dir;
|
||||
struct lfs_info info;
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
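
# Editor's illustrative sketch (not upstream): the relocation cases rely on a
# small BLOCK_CYCLES so nearly every metadata commit relocates its pair. This
# distilled loop just churns one name and checks the directory still reads
# back cleanly afterwards. Assumes the same harness and defines style as above.
[cases.test_relocations_churn_sketch]
defines.BLOCK_CYCLES = 1
defines.CYCLES = 20
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;

    for (int i = 0; i < CYCLES; i++) {
        // every create/remove commits metadata, forcing relocations
        lfs_file_t file;
        lfs_file_open(&lfs, &file, "churn", LFS_O_WRONLY | LFS_O_CREAT) => 0;
        lfs_file_close(&lfs, &file) => 0;
        lfs_remove(&lfs, "churn") => 0;
    }

    // root still enumerates as just "." and ".."
    lfs_dir_t dir;
    struct lfs_info info;
    lfs_dir_open(&lfs, &dir, "/") => 0;
    lfs_dir_read(&lfs, &dir, &info) => 1;
    lfs_dir_read(&lfs, &dir, &info) => 1;
    lfs_dir_read(&lfs, &dir, &info) => 0;
    lfs_dir_close(&lfs, &dir) => 0;
    lfs_unmount(&lfs) => 0;
'''
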
[cases.test_relocations_outdated_head]
|
||||
defines.ITERATIONS = 20
|
||||
defines.COUNT = 10
|
||||
defines.BLOCK_CYCLES = [8, 1]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
// fill up filesystem so only ~16 blocks are left
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "padding", LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
uint8_t buffer[512];
|
||||
memset(buffer, 0, 512);
|
||||
while (BLOCK_COUNT - lfs_fs_size(&lfs) > 16) {
|
||||
lfs_file_write(&lfs, &file, buffer, 512) => 512;
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
// make a child dir to use in bounded space
|
||||
lfs_mkdir(&lfs, "child") => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (unsigned j = 0; j < ITERATIONS; j++) {
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_CREAT | LFS_O_WRONLY) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_dir_t dir;
|
||||
struct lfs_info info;
|
||||
lfs_dir_open(&lfs, &dir, "child") => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 0;
|
||||
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 2;
|
||||
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hi", 2) => 2;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
|
||||
lfs_dir_rewind(&lfs, &dir) => 0;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_dir_read(&lfs, &dir, &info) => 1;
|
||||
strcmp(info.name, path) => 0;
|
||||
info.size => 2;
|
||||
}
|
||||
lfs_dir_read(&lfs, &dir, &info) => 0;
|
||||
lfs_dir_close(&lfs, &dir) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "child/test%03d_loooooooooooooooooong_name", i);
|
||||
lfs_remove(&lfs, path) => 0;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# reentrant testing for relocations, this is the same as the
# orphan testing, except here we also set block_cycles so that
# almost every tree operation needs a relocation
[cases.test_relocations_reentrant]
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# reentrant testing for relocations, but now with random renames!
[cases.test_relocations_reentrant_renames]
|
||||
reentrant = true
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=20, BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=20, BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// create new random path
|
||||
char new_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&new_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if new path does not exist, rename, otherwise destroy
|
||||
res = lfs_stat(&lfs, new_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// stop once some dir is renamed
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(&path[2*d], &full_path[2*d]);
|
||||
path[2*d+2] = '\0';
|
||||
strcpy(&path[128+2*d], &new_path[2*d]);
|
||||
path[128+2*d+2] = '\0';
|
||||
err = lfs_rename(&lfs, path, path+128);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
if (!err) {
|
||||
strcpy(path, path+128);
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, new_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
} else {
|
||||
// try to delete path in reverse order,
|
||||
// ignore if dir is not empty
|
||||
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
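
# Editor's illustrative sketch (not upstream): the rename loop above tolerates
# exactly two outcomes, shown here directly: renaming onto a fresh name
# succeeds, while renaming a directory onto a non-empty directory fails with
# LFS_ERR_NOTEMPTY and leaves both paths in place. Assumes the same harness.
[cases.test_relocations_rename_outcomes_sketch]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    lfs_mkdir(&lfs, "a") => 0;
    lfs_mkdir(&lfs, "b") => 0;
    lfs_mkdir(&lfs, "b/c") => 0;

    // target does not exist -> plain rename
    lfs_rename(&lfs, "a", "d") => 0;
    struct lfs_info info;
    lfs_stat(&lfs, "a", &info) => LFS_ERR_NOENT;
    lfs_stat(&lfs, "d", &info) => 0;

    // target is a non-empty directory -> rejected, nothing moves
    lfs_rename(&lfs, "d", "b") => LFS_ERR_NOTEMPTY;
    lfs_stat(&lfs, "d", &info) => 0;
    lfs_stat(&lfs, "b/c", &info) => 0;

    lfs_unmount(&lfs) => 0;
'''
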

# non-reentrant testing for relocations, this is the same as reentrant
# testing, but we test way more states than we could under powerloss
[cases.test_relocations_nonreentrant]
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
// is valid dir?
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// try to delete path in reverse order, ignore if dir is not empty
|
||||
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# non-reentrant testing for relocations, but now with random renames!
[cases.test_relocations_nonreentrant_renames]
|
||||
# TODO fix this case, caused by non-DAG trees
|
||||
# NOTE the second condition is required
|
||||
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
|
||||
defines = [
|
||||
{FILES=6, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
{FILES=26, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
{FILES=3, DEPTH=3, CYCLES=2000, BLOCK_CYCLES=1},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
uint32_t prng = 1;
|
||||
const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
|
||||
for (unsigned i = 0; i < CYCLES; i++) {
|
||||
// create random path
|
||||
char full_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if it does not exist, we create it, else we destroy
|
||||
struct lfs_info info;
|
||||
int res = lfs_stat(&lfs, full_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// create each directory in turn, ignore if dir already exists
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_mkdir(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_EXIST);
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
} else {
|
||||
assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
|
||||
// create new random path
|
||||
char new_path[256];
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
sprintf(&new_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
|
||||
}
|
||||
|
||||
// if new path does not exist, rename, otherwise destroy
|
||||
res = lfs_stat(&lfs, new_path, &info);
|
||||
assert(!res || res == LFS_ERR_NOENT);
|
||||
if (res == LFS_ERR_NOENT) {
|
||||
// stop once some dir is renamed
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(&path[2*d], &full_path[2*d]);
|
||||
path[2*d+2] = '\0';
|
||||
strcpy(&path[128+2*d], &new_path[2*d]);
|
||||
path[128+2*d+2] = '\0';
|
||||
int err = lfs_rename(&lfs, path, path+128);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
if (!err) {
|
||||
strcpy(path, path+128);
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned d = 0; d < DEPTH; d++) {
|
||||
char path[1024];
|
||||
strcpy(path, new_path);
|
||||
path[2*d+2] = '\0';
|
||||
lfs_stat(&lfs, path, &info) => 0;
|
||||
assert(strcmp(info.name, &path[2*d+1]) == 0);
|
||||
assert(info.type == LFS_TYPE_DIR);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
} else {
|
||||
// try to delete path in reverse order,
|
||||
// ignore if dir is not empty
|
||||
for (unsigned d = DEPTH-1; d+1 > 0; d--) {
|
||||
char path[1024];
|
||||
strcpy(path, full_path);
|
||||
path[2*d+2] = '\0';
|
||||
int err = lfs_remove(&lfs, path);
|
||||
assert(!err || err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
|
||||
}
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
@ -0,0 +1,662 @@

# simple file seek
[cases.test_seek_read]
|
||||
defines = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=1},
|
||||
{COUNT=4, SKIP=2},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen("kittycatcat");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
|
||||
|
||||
lfs_soff_t pos = -1;
|
||||
size = strlen("kittycatcat");
|
||||
for (int i = 0; i < SKIP; i++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
pos = lfs_file_tell(&lfs, &file);
|
||||
}
|
||||
assert(pos >= 0);
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_rewind(&lfs, &file) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, size, LFS_SEEK_CUR) => 3*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_CUR) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# simple file seek and write
[cases.test_seek_write]
|
||||
defines = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=1},
|
||||
{COUNT=4, SKIP=2},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen("kittycatcat");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
lfs_soff_t pos = -1;
|
||||
size = strlen("kittycatcat");
|
||||
for (int i = 0; i < SKIP; i++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
pos = lfs_file_tell(&lfs, &file);
|
||||
}
|
||||
assert(pos >= 0);
|
||||
|
||||
memcpy(buffer, "doggodogdog", size);
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "doggodogdog", size) => 0;
|
||||
|
||||
lfs_file_rewind(&lfs, &file) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, pos, LFS_SEEK_SET) => pos;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "doggodogdog", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -size, LFS_SEEK_END) >= 0 => 1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
size = lfs_file_size(&lfs, &file);
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
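
# Editor's illustrative sketch (not upstream): a smaller companion to the
# seek-and-write case above, checking only that an overwrite inside the
# existing contents leaves the file size unchanged. Assumes the same harness.
[cases.test_seek_overwrite_size_sketch]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    lfs_file_t file;
    lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR | LFS_O_CREAT) => 0;

    lfs_size_t size = strlen("kittycatcat");
    lfs_file_write(&lfs, &file, "kittycatcat", size) => size;
    lfs_file_write(&lfs, &file, "kittycatcat", size) => size;
    lfs_soff_t before = lfs_file_size(&lfs, &file);

    // overwrite in place, starting inside the first word
    lfs_file_seek(&lfs, &file, 3, LFS_SEEK_SET) => 3;
    lfs_file_write(&lfs, &file, "doggodogdog", size) => size;
    lfs_file_size(&lfs, &file) => before;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''
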

# boundary seek and reads
[cases.test_seek_boundary_read]
|
||||
defines.COUNT = 132
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen("kittycatcat");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY) => 0;
|
||||
|
||||
size = strlen("kittycatcat");
|
||||
const lfs_soff_t offsets[] = {
|
||||
512,
|
||||
1024-4,
|
||||
512+1,
|
||||
1024-4+1,
|
||||
512-1,
|
||||
1024-4-1,
|
||||
|
||||
512-strlen("kittycatcat"),
|
||||
1024-4-strlen("kittycatcat"),
|
||||
512-strlen("kittycatcat")+1,
|
||||
1024-4-strlen("kittycatcat")+1,
|
||||
512-strlen("kittycatcat")-1,
|
||||
1024-4-strlen("kittycatcat")-1,
|
||||
|
||||
strlen("kittycatcat")*(COUNT-2)-1,
|
||||
};
|
||||
|
||||
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
|
||||
lfs_soff_t off = offsets[i];
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[off % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read after
|
||||
lfs_file_seek(&lfs, &file, off+strlen("kittycatcat")+1, LFS_SEEK_SET)
|
||||
=> off+strlen("kittycatcat")+1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off+1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read before
|
||||
lfs_file_seek(&lfs, &file, off-strlen("kittycatcat")-1, LFS_SEEK_SET)
|
||||
=> off-strlen("kittycatcat")-1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off-1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
|
||||
// read @ 0
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[off % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read after
|
||||
lfs_file_seek(&lfs, &file, off+strlen("kittycatcat")+1, LFS_SEEK_SET)
|
||||
=> off+strlen("kittycatcat")+1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off+1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read before
|
||||
lfs_file_seek(&lfs, &file, off-strlen("kittycatcat")-1, LFS_SEEK_SET)
|
||||
=> off-strlen("kittycatcat")-1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off-1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
|
||||
// sync
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
// read @ 0
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[off % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read after
|
||||
lfs_file_seek(&lfs, &file, off+strlen("kittycatcat")+1, LFS_SEEK_SET)
|
||||
=> off+strlen("kittycatcat")+1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off+1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
// read before
|
||||
lfs_file_seek(&lfs, &file, off-strlen("kittycatcat")-1, LFS_SEEK_SET)
|
||||
=> off-strlen("kittycatcat")-1;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer,
|
||||
&"kittycatcatkittycatcat"[(off-1) % strlen("kittycatcat")],
|
||||
size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# boundary seek and writes
[cases.test_seek_boundary_write]
|
||||
defines.COUNT = 132
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen("kittycatcat");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
size = strlen("hedgehoghog");
|
||||
const lfs_soff_t offsets[] = {
|
||||
512,
|
||||
1024-4,
|
||||
512+1,
|
||||
1024-4+1,
|
||||
512-1,
|
||||
1024-4-1,
|
||||
|
||||
512-strlen("kittycatcat"),
|
||||
1024-4-strlen("kittycatcat"),
|
||||
512-strlen("kittycatcat")+1,
|
||||
1024-4-strlen("kittycatcat")+1,
|
||||
512-strlen("kittycatcat")-1,
|
||||
1024-4-strlen("kittycatcat")-1,
|
||||
|
||||
strlen("kittycatcat")*(COUNT-2)-1,
|
||||
};
|
||||
|
||||
for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
|
||||
lfs_soff_t off = offsets[i];
|
||||
// write @ offset
|
||||
memcpy(buffer, "hedgehoghog", size);
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
|
||||
// read @ 0
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
|
||||
// read @ 0
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "kittycatcat", size) => 0;
|
||||
|
||||
// read @ offset
|
||||
lfs_file_seek(&lfs, &file, off, LFS_SEEK_SET) => off;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hedgehoghog", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# out of bounds seek
[cases.test_seek_out_of_bounds]
|
||||
defines = [
|
||||
{COUNT=132, SKIP=4},
|
||||
{COUNT=132, SKIP=128},
|
||||
{COUNT=200, SKIP=10},
|
||||
{COUNT=200, SKIP=100},
|
||||
{COUNT=4, SKIP=2},
|
||||
{COUNT=4, SKIP=3},
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
size_t size = strlen("kittycatcat");
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "kittycatcat", size);
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_write(&lfs, &file, buffer, size);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
|
||||
size = strlen("kittycatcat");
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
|
||||
LFS_SEEK_SET) => (COUNT+SKIP)*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
memcpy(buffer, "porcupineee", size);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, (COUNT+SKIP)*size,
|
||||
LFS_SEEK_SET) => (COUNT+SKIP)*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "porcupineee", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, COUNT*size,
|
||||
LFS_SEEK_SET) => COUNT*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -((COUNT+SKIP)*size),
|
||||
LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, -((COUNT+2*SKIP)*size),
|
||||
LFS_SEEK_END) => LFS_ERR_INVAL;
|
||||
lfs_file_tell(&lfs, &file) => (COUNT+1)*size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
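
# Editor's illustrative sketch (not upstream): the out-of-bounds case above
# also implies that the gap created by seeking past the end and writing reads
# back as zeros; this is that observation in isolation. Assumes the same
# harness as above.
[cases.test_seek_hole_reads_zero_sketch]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    lfs_file_t file;
    lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR | LFS_O_CREAT) => 0;

    lfs_size_t size = strlen("kittycatcat");
    lfs_file_write(&lfs, &file, "kittycatcat", size) => size;

    // leave a one-word hole, then write past it
    lfs_file_seek(&lfs, &file, 2*size, LFS_SEEK_SET) => 2*size;
    lfs_file_write(&lfs, &file, "kittycatcat", size) => size;
    lfs_file_size(&lfs, &file) => 3*size;

    // the hole reads back as zeros
    uint8_t buffer[32];
    lfs_file_seek(&lfs, &file, size, LFS_SEEK_SET) => size;
    lfs_file_read(&lfs, &file, buffer, size) => size;
    memcmp(buffer, "\0\0\0\0\0\0\0\0\0\0\0", size) => 0;

    lfs_file_close(&lfs, &file) => 0;
    lfs_unmount(&lfs) => 0;
'''
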

# inline write and seek
[cases.test_seek_inline_write]
|
||||
defines.SIZE = [2, 4, 128, 132]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "tinykitty",
|
||||
LFS_O_RDWR | LFS_O_CREAT) => 0;
|
||||
int j = 0;
|
||||
int k = 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
memcpy(buffer, "abcdefghijklmnopqrstuvwxyz", 26);
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => i+1;
|
||||
}
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
uint8_t c;
|
||||
lfs_file_read(&lfs, &file, &c, 1) => 1;
|
||||
c => buffer[k++ % 26];
|
||||
}
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => SIZE;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
lfs_file_write(&lfs, &file, &buffer[j++ % 26], 1) => 1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
if (i < SIZE-2) {
|
||||
uint8_t c[3];
|
||||
lfs_file_seek(&lfs, &file, -1, LFS_SEEK_CUR) => i;
|
||||
lfs_file_read(&lfs, &file, &c, 3) => 3;
|
||||
lfs_file_tell(&lfs, &file) => i+3;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
lfs_file_seek(&lfs, &file, i+1, LFS_SEEK_SET) => i+1;
|
||||
lfs_file_tell(&lfs, &file) => i+1;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
for (unsigned i = 0; i < SIZE; i++) {
|
||||
uint8_t c;
|
||||
lfs_file_read(&lfs, &file, &c, 1) => 1;
|
||||
c => buffer[k++ % 26];
|
||||
}
|
||||
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_tell(&lfs, &file) => SIZE;
|
||||
lfs_file_size(&lfs, &file) => SIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''

# file seek and write with power-loss
[cases.test_seek_reentrant_write]
|
||||
# must be power-of-2 for quadratic probing to be exhaustive
|
||||
defines.COUNT = [4, 64, 128]
|
||||
reentrant = true
|
||||
defines.POWERLOSS_BEHAVIOR = [
|
||||
'LFS_EMUBD_POWERLOSS_NOOP',
|
||||
'LFS_EMUBD_POWERLOSS_OOO',
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
lfs_file_t file;
|
||||
uint8_t buffer[1024];
|
||||
err = lfs_file_open(&lfs, &file, "kitty", LFS_O_RDONLY);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
if (!err) {
|
||||
if (lfs_file_size(&lfs, &file) != 0) {
|
||||
lfs_file_size(&lfs, &file) => 11*COUNT;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
memset(buffer, 0, 11+1);
|
||||
lfs_file_read(&lfs, &file, buffer, 11) => 11;
|
||||
assert(memcmp(buffer, "kittycatcat", 11) == 0 ||
|
||||
memcmp(buffer, "doggodogdog", 11) == 0);
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
if (lfs_file_size(&lfs, &file) == 0) {
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
strcpy((char*)buffer, "kittycatcat");
|
||||
size_t size = strlen((char*)buffer);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
strcpy((char*)buffer, "doggodogdog");
|
||||
size_t size = strlen((char*)buffer);
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
// seek and write using quadratic probing to touch all
|
||||
// 11-byte words in the file
|
||||
lfs_off_t off = 0;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
off = (5*off + 1) % COUNT;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "kittycatcat", size) == 0 ||
|
||||
memcmp(buffer, "doggodogdog", size) == 0);
|
||||
if (memcmp(buffer, "doggodogdog", size) != 0) {
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
strcpy((char*)buffer, "doggodogdog");
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
lfs_file_sync(&lfs, &file) => 0;
|
||||
lfs_file_seek(&lfs, &file, off*size, LFS_SEEK_SET) => off*size;
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
}
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "kitty", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => COUNT*size;
|
||||
for (int j = 0; j < COUNT; j++) {
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
assert(memcmp(buffer, "doggodogdog", size) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
# test possible overflow/underflow conditions
|
||||
#
|
||||
# note these need -fsanitize=undefined to consistently detect
|
||||
# overflow/underflow conditions
|
||||
|
||||
[cases.test_seek_filemax]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "kittycatcat");
|
||||
size_t size = strlen((char*)buffer);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
// seek with LFS_SEEK_SET
|
||||
lfs_file_seek(&lfs, &file, LFS_FILE_MAX, LFS_SEEK_SET) => LFS_FILE_MAX;
|
||||
|
||||
// seek with LFS_SEEK_CUR
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_CUR) => LFS_FILE_MAX;
|
||||
|
||||
    // the file hasn't changed size, so LFS_SEEK_END is still relative to the
    // original size rather than LFS_FILE_MAX
|
||||
lfs_file_seek(&lfs, &file, +10, LFS_SEEK_END) => size+10;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_seek_underflow]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "kittycatcat");
|
||||
size_t size = strlen((char*)buffer);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
// underflow with LFS_SEEK_CUR, should error
|
||||
lfs_file_seek(&lfs, &file, -(size+10), LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, -LFS_FILE_MAX, LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, -(size+LFS_FILE_MAX), LFS_SEEK_CUR)
|
||||
=> LFS_ERR_INVAL;
|
||||
|
||||
// underflow with LFS_SEEK_END, should error
|
||||
lfs_file_seek(&lfs, &file, -(size+10), LFS_SEEK_END) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, -LFS_FILE_MAX, LFS_SEEK_END) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, -(size+LFS_FILE_MAX), LFS_SEEK_END)
|
||||
=> LFS_ERR_INVAL;
|
||||
|
||||
// file pointer should not have changed
|
||||
lfs_file_tell(&lfs, &file) => size;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_seek_overflow]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "kitty",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND) => 0;
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "kittycatcat");
|
||||
size_t size = strlen((char*)buffer);
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
|
||||
// seek to LFS_FILE_MAX
|
||||
lfs_file_seek(&lfs, &file, LFS_FILE_MAX, LFS_SEEK_SET) => LFS_FILE_MAX;
|
||||
|
||||
// overflow with LFS_SEEK_CUR, should error
|
||||
lfs_file_seek(&lfs, &file, +10, LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, +LFS_FILE_MAX, LFS_SEEK_CUR) => LFS_ERR_INVAL;
|
||||
|
||||
// LFS_SEEK_SET/END don't care about the current file position, but we can
|
||||
// still overflow with a large offset
|
||||
|
||||
// overflow with LFS_SEEK_SET, should error
|
||||
lfs_file_seek(&lfs, &file,
|
||||
+((uint32_t)LFS_FILE_MAX+10),
|
||||
LFS_SEEK_SET) => LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file,
|
||||
+((uint32_t)LFS_FILE_MAX+(uint32_t)LFS_FILE_MAX),
|
||||
LFS_SEEK_SET) => LFS_ERR_INVAL;
|
||||
|
||||
// overflow with LFS_SEEK_END, should error
|
||||
lfs_file_seek(&lfs, &file, +(LFS_FILE_MAX-size+10), LFS_SEEK_END)
|
||||
=> LFS_ERR_INVAL;
|
||||
lfs_file_seek(&lfs, &file, +(LFS_FILE_MAX-size+LFS_FILE_MAX), LFS_SEEK_END)
|
||||
=> LFS_ERR_INVAL;
|
||||
|
||||
// file pointer should not have changed
|
||||
lfs_file_tell(&lfs, &file) => LFS_FILE_MAX;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
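The overflow and underflow cases above pin down the seek contract: any target position outside [0, LFS_FILE_MAX] is rejected with LFS_ERR_INVAL and the file position is left untouched. As a rough sketch (not part of littlefs or this component), an application that prefers clamping over failing could wrap lfs_file_seek as below; the helper name and clamping policy are assumptions of the sketch.

    #include <stdint.h>
    #include "littlefs/lfs.h"

    // Hypothetical helper: clamp a relative seek into [0, LFS_FILE_MAX]
    // instead of letting littlefs reject it with LFS_ERR_INVAL.
    static lfs_soff_t seek_clamped(lfs_t *lfs, lfs_file_t *file, lfs_soff_t delta) {
        lfs_soff_t pos = lfs_file_tell(lfs, file);
        if (pos < 0) {
            return pos;  // propagate the error
        }
        // do the math in 64 bits so the clamp itself cannot overflow
        int64_t target = (int64_t)pos + (int64_t)delta;
        if (target < 0) {
            target = 0;
        } else if (target > LFS_FILE_MAX) {
            target = LFS_FILE_MAX;
        }
        return lfs_file_seek(lfs, file, (lfs_soff_t)target, LFS_SEEK_SET);
    }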
|
|
@@ -0,0 +1,109 @@
|
|||
# simple shrink
|
||||
[cases.test_shrink_simple]
|
||||
defines.BLOCK_COUNT = [10, 15, 20]
|
||||
defines.AFTER_BLOCK_COUNT = [5, 10, 15, 19]
|
||||
|
||||
if = "AFTER_BLOCK_COUNT <= BLOCK_COUNT"
|
||||
code = '''
|
||||
#ifdef LFS_SHRINKNONRELOCATING
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, AFTER_BLOCK_COUNT) => 0;
|
||||
lfs_unmount(&lfs);
|
||||
if (BLOCK_COUNT != AFTER_BLOCK_COUNT) {
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
}
|
||||
lfs_t lfs2 = lfs;
|
||||
struct lfs_config cfg2 = *cfg;
|
||||
cfg2.block_count = AFTER_BLOCK_COUNT;
|
||||
lfs2.cfg = &cfg2;
|
||||
lfs_mount(&lfs2, &cfg2) => 0;
|
||||
lfs_unmount(&lfs2) => 0;
|
||||
#endif
|
||||
'''
|
||||
|
||||
# shrinking full
|
||||
[cases.test_shrink_full]
|
||||
defines.BLOCK_COUNT = [10, 15, 20]
|
||||
defines.AFTER_BLOCK_COUNT = [5, 7, 10, 12, 15, 17, 20]
|
||||
defines.FILES_COUNT = [7, 8, 9, 10]
|
||||
if = "AFTER_BLOCK_COUNT <= BLOCK_COUNT && FILES_COUNT + 2 < BLOCK_COUNT"
|
||||
code = '''
|
||||
#ifdef LFS_SHRINKNONRELOCATING
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
    // create FILES_COUNT + 1 files of BLOCK_SIZE - 0x40 bytes (to avoid inlining)
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 0; i < FILES_COUNT + 1; i++) {
|
||||
lfs_file_t file;
|
||||
char path[1024];
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
char wbuffer[BLOCK_SIZE];
|
||||
memset(wbuffer, 'b', BLOCK_SIZE);
|
||||
// Ensure one block is taken per file, but that files are not inlined.
|
||||
lfs_size_t size = BLOCK_SIZE - 0x40;
|
||||
sprintf(wbuffer, "Hi %03d", i);
|
||||
lfs_file_write(&lfs, &file, wbuffer, size) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
int err = lfs_fs_grow(&lfs, AFTER_BLOCK_COUNT);
|
||||
if (err == 0) {
|
||||
for (int i = 0; i < FILES_COUNT + 1; i++) {
|
||||
lfs_file_t file;
|
||||
char path[1024];
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_RDONLY ) => 0;
|
||||
lfs_size_t size = BLOCK_SIZE - 0x40;
|
||||
char wbuffer[size];
|
||||
char wbuffer_ref[size];
|
||||
// Ensure one block is taken per file, but that files are not inlined.
|
||||
memset(wbuffer_ref, 'b', size);
|
||||
sprintf(wbuffer_ref, "Hi %03d", i);
|
||||
lfs_file_read(&lfs, &file, wbuffer, BLOCK_SIZE) => size;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
wbuffer[j] => wbuffer_ref[j];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert(err == LFS_ERR_NOTEMPTY);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
if (err == 0 ) {
|
||||
if ( AFTER_BLOCK_COUNT != BLOCK_COUNT ) {
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
}
|
||||
|
||||
lfs_t lfs2 = lfs;
|
||||
struct lfs_config cfg2 = *cfg;
|
||||
cfg2.block_count = AFTER_BLOCK_COUNT;
|
||||
lfs2.cfg = &cfg2;
|
||||
lfs_mount(&lfs2, &cfg2) => 0;
|
||||
for (int i = 0; i < FILES_COUNT + 1; i++) {
|
||||
lfs_file_t file;
|
||||
char path[1024];
|
||||
sprintf(path, "file_%03d", i);
|
||||
lfs_file_open(&lfs2, &file, path,
|
||||
LFS_O_RDONLY ) => 0;
|
||||
lfs_size_t size = BLOCK_SIZE - 0x40;
|
||||
char wbuffer[size];
|
||||
char wbuffer_ref[size];
|
||||
// Ensure one block is taken per file, but that files are not inlined.
|
||||
memset(wbuffer_ref, 'b', size);
|
||||
sprintf(wbuffer_ref, "Hi %03d", i);
|
||||
lfs_file_read(&lfs2, &file, wbuffer, BLOCK_SIZE) => size;
|
||||
lfs_file_close(&lfs2, &file) => 0;
|
||||
for (lfs_size_t j = 0; j < size; j++) {
|
||||
wbuffer[j] => wbuffer_ref[j];
|
||||
}
|
||||
}
|
||||
lfs_unmount(&lfs2);
|
||||
}
|
||||
#endif
|
||||
'''
|
||||
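Both shrink cases above follow the same calling pattern: lfs_fs_grow() with a smaller block count is only available when littlefs is built with LFS_SHRINKNONRELOCATING, it returns LFS_ERR_NOTEMPTY if live data sits in the blocks being cut off, and later mounts must use the reduced geometry. A hedged sketch of that pattern (error handling trimmed, the function name is an assumption):

    #include "littlefs/lfs.h"

    // Sketch: shrink a mounted filesystem to new_count blocks, assuming a
    // littlefs build with LFS_SHRINKNONRELOCATING. Data is not relocated,
    // so LFS_ERR_NOTEMPTY means the tail blocks still hold live data.
    static int shrink_to(lfs_t *lfs, struct lfs_config *cfg, lfs_size_t new_count) {
        int err = lfs_fs_grow(lfs, new_count);  // also shrinks in such a build
        if (err) {
            return err;  // e.g. LFS_ERR_NOTEMPTY
        }
        // subsequent mounts must use the reduced block_count (or 0 = autodetect)
        err = lfs_unmount(lfs);
        if (err) {
            return err;
        }
        cfg->block_count = new_count;
        return lfs_mount(lfs, cfg);
    }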
|
|
@@ -0,0 +1,660 @@
|
|||
# simple formatting test
|
||||
[cases.test_superblocks_format]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
'''
|
||||
|
||||
# mount/unmount
|
||||
[cases.test_superblocks_mount]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# make sure the magic string "littlefs" is always at offset=8
|
||||
[cases.test_superblocks_magic]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
// check our magic string
|
||||
//
|
||||
// note if we lose power we may not have the magic string in both blocks!
|
||||
// but we don't lose power in this test so we can assert the magic string
|
||||
// is present in both
|
||||
uint8_t magic[lfs_max(16, READ_SIZE)];
|
||||
cfg->read(cfg, 0, 0, magic, lfs_max(16, READ_SIZE)) => 0;
|
||||
assert(memcmp(&magic[8], "littlefs", 8) == 0);
|
||||
cfg->read(cfg, 1, 0, magic, lfs_max(16, READ_SIZE)) => 0;
|
||||
assert(memcmp(&magic[8], "littlefs", 8) == 0);
|
||||
'''
|
||||
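Since the case above guarantees the ASCII string "littlefs" at byte offset 8 of both superblock blocks, external tooling can cheaply probe a raw partition dump for a littlefs image. A minimal sketch over an in-memory image (the image/block-size parameters are assumptions of the sketch, not a littlefs API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    // Sketch: detect a littlefs image by looking for "littlefs" at offset 8
    // of block 0 or block 1 (either superblock copy may be the valid one).
    static bool looks_like_littlefs(const uint8_t *image, size_t image_size,
                                    size_t block_size) {
        for (int b = 0; b < 2; b++) {
            size_t off = (size_t)b * block_size + 8;
            if (off + 8 <= image_size && memcmp(&image[off], "littlefs", 8) == 0) {
                return true;
            }
        }
        return false;
    }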
|
||||
# mount/unmount from interpreting a previous superblock block_count
|
||||
[cases.test_superblocks_mount_unknown_block_count]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
memset(&lfs, 0, sizeof(lfs));
|
||||
struct lfs_config tweaked_cfg = *cfg;
|
||||
tweaked_cfg.block_count = 0;
|
||||
lfs_mount(&lfs, &tweaked_cfg) => 0;
|
||||
assert(lfs.block_count == cfg->block_count);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# reentrant format
|
||||
[cases.test_superblocks_reentrant_format]
|
||||
reentrant = true
|
||||
defines.POWERLOSS_BEHAVIOR = [
|
||||
'LFS_EMUBD_POWERLOSS_NOOP',
|
||||
'LFS_EMUBD_POWERLOSS_OOO',
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# invalid mount
|
||||
[cases.test_superblocks_invalid_mount]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_CORRUPT;
|
||||
'''
|
||||
|
||||
# test we can read superblock info through lfs_fs_stat
|
||||
[cases.test_superblocks_stat]
|
||||
if = 'DISK_VERSION == 0'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
// test we can mount and read fsinfo
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.disk_version == LFS_DISK_VERSION);
|
||||
assert(fsinfo.name_max == LFS_NAME_MAX);
|
||||
assert(fsinfo.file_max == LFS_FILE_MAX);
|
||||
assert(fsinfo.attr_max == LFS_ATTR_MAX);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
[cases.test_superblocks_stat_tweaked]
|
||||
if = 'DISK_VERSION == 0'
|
||||
defines.TWEAKED_NAME_MAX = 63
|
||||
defines.TWEAKED_FILE_MAX = '(1 << 16)-1'
|
||||
defines.TWEAKED_ATTR_MAX = 512
|
||||
code = '''
|
||||
// create filesystem with tweaked params
|
||||
struct lfs_config tweaked_cfg = *cfg;
|
||||
tweaked_cfg.name_max = TWEAKED_NAME_MAX;
|
||||
tweaked_cfg.file_max = TWEAKED_FILE_MAX;
|
||||
tweaked_cfg.attr_max = TWEAKED_ATTR_MAX;
|
||||
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, &tweaked_cfg) => 0;
|
||||
|
||||
// test we can mount and read these params with the original config
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.disk_version == LFS_DISK_VERSION);
|
||||
assert(fsinfo.name_max == TWEAKED_NAME_MAX);
|
||||
assert(fsinfo.file_max == TWEAKED_FILE_MAX);
|
||||
assert(fsinfo.attr_max == TWEAKED_ATTR_MAX);
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# expanding superblock
|
||||
[cases.test_superblocks_expand]
|
||||
defines.BLOCK_CYCLES = [32, 33, 1]
|
||||
defines.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# make sure the magic string "littlefs" is always at offset=8
|
||||
[cases.test_superblocks_magic_expand]
|
||||
defines.BLOCK_CYCLES = [32, 33, 1]
|
||||
defines.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// check our magic string
|
||||
//
|
||||
// note if we lose power we may not have the magic string in both blocks!
|
||||
// but we don't lose power in this test so we can assert the magic string
|
||||
// is present in both
|
||||
uint8_t magic[lfs_max(16, READ_SIZE)];
|
||||
cfg->read(cfg, 0, 0, magic, lfs_max(16, READ_SIZE)) => 0;
|
||||
assert(memcmp(&magic[8], "littlefs", 8) == 0);
|
||||
cfg->read(cfg, 1, 0, magic, lfs_max(16, READ_SIZE)) => 0;
|
||||
assert(memcmp(&magic[8], "littlefs", 8) == 0);
|
||||
'''
|
||||
|
||||
# expanding superblock with power cycle
|
||||
[cases.test_superblocks_expand_power_cycle]
|
||||
defines.BLOCK_CYCLES = [32, 33, 1]
|
||||
defines.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
// remove lingering dummy?
|
||||
struct lfs_info info;
|
||||
int err = lfs_stat(&lfs, "dummy", &info);
|
||||
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
|
||||
if (!err) {
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
}
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# reentrant expanding superblock
|
||||
[cases.test_superblocks_reentrant_expand]
|
||||
defines.BLOCK_CYCLES = [2, 1]
|
||||
defines.N = 24
|
||||
reentrant = true
|
||||
defines.POWERLOSS_BEHAVIOR = [
|
||||
'LFS_EMUBD_POWERLOSS_NOOP',
|
||||
'LFS_EMUBD_POWERLOSS_OOO',
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
|
||||
for (int i = 0; i < N; i++) {
|
||||
// remove lingering dummy?
|
||||
struct lfs_info info;
|
||||
err = lfs_stat(&lfs, "dummy", &info);
|
||||
assert(err == 0 || (err == LFS_ERR_NOENT && i == 0));
|
||||
if (!err) {
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_remove(&lfs, "dummy") => 0;
|
||||
}
|
||||
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "dummy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// one last check after power-cycle
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, "dummy", &info) => 0;
|
||||
assert(strcmp(info.name, "dummy") == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
# mount with unknown block_count
|
||||
[cases.test_superblocks_unknown_blocks]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
// known block_size/block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// unknown block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// do some work
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "test",
|
||||
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
|
||||
uint8_t buffer[256];
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
assert(memcmp(buffer, "hello!", 6) == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# mount with blocks fewer than the erase_count
|
||||
[cases.test_superblocks_fewer_blocks]
|
||||
defines.BLOCK_COUNT = ['ERASE_COUNT/2', 'ERASE_COUNT/4', '2']
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
// known block_size/block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// incorrect block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = ERASE_COUNT;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
|
||||
// unknown block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// do some work
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "test",
|
||||
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
|
||||
uint8_t buffer[256];
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
assert(memcmp(buffer, "hello!", 6) == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# mount with more blocks than the erase_count
|
||||
[cases.test_superblocks_more_blocks]
|
||||
defines.FORMAT_BLOCK_COUNT = '2*ERASE_COUNT'
|
||||
in = 'lfs.c'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_init(&lfs, cfg) => 0;
|
||||
lfs.block_count = BLOCK_COUNT;
|
||||
|
||||
lfs_mdir_t root = {
|
||||
.pair = {0, 0}, // make sure this goes into block 0
|
||||
.rev = 0,
|
||||
.off = sizeof(uint32_t),
|
||||
.etag = 0xffffffff,
|
||||
.count = 0,
|
||||
.tail = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
|
||||
.erased = false,
|
||||
.split = false,
|
||||
};
|
||||
|
||||
lfs_superblock_t superblock = {
|
||||
.version = LFS_DISK_VERSION,
|
||||
.block_size = BLOCK_SIZE,
|
||||
.block_count = FORMAT_BLOCK_COUNT,
|
||||
.name_max = LFS_NAME_MAX,
|
||||
.file_max = LFS_FILE_MAX,
|
||||
.attr_max = LFS_ATTR_MAX,
|
||||
};
|
||||
|
||||
lfs_superblock_tole32(&superblock);
|
||||
lfs_dir_commit(&lfs, &root, LFS_MKATTRS(
|
||||
{LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
|
||||
{LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
|
||||
{LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
|
||||
&superblock})) => 0;
|
||||
lfs_deinit(&lfs) => 0;
|
||||
|
||||
// known block_size/block_count
|
||||
cfg->block_size = BLOCK_SIZE;
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
'''
|
||||
|
||||
# mount and grow the filesystem
|
||||
[cases.test_superblocks_grow]
|
||||
defines.BLOCK_COUNT = ['ERASE_COUNT/2', 'ERASE_COUNT/4', '2']
|
||||
defines.BLOCK_COUNT_2 = 'ERASE_COUNT'
|
||||
defines.KNOWN_BLOCK_COUNT = [true, false]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
// mount with block_size < erase_size
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// same size is a noop
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// grow to new size
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT_2;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mounting with the previous size should fail
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT_2;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
// same size is a noop
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// do some work
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "test",
|
||||
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
|
||||
uint8_t buffer[256];
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
assert(memcmp(buffer, "hello!", 6) == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
|
||||
# mount and shrink the filesystem
|
||||
[cases.test_superblocks_shrink]
|
||||
defines.BLOCK_COUNT = 'ERASE_COUNT'
|
||||
defines.BLOCK_COUNT_2 = ['ERASE_COUNT/2', 'ERASE_COUNT/4', '2']
|
||||
defines.KNOWN_BLOCK_COUNT = [true, false]
|
||||
code = '''
|
||||
#ifdef LFS_SHRINKNONRELOCATING
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
// mount with block_size < erase_size
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
struct lfs_fsinfo fsinfo;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// same size is a noop
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// grow to new size
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT_2;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// mounting with the previous size should fail
|
||||
cfg->block_count = BLOCK_COUNT;
|
||||
lfs_mount(&lfs, cfg) => LFS_ERR_INVAL;
|
||||
|
||||
if (KNOWN_BLOCK_COUNT) {
|
||||
cfg->block_count = BLOCK_COUNT_2;
|
||||
} else {
|
||||
cfg->block_count = 0;
|
||||
}
|
||||
|
||||
// same size is a noop
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_grow(&lfs, BLOCK_COUNT_2) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// do some work
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "test",
|
||||
LFS_O_CREAT | LFS_O_EXCL | LFS_O_WRONLY) => 0;
|
||||
lfs_file_write(&lfs, &file, "hello!", 6) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_fs_stat(&lfs, &fsinfo) => 0;
|
||||
assert(fsinfo.block_size == BLOCK_SIZE);
|
||||
assert(fsinfo.block_count == BLOCK_COUNT_2);
|
||||
lfs_file_open(&lfs, &file, "test", LFS_O_RDONLY) => 0;
|
||||
uint8_t buffer[256];
|
||||
lfs_file_read(&lfs, &file, buffer, sizeof(buffer)) => 6;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
assert(memcmp(buffer, "hello!", 6) == 0);
|
||||
lfs_unmount(&lfs) => 0;
|
||||
#endif
|
||||
'''
|
||||
|
||||
# test that metadata_max does not cause problems for superblock compaction
|
||||
[cases.test_superblocks_metadata_max]
|
||||
defines.METADATA_MAX = [
|
||||
'lfs_max(512, PROG_SIZE)',
|
||||
'lfs_max(BLOCK_SIZE/2, PROG_SIZE)',
|
||||
'BLOCK_SIZE'
|
||||
]
|
||||
defines.N = [10, 100, 1000]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
for (int i = 0; i < N; i++) {
|
||||
lfs_file_t file;
|
||||
char name[256];
|
||||
sprintf(name, "hello%03x", i);
|
||||
lfs_file_open(&lfs, &file, name,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
struct lfs_info info;
|
||||
lfs_stat(&lfs, name, &info) => 0;
|
||||
assert(strcmp(info.name, name) == 0);
|
||||
assert(info.type == LFS_TYPE_REG);
|
||||
}
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
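The metadata_max case above only checks that superblock compaction survives a capped metadata size; in application code the cap is a single lfs_config field. A small hedged sketch, with the helper name an assumption and the cap chosen arbitrarily:

    #include "littlefs/lfs.h"

    // Sketch: bound metadata compaction cost by capping how much of each
    // metadata block littlefs may fill. Must be <= block_size; 0 keeps the
    // default (use the whole block).
    static void bound_metadata(struct lfs_config *cfg, lfs_size_t cap) {
        if (cap != 0 && cap <= cfg->block_size) {
            cfg->metadata_max = cap;
        }
    }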
|
|
@@ -0,0 +1,503 @@
|
|||
# simple truncate
|
||||
[cases.test_truncate_simple]
|
||||
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
|
||||
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
|
||||
if = 'MEDIUMSIZE < LARGESIZE'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "baldynoop",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
|
||||
=> lfs_min(size, LARGESIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# truncate and read
|
||||
[cases.test_truncate_read]
|
||||
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
|
||||
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
|
||||
if = 'MEDIUMSIZE < LARGESIZE'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "baldyread",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
|
||||
=> lfs_min(size, LARGESIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldyread", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("hair");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# write, truncate, and read
|
||||
[cases.test_truncate_write_read]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "sequence",
|
||||
LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
size_t size = lfs_min(lfs.cfg->cache_size, sizeof(buffer)/2);
|
||||
lfs_size_t qsize = size / 4;
|
||||
uint8_t *wb = buffer;
|
||||
uint8_t *rb = buffer + size;
|
||||
for (lfs_off_t j = 0; j < size; ++j) {
|
||||
wb[j] = j;
|
||||
}
|
||||
|
||||
/* Spread sequence over size */
|
||||
lfs_file_write(&lfs, &file, wb, size) => size;
|
||||
lfs_file_size(&lfs, &file) => size;
|
||||
lfs_file_tell(&lfs, &file) => size;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
|
||||
/* Chop off the last quarter */
|
||||
lfs_size_t trunc = size - qsize;
|
||||
lfs_file_truncate(&lfs, &file, trunc) => 0;
|
||||
lfs_file_tell(&lfs, &file) => 0;
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
|
||||
/* Read should produce first 3/4 */
|
||||
lfs_file_read(&lfs, &file, rb, size) => trunc;
|
||||
memcmp(rb, wb, trunc) => 0;
|
||||
|
||||
/* Move to 1/4 */
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
lfs_file_seek(&lfs, &file, qsize, LFS_SEEK_SET) => qsize;
|
||||
lfs_file_tell(&lfs, &file) => qsize;
|
||||
|
||||
/* Chop to 1/2 */
|
||||
trunc -= qsize;
|
||||
lfs_file_truncate(&lfs, &file, trunc) => 0;
|
||||
lfs_file_tell(&lfs, &file) => qsize;
|
||||
lfs_file_size(&lfs, &file) => trunc;
|
||||
|
||||
/* Read should produce second quarter */
|
||||
lfs_file_read(&lfs, &file, rb, size) => trunc - qsize;
|
||||
memcmp(rb, wb + qsize, trunc - qsize) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# truncate and write
|
||||
[cases.test_truncate_write]
|
||||
defines.MEDIUMSIZE = [31, 32, 33, 511, 512, 513, 2047, 2048, 2049]
|
||||
defines.LARGESIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
|
||||
if = 'MEDIUMSIZE < LARGESIZE'
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "baldywrite",
|
||||
LFS_O_WRONLY | LFS_O_CREAT) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
|
||||
=> lfs_min(size, LARGESIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
|
||||
/* truncate */
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
/* and write */
|
||||
strcpy((char*)buffer, "bald");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldywrite", LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
size = strlen("bald");
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "bald", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# truncate write under powerloss
|
||||
[cases.test_truncate_reentrant_write]
|
||||
defines.SMALLSIZE = [4, 512]
|
||||
defines.MEDIUMSIZE = [0, 3, 4, 5, 31, 32, 33, 511, 512, 513, 1023, 1024, 1025]
|
||||
defines.LARGESIZE = 2048
|
||||
reentrant = true
|
||||
defines.POWERLOSS_BEHAVIOR = [
|
||||
'LFS_EMUBD_POWERLOSS_NOOP',
|
||||
'LFS_EMUBD_POWERLOSS_OOO',
|
||||
]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
int err = lfs_mount(&lfs, cfg);
|
||||
if (err) {
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
}
|
||||
lfs_file_t file;
|
||||
err = lfs_file_open(&lfs, &file, "baldy", LFS_O_RDONLY);
|
||||
assert(!err || err == LFS_ERR_NOENT);
|
||||
if (!err) {
|
||||
size_t size = lfs_file_size(&lfs, &file);
|
||||
assert(size == 0 ||
|
||||
size == (size_t)LARGESIZE ||
|
||||
size == (size_t)MEDIUMSIZE ||
|
||||
size == (size_t)SMALLSIZE);
|
||||
for (lfs_off_t j = 0; j < size; j += 4) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(4, size-j))
|
||||
=> lfs_min(4, size-j);
|
||||
assert(memcmp(buffer, "hair", lfs_min(4, size-j)) == 0 ||
|
||||
memcmp(buffer, "bald", lfs_min(4, size-j)) == 0 ||
|
||||
memcmp(buffer, "comb", lfs_min(4, size-j)) == 0);
|
||||
}
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy",
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
lfs_file_size(&lfs, &file) => 0;
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < LARGESIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, LARGESIZE-j))
|
||||
=> lfs_min(size, LARGESIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => LARGESIZE;
|
||||
/* truncate */
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
/* and write */
|
||||
strcpy((char*)buffer, "bald");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_file_open(&lfs, &file, "baldy", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
lfs_file_truncate(&lfs, &file, SMALLSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => SMALLSIZE;
|
||||
strcpy((char*)buffer, "comb");
|
||||
size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < SMALLSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, SMALLSIZE-j))
|
||||
=> lfs_min(size, SMALLSIZE-j);
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => SMALLSIZE;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
|
||||
# more aggressive general truncation tests
|
||||
[cases.test_truncate_aggressive]
|
||||
defines.CONFIG = 'range(6)'
|
||||
defines.SMALLSIZE = 32
|
||||
defines.MEDIUMSIZE = 2048
|
||||
defines.LARGESIZE = 8192
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
#define COUNT 5
|
||||
const struct {
|
||||
lfs_off_t startsizes[COUNT];
|
||||
lfs_off_t startseeks[COUNT];
|
||||
lfs_off_t hotsizes[COUNT];
|
||||
lfs_off_t coldsizes[COUNT];
|
||||
} configs[] = {
|
||||
// cold shrinking
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE}},
|
||||
// cold expanding
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE}},
|
||||
// warm shrinking truncate
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, 0, 0, 0}},
|
||||
// warm expanding truncate
|
||||
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
|
||||
// mid-file shrinking truncate
|
||||
{{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{ LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE, LARGESIZE},
|
||||
{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, 0, 0, 0}},
|
||||
// mid-file expanding truncate
|
||||
{{ 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE, 2*LARGESIZE},
|
||||
{ 0, 0, SMALLSIZE, MEDIUMSIZE, LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE},
|
||||
{2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE, 2*LARGESIZE}},
|
||||
};
|
||||
|
||||
const lfs_off_t *startsizes = configs[CONFIG].startsizes;
|
||||
const lfs_off_t *startseeks = configs[CONFIG].startseeks;
|
||||
const lfs_off_t *hotsizes = configs[CONFIG].hotsizes;
|
||||
const lfs_off_t *coldsizes = configs[CONFIG].coldsizes;
|
||||
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path,
|
||||
LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < startsizes[i]; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, size) => size;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => startsizes[i];
|
||||
|
||||
if (startseeks[i] != startsizes[i]) {
|
||||
lfs_file_seek(&lfs, &file,
|
||||
startseeks[i], LFS_SEEK_SET) => startseeks[i];
|
||||
}
|
||||
|
||||
lfs_file_truncate(&lfs, &file, hotsizes[i]) => 0;
|
||||
lfs_file_size(&lfs, &file) => hotsizes[i];
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => hotsizes[i];
|
||||
|
||||
size_t size = strlen("hair");
|
||||
lfs_off_t j = 0;
|
||||
for (; j < startsizes[i] && j < hotsizes[i]; j += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
|
||||
for (; j < hotsizes[i]; j += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_truncate(&lfs, &file, coldsizes[i]) => 0;
|
||||
lfs_file_size(&lfs, &file) => coldsizes[i];
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
|
||||
for (unsigned i = 0; i < COUNT; i++) {
|
||||
char path[1024];
|
||||
sprintf(path, "hairyhead%d", i);
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, path, LFS_O_RDONLY) => 0;
|
||||
lfs_file_size(&lfs, &file) => coldsizes[i];
|
||||
|
||||
size_t size = strlen("hair");
|
||||
lfs_off_t j = 0;
|
||||
for (; j < startsizes[i] && j < hotsizes[i] && j < coldsizes[i];
|
||||
j += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "hair", size) => 0;
|
||||
}
|
||||
|
||||
for (; j < coldsizes[i]; j += size) {
|
||||
uint8_t buffer[1024];
|
||||
lfs_file_read(&lfs, &file, buffer, size) => size;
|
||||
memcmp(buffer, "\0\0\0\0", size) => 0;
|
||||
}
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
}
|
||||
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
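One detail worth calling out from the aggressive cases: an expanding truncate, followed by a read past the old end of data, yields zero bytes, which is exactly what the "\0\0\0\0" comparisons above assert. A short hedged sketch of that behaviour in isolation (file name and sizes are arbitrary, error checks trimmed):

    #include <stdint.h>
    #include "littlefs/lfs.h"

    // Sketch: an expanding truncate produces a zero-filled tail.
    // Assumes lfs is already mounted.
    static int demo_expanding_truncate(lfs_t *lfs) {
        lfs_file_t file;
        int err = lfs_file_open(lfs, &file, "grow_me",
                LFS_O_RDWR | LFS_O_CREAT | LFS_O_TRUNC);
        if (err) {
            return err;
        }
        lfs_file_write(lfs, &file, "hair", 4);
        lfs_file_truncate(lfs, &file, 16);           // grow from 4 to 16 bytes
        lfs_file_seek(lfs, &file, 4, LFS_SEEK_SET);
        uint8_t tail[12];
        lfs_file_read(lfs, &file, tail, sizeof(tail));
        // tail[0..11] now read back as 0x00, matching the assertions above
        return lfs_file_close(lfs, &file);
    }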
|
||||
# noop truncate
|
||||
[cases.test_truncate_nop]
|
||||
defines.MEDIUMSIZE = [32, 33, 512, 513, 2048, 2049, 8192, 8193]
|
||||
code = '''
|
||||
lfs_t lfs;
|
||||
lfs_format(&lfs, cfg) => 0;
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_t file;
|
||||
lfs_file_open(&lfs, &file, "baldynoop",
|
||||
LFS_O_RDWR | LFS_O_CREAT) => 0;
|
||||
|
||||
uint8_t buffer[1024];
|
||||
strcpy((char*)buffer, "hair");
|
||||
size_t size = strlen((char*)buffer);
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_write(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
|
||||
// this truncate should do nothing
|
||||
lfs_file_truncate(&lfs, &file, j+lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
|
||||
// should do nothing again
|
||||
lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
|
||||
// still there after reboot?
|
||||
lfs_mount(&lfs, cfg) => 0;
|
||||
lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
|
||||
lfs_file_size(&lfs, &file) => MEDIUMSIZE;
|
||||
for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
|
||||
lfs_file_read(&lfs, &file, buffer, lfs_min(size, MEDIUMSIZE-j))
|
||||
=> lfs_min(size, MEDIUMSIZE-j);
|
||||
memcmp(buffer, "hair", lfs_min(size, MEDIUMSIZE-j)) => 0;
|
||||
}
|
||||
lfs_file_read(&lfs, &file, buffer, size) => 0;
|
||||
lfs_file_close(&lfs, &file) => 0;
|
||||
lfs_unmount(&lfs) => 0;
|
||||
'''
|
||||
196
components/joltwallet__littlefs/src/littlefs_api.h
Normal file
|
|
@@ -0,0 +1,196 @@
|
|||
#ifndef ESP_LITTLEFS_API_H__
|
||||
#define ESP_LITTLEFS_API_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "freertos/semphr.h"
|
||||
#include "esp_vfs.h"
|
||||
#include "esp_partition.h"
|
||||
#include "littlefs/lfs.h"
|
||||
#include "sdkconfig.h"
|
||||
|
||||
#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
|
||||
#include <sdmmc_cmd.h>
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if CONFIG_LITTLEFS_USE_MTIME
|
||||
#define ESP_LITTLEFS_ATTR_COUNT 1
|
||||
#else
|
||||
#define ESP_LITTLEFS_ATTR_COUNT 0
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief a file descriptor
|
||||
 * It also serves as a node in a singly linked list used for keeping track of all opened file descriptors
|
||||
*
|
||||
* Shortcomings/potential issues of 32-bit hash (when CONFIG_LITTLEFS_USE_ONLY_HASH) listed here:
|
||||
* * unlink - If a different file is open that generates a hash collision, it will report an
|
||||
* error that it cannot unlink an open file.
|
||||
* * rename - If a different file is open that generates a hash collision with
|
||||
* src or dst, it will report an error that it cannot rename an open file.
|
||||
* Potential consequences:
|
||||
 *   1. A file cannot be deleted while a collision-generating file is open.
|
||||
* Worst-case, if the other file is always open during the lifecycle
|
||||
 *      of your app, its colliding file cannot be deleted, which in the
|
||||
* worst-case could cause storage-capacity issues.
|
||||
* 2. Same as (1), but for renames
|
||||
*/
|
||||
typedef struct _vfs_littlefs_file_t {
|
||||
lfs_file_t file;
|
||||
|
||||
/* Allocate all other necessary buffers */
|
||||
struct lfs_file_config lfs_file_config;
|
||||
uint8_t lfs_buffer[CONFIG_LITTLEFS_CACHE_SIZE];
|
||||
#if ESP_LITTLEFS_ATTR_COUNT
|
||||
struct lfs_attr lfs_attr[ESP_LITTLEFS_ATTR_COUNT];
|
||||
time_t lfs_attr_time_buffer;
|
||||
#endif
|
||||
|
||||
uint32_t hash;
|
||||
struct _vfs_littlefs_file_t * next; /*!< Pointer to next file in Singly Linked List */
|
||||
#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH
|
||||
char * path;
|
||||
#endif
|
||||
} vfs_littlefs_file_t;
|
||||
|
||||
/**
|
||||
* @brief littlefs definition structure
|
||||
*/
|
||||
typedef struct {
|
||||
lfs_t *fs; /*!< Handle to the underlying littlefs */
|
||||
SemaphoreHandle_t lock; /*!< FS lock */
|
||||
|
||||
#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
|
||||
sdmmc_card_t *sdcard; /*!< The SD card driver handle on which littlefs is located */
|
||||
#endif
|
||||
|
||||
const esp_partition_t* partition; /*!< The partition on which littlefs is located */
|
||||
|
||||
#ifdef CONFIG_LITTLEFS_MMAP_PARTITION
|
||||
const void *mmap_data; /*!< Buffer of mmapped partition */
|
||||
esp_partition_mmap_handle_t mmap_handle; /*!< Handle to mmapped partition */
|
||||
#endif
|
||||
|
||||
char base_path[ESP_VFS_PATH_MAX+1]; /*!< Mount point */
|
||||
|
||||
struct lfs_config cfg; /*!< littlefs Mount configuration */
|
||||
|
||||
vfs_littlefs_file_t *file; /*!< Singly Linked List of files */
|
||||
|
||||
vfs_littlefs_file_t **cache; /*!< A cache of pointers to the opened files */
|
||||
uint16_t cache_size; /*!< The cache allocated size (in pointers) */
|
||||
uint16_t fd_count; /*!< The count of opened file descriptor used to speed up computation */
|
||||
bool read_only; /*!< Filesystem is read-only */
|
||||
} esp_littlefs_t;
|
||||
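The hash-collision caveats documented on vfs_littlefs_file_t above come down to one lookup: with CONFIG_LITTLEFS_USE_ONLY_HASH the open-file list can only be searched by its 32-bit hash, so a collision is indistinguishable from the file genuinely being open. The sketch below illustrates that lookup; the FNV-1a hash and the helper names are illustrative stand-ins, not the component's actual implementation:

    #include <stdbool.h>
    #include <stdint.h>

    // Illustrative stand-in for the component's 32-bit path hash (FNV-1a).
    static uint32_t path_hash(const char *path) {
        uint32_t h = 2166136261u;
        for (; *path; path++) {
            h ^= (uint8_t)*path;
            h *= 16777619u;
        }
        return h;
    }

    // Walk the singly linked list of open files looking for a hash match.
    // With CONFIG_LITTLEFS_USE_ONLY_HASH this is all the information kept,
    // so unlink/rename must treat a match as "possibly open".
    static bool is_possibly_open(const esp_littlefs_t *efs, const char *path) {
        uint32_t h = path_hash(path);
        for (const vfs_littlefs_file_t *f = efs->file; f; f = f->next) {
            if (f->hash == h) {
                return true;
            }
        }
        return false;
    }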
|
||||
#ifdef CONFIG_LITTLEFS_MMAP_PARTITION
|
||||
/**
|
||||
* @brief Read a region in a block, only for use with an mmapped partition.
|
||||
*
|
||||
 * Negative error codes are propagated to the user.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_esp_part_read_mmap(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Read a region in a block.
|
||||
*
|
||||
 * Negative error codes are propagated to the user.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_esp_part_read(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
/**
|
||||
* @brief Program a region in a block.
|
||||
*
|
||||
* The block must have previously been erased.
|
||||
 * Negative error codes are propagated to the user.
|
||||
* May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_esp_part_write(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
/**
|
||||
* @brief Erase a block.
|
||||
*
|
||||
* A block must be erased before being programmed.
|
||||
* The state of an erased block is undefined.
|
||||
 * Negative error codes are propagated to the user.
|
||||
* May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_esp_part_erase(const struct lfs_config *c, lfs_block_t block);
|
||||
|
||||
/**
|
||||
* @brief Sync the state of the underlying block device.
|
||||
*
|
||||
 * Negative error codes are propagated to the user.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_esp_part_sync(const struct lfs_config *c);
|
||||
|
||||
#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
|
||||
|
||||
/**
|
||||
* @brief Read a region in a block on SD card
|
||||
*
|
||||
 * Negative error codes are propagated to the user.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_sdmmc_read(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, void *buffer, lfs_size_t size);
|
||||
|
||||
/**
|
||||
* @brief Program a region in a block on SD card.
|
||||
*
|
||||
* The block must have previously been erased.
|
||||
 * Negative error codes are propagated to the user.
|
||||
* May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_sdmmc_write(const struct lfs_config *c, lfs_block_t block,
|
||||
lfs_off_t off, const void *buffer, lfs_size_t size);
|
||||
|
||||
/**
|
||||
* @brief Erase a block on SD card.
|
||||
*
|
||||
* A block must be erased before being programmed.
|
||||
* The state of an erased block is undefined.
|
||||
* Negative error codes are propogated to the user.
|
||||
* May return LFS_ERR_CORRUPT if the block should be considered bad.
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_sdmmc_erase(const struct lfs_config *c, lfs_block_t block);
|
||||
|
||||
/**
|
||||
* @brief Sync the state of the underlying SD card.
|
||||
*
|
||||
* Negative error codes are propogated to the user.
|
||||
*
|
||||
* @return errorcode. 0 on success.
|
||||
*/
|
||||
int littlefs_sdmmc_sync(const struct lfs_config *c);
|
||||
|
||||
#endif // CONFIG_LITTLEFS_SDMMC_SUPPORT
|
||||
|
||||
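Both callback families fill the same `lfs_config` slots, so a mount routine only has to pick one set depending on how the filesystem is backed. The sketch below illustrates that selection under stated assumptions; `example_select_backend()` is a hypothetical helper, not part of the component's API, which performs this wiring internally.

/* Sketch only: selects the SDMMC or esp_partition callback set based on how
 * the esp_littlefs_t instance is backed. example_select_backend() is a
 * hypothetical helper name. */
#include "sdkconfig.h"
#include "littlefs_api.h"

static void example_select_backend(esp_littlefs_t *efs)
{
#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT
    if (efs->sdcard != NULL) {
        /* Filesystem lives on an SD card. */
        efs->cfg.read  = littlefs_sdmmc_read;
        efs->cfg.prog  = littlefs_sdmmc_write;
        efs->cfg.erase = littlefs_sdmmc_erase;
        efs->cfg.sync  = littlefs_sdmmc_sync;
        return;
    }
#endif
    /* Default: filesystem lives on an internal flash partition. */
    efs->cfg.read  = littlefs_esp_part_read;
    efs->cfg.prog  = littlefs_esp_part_write;
    efs->cfg.erase = littlefs_esp_part_erase;
    efs->cfg.sync  = littlefs_esp_part_sync;
}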
#ifdef __cplusplus
}
#endif

#endif
89
components/joltwallet__littlefs/src/littlefs_esp_part.c
Normal file
@@ -0,0 +1,89 @@
/**
 * @file littlefs_esp_part.c
 * @brief Maps the HAL of esp_partition <-> littlefs
 * @author Brian Pugh
 */

//#define ESP_LOCAL_LOG_LEVEL ESP_LOG_INFO

#include <string.h>
#include "esp_log.h"
#include "esp_partition.h"
#include "esp_vfs.h"
#include "littlefs/lfs.h"
#include "esp_littlefs.h"
#include "littlefs_api.h"

#ifdef CONFIG_LITTLEFS_WDT_RESET
#include "esp_task_wdt.h"
#endif

#ifdef CONFIG_LITTLEFS_MMAP_PARTITION
int littlefs_esp_part_read_mmap(const struct lfs_config *c, lfs_block_t block,
                                lfs_off_t off, void *buffer, lfs_size_t size) {
    esp_littlefs_t * efs = c->context;
    size_t part_off = (block * c->block_size) + off;
    if (part_off > efs->partition->size || part_off + size > efs->partition->size) {
        ESP_LOGE(ESP_LITTLEFS_TAG, "attempt to read out of bounds of mmapped region %08x-%08x", (unsigned int)part_off, (unsigned int)(part_off + size));
        return LFS_ERR_IO;
    }
    memcpy(buffer, (const uint8_t *)efs->mmap_data + part_off, size);
    return 0;
}
#endif
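For this mmapped read path to work, `mmap_data` and `mmap_handle` must already have been populated when the partition was mounted. A minimal sketch of how that could be done with ESP-IDF's `esp_partition_mmap()` follows; the helper name `example_mmap_partition()` is an assumption, error handling is reduced to the bare minimum, and very large partitions may not fit the MMU window.

/* Sketch only: maps the whole data partition into the address space so
 * littlefs_esp_part_read_mmap() can memcpy() from it. example_mmap_partition()
 * is a hypothetical helper, not part of the component's API. */
#include "sdkconfig.h"
#include "esp_partition.h"
#include "littlefs_api.h"

#ifdef CONFIG_LITTLEFS_MMAP_PARTITION
static esp_err_t example_mmap_partition(esp_littlefs_t *efs)
{
    return esp_partition_mmap(efs->partition,
                              0,                       /* offset into the partition */
                              efs->partition->size,    /* map the entire partition */
                              ESP_PARTITION_MMAP_DATA, /* byte-addressable data mapping */
                              &efs->mmap_data,
                              &efs->mmap_handle);
}
#endif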
int littlefs_esp_part_read(const struct lfs_config *c, lfs_block_t block,
                           lfs_off_t off, void *buffer, lfs_size_t size) {
    esp_littlefs_t * efs = c->context;
    size_t part_off = (block * c->block_size) + off;

#ifdef CONFIG_LITTLEFS_WDT_RESET
    esp_task_wdt_reset();
#endif

    esp_err_t err = esp_partition_read(efs->partition, part_off, buffer, size);
    if (err) {
        ESP_LOGE(ESP_LITTLEFS_TAG, "failed to read addr %08x, size %08x, err %d", (unsigned int) part_off, (unsigned int) size, err);
        return LFS_ERR_IO;
    }
    return 0;
}

int littlefs_esp_part_write(const struct lfs_config *c, lfs_block_t block,
                            lfs_off_t off, const void *buffer, lfs_size_t size) {
    esp_littlefs_t * efs = c->context;
    size_t part_off = (block * c->block_size) + off;

#ifdef CONFIG_LITTLEFS_WDT_RESET
    esp_task_wdt_reset();
#endif

    esp_err_t err = esp_partition_write(efs->partition, part_off, buffer, size);
    if (err) {
        ESP_LOGE(ESP_LITTLEFS_TAG, "failed to write addr %08x, size %08x, err %d", (unsigned int) part_off, (unsigned int) size, err);
        return LFS_ERR_IO;
    }
    return 0;
}

int littlefs_esp_part_erase(const struct lfs_config *c, lfs_block_t block) {
    esp_littlefs_t * efs = c->context;
    size_t part_off = block * c->block_size;

#ifdef CONFIG_LITTLEFS_WDT_RESET
    esp_task_wdt_reset();
#endif

    esp_err_t err = esp_partition_erase_range(efs->partition, part_off, c->block_size);
    if (err) {
        ESP_LOGE(ESP_LITTLEFS_TAG, "failed to erase addr %08x, size %08x, err %d", (unsigned int) part_off, (unsigned int) c->block_size, err);
        return LFS_ERR_IO;
    }
    return 0;
}

int littlefs_esp_part_sync(const struct lfs_config *c) {
    /* Unnecessary for esp-idf */
    return 0;
}
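Because each of these callbacks takes the same `lfs_config` and recovers its state from `c->context`, the partition HAL above can be exercised on its own before handing the configuration to `lfs_mount()`. The snippet below is a hedged smoke-test sketch under the assumption that a fully wired `esp_littlefs_t` named `efs` already exists (see the earlier wiring sketch) and that the targeted block holds no filesystem data, since the test destroys its contents; `example_block_roundtrip()` and the buffer size are illustrative, not part of the component.

/* Sketch only: erase one block, program a small buffer, read it back, and
 * verify the round trip through the callbacks above. Assumes an initialized
 * esp_littlefs_t (cfg.context set, valid partition) and a scratch block. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "esp_log.h"
#include "littlefs/lfs.h"
#include "littlefs_api.h"

static bool example_block_roundtrip(esp_littlefs_t *efs, lfs_block_t block)
{
    uint8_t out[128];
    uint8_t in[128] = {0};
    for (size_t i = 0; i < sizeof(out); i++) out[i] = (uint8_t)i;

    if (littlefs_esp_part_erase(&efs->cfg, block) != 0) return false;
    if (littlefs_esp_part_write(&efs->cfg, block, 0, out, sizeof(out)) != 0) return false;
    if (littlefs_esp_part_read(&efs->cfg, block, 0, in, sizeof(in)) != 0) return false;

    bool ok = (memcmp(out, in, sizeof(out)) == 0);
    ESP_LOGI("lfs_hal_test", "block %u round trip %s", (unsigned)block, ok ? "ok" : "FAILED");
    return ok;
}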
Some files were not shown because too many files have changed in this diff.