commit 9454ed4f902ed7efabfc26e7b38a1459ab7ed89a Author: jasinco Date: Wed Dec 3 20:59:51 2025 +0800 init esp diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ce66cbf --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +build/ +sdkconfig +sdkconfig.old diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..6ea5bec --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,6 @@ +# The following five lines of boilerplate have to be in your project's +# CMakeLists in this exact order for cmake to work correctly +cmake_minimum_required(VERSION 3.16) + +include($ENV{IDF_PATH}/tools/cmake/project.cmake) +project(controlplane) diff --git a/components/ctx.graphics b/components/ctx.graphics new file mode 160000 index 0000000..cbdf96f --- /dev/null +++ b/components/ctx.graphics @@ -0,0 +1 @@ +Subproject commit cbdf96f6666341fb667defbfd2189876e0084412 diff --git a/components/joltwallet__littlefs/.bumpversion.cfg b/components/joltwallet__littlefs/.bumpversion.cfg new file mode 100644 index 0000000..5ed1a6f --- /dev/null +++ b/components/joltwallet__littlefs/.bumpversion.cfg @@ -0,0 +1,38 @@ +[bumpversion] +current_version = 1.20.3 +commit = True +tag = True + +[bumpversion:file:README.md] +search = littlefs=={current_version} +replace = littlefs=={new_version} + +[bumpversion:file:idf_component.yml] +search = "{current_version}" +replace = "{new_version}" + +[bumpversion:file:library.json] +search = "{current_version}" +replace = "{new_version}" + +[bumpversion:file(number):include/esp_littlefs.h] +search = ESP_LITTLEFS_VERSION_NUMBER "{current_version}" +replace = ESP_LITTLEFS_VERSION_NUMBER "{new_version}" + +[bumpversion:file(major):include/esp_littlefs.h] +parse = (?P<major>\d+) +serialize = {major} +search = ESP_LITTLEFS_VERSION_MAJOR {current_version} +replace = ESP_LITTLEFS_VERSION_MAJOR {new_version} + +[bumpversion:file(minor):include/esp_littlefs.h] +parse = (?P<minor>\d+) +serialize = {minor} +search = ESP_LITTLEFS_VERSION_MINOR {current_version} +replace = ESP_LITTLEFS_VERSION_MINOR {new_version} + +[bumpversion:file(patch):include/esp_littlefs.h] +parse = (?P<patch>\d+) +serialize = {patch} +search = ESP_LITTLEFS_VERSION_PATCH {current_version} +replace = ESP_LITTLEFS_VERSION_PATCH {new_version} diff --git a/components/joltwallet__littlefs/.component_hash b/components/joltwallet__littlefs/.component_hash new file mode 100644 index 0000000..dd83046 --- /dev/null +++ b/components/joltwallet__littlefs/.component_hash @@ -0,0 +1 @@ +1808d73e99168f6f3c26dd31799a248484762b3a320ec4962dec11a145f4277f \ No newline at end of file diff --git a/components/joltwallet__littlefs/.gitignore b/components/joltwallet__littlefs/.gitignore new file mode 100644 index 0000000..133dc70 --- /dev/null +++ b/components/joltwallet__littlefs/.gitignore @@ -0,0 +1,11 @@ +build/ +sdkconfig +sdkconfig.old + +example/build/ +example/sdkconfig +example/sdkconfig.old +example/dependencies.lock + +*.DS_Store +*/.cache diff --git a/components/joltwallet__littlefs/.gitmodules b/components/joltwallet__littlefs/.gitmodules new file mode 100644 index 0000000..a0cb7c4 --- /dev/null +++ b/components/joltwallet__littlefs/.gitmodules @@ -0,0 +1,3 @@ +[submodule "main/littlefs"] + path = src/littlefs + url = https://github.com/littlefs-project/littlefs.git diff --git a/components/joltwallet__littlefs/CHECKSUMS.json b/components/joltwallet__littlefs/CHECKSUMS.json new file mode 100644 index 0000000..c682b6a --- /dev/null +++ b/components/joltwallet__littlefs/CHECKSUMS.json @@ -0,0 +1 @@
+{"version":"1.0","algorithm":"sha256","created_at":"2025-11-02T02:16:37.529194+00:00","files":[{"path":".bumpversion.cfg","size":1102,"hash":"c51697b122e3a76a2fcecc0773fb1b78918512351c563cceb784d4d4712f0fe4"},{"path":".gitignore","size":134,"hash":"bf9558138b3ce333412bd965f55fc5a5bcd2713fb18a8e7fde8ea0cb74ad5dff"},{"path":".gitmodules","size":105,"hash":"5b0034485733a64bc3376d8f398fbd6eaa7dca00f936fe82a1d060d210b609cf"},{"path":"CMakeLists.txt","size":1274,"hash":"10d05276183a87a559c63b3084f1d179e3dde928148fc903374c81864e807d28"},{"path":"Kconfig","size":10820,"hash":"4bd38dd56dffd0493059b9ea1db42b6609eb031bad975a6ba812f4eb524c3aac"},{"path":"LICENSE","size":1057,"hash":"b6bbf3bd17c2b5532bc25a7acf1760faac0b995d6e0ad8bec29435166727d4c5"},{"path":"Makefile","size":402,"hash":"245677c98cbbd3fd965a443cf9a0cf4a06315a6fb685526fe1dc068383e4dc29"},{"path":"README.md","size":8290,"hash":"b03ab27387ca8355f45c91541f32b83aa49954a133a5f59b0ff4da65e1d3aa8b"},{"path":"component.mk","size":403,"hash":"a708de136a661830933b5ae8accde9a1cab8df012c93b02a3071e6fd7bb6a461"},{"path":"idf_component.yml","size":309,"hash":"a2e50e20c93d7520c52a6fa7080705335f03e17aa40de73f2dbc85a92691a17e"},{"path":"image-building-requirements.txt","size":24,"hash":"9434c691c9528226ab030f90868a01d1a10f955dd0d893f14acf91b433afaf9b"},{"path":"library.json","size":366,"hash":"2f5febdcb82b21632171b056b497bf997f98e63bab60915b0bfc60c3fd850a42"},{"path":"partition_table_unit_test_app.csv","size":841,"hash":"def32ec95e0a148e2b379a7eac7cf34056ccf945a4313552c5ebf06c194574e5"},{"path":"project_include.cmake","size":3222,"hash":"ae639d7543f3f282c81d862ece1a42fa827770e1625e55f36d1808e48673d85a"},{"path":"sdkconfig.defaults","size":3136,"hash":"3f5ab4f28d6ebd09c209b949adba977f48f35499f6f0ea1dbdc20acd1bb707de"},{"path":"example/CMakeLists.txt","size":336,"hash":"568c60f4b5c74f14d61cb9bf221dae71efb84e39ddd22f70ce4a7d8bc1a8fdfd"},{"path":"example/Makefile","size":229,"hash":"212521b3d10f385df0b1005a4f217374d01c803bec9690f2e01cdcad334a628e"},{"path":"example/README.md","size":185,"hash":"09d2d66961bccefca2a8246f5dc4ec887259c7d2014d18da4c8aa3eae66df7d4"},{"path":"example/partitions_demo_esp_littlefs.csv","size":313,"hash":"a36770cecc5a895f7a58ce999fc7541abac3ac821ce1857a3474979ed6469cd1"},{"path":"example/sdkconfig.defaults","size":286,"hash":"487f39a98e483104741ada17b4e76c0acb360dc49daec2ab6a7d308c284f8847"},{"path":"include/esp_littlefs.h","size":6235,"hash":"127bb32d7f2f9270171f43aa85db4d4a115e59c1003e2dbd30e5b077e2d2ab18"},{"path":"src/esp_littlefs.c","size":80741,"hash":"8187355926c15bc3842af9c17bca98208de86b93a99e5b0da506e9656039beee"},{"path":"src/lfs_config.c","size":828,"hash":"c1db7e994651383127c3a2c874303c983b352dca05a0ce87ba27060f5795e481"},{"path":"src/lfs_config.h","size":7709,"hash":"2a0f8b13ce09d3bbfd71857b4452e274c31f4765d50046d9514a0623789ad6b9"},{"path":"src/littlefs_api.h","size":6342,"hash":"aed2417260a6bb96106a1126766323a701684c1c5635fae8db5711c5c165145d"},{"path":"src/littlefs_esp_part.c","size":2841,"hash":"d557968b22c8426c17739844d922d65cabe43582f88abc1e5493c01d5e7445f0"},{"path":"src/littlefs_sdmmc.c","size":1878,"hash":"cd0f1eaf098f18312c96e15d6af4b1fd4e0d873c8c2ba4e4deecd7d009899f1d"},{"path":"test/CMakeLists.txt","size":232,"hash":"1db545d95a4e9903913707a9ef1ea6aadc318de894b40d7003090de53e823d43"},{"path":"test/component.mk","size":87,"hash":"25fb365628c1a43511a32d92f88779a9e55f6475a996973636a385f7401c3b87"},{"path":"test/test_benchmark.c","size":7787,"hash":"db68b711bf3c239e03b5974d9ce7f082fd97b41ec49c40dd5f425a75ecb1bf36"
},{"path":"test/test_dir.c","size":10362,"hash":"73d4ff2da5bc83eb57f6cf2054af070c1ba07aacf3e586bd99444dc8b978f9df"},{"path":"test/test_littlefs.c","size":30502,"hash":"4f6d91fbef76923779e32aa144eeed4f4bfa70aa2969b77d3c6364446b49ff12"},{"path":"test/test_littlefs_common.c","size":2052,"hash":"f7251c30f2cd8bcd579182a590e931af4c327ec719328eb6f8497652349eb3f4"},{"path":"test/test_littlefs_common.h","size":1115,"hash":"dd214941aab5816947982a29613e0a9e7ba84c15107e50e521ca3288dee956eb"},{"path":"test/test_littlefs_static_partition.c","size":6426,"hash":"3bf805914a5cd3bcf54a8a358c5cc63e08b011009c1a5755c7fb14e0077dd14d"},{"path":"test/testfs.bin","size":49152,"hash":"16225b76da120e172b41cf5b11cc976a8de553b625d5e2944cac2d4d2ad019dd"},{"path":"testdir/pangram.txt","size":43,"hash":"d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"},{"path":"testdir/test1.txt","size":5,"hash":"1b4f0e9851971998e732078544c96b36c3d01cedf7caa332359d6f1d83567014"},{"path":"testdir/test2.txt","size":5,"hash":"60303ae22b998861bce3b28f33eec1be758a213c86c93c076dbe9f558c11c752"},{"path":"testdir/test_folder/lorem_ipsum.txt","size":1162,"hash":"ce58a77a173174f448453d859ea9ebfed4d0f2ea284ab47f2c839b14f2643dea"},{"path":"src/littlefs/.git","size":41,"hash":"47e333a375b1a9a34ca35e3319b304ae97c7365da95799cbf1b4ee0fbb541fcd"},{"path":"src/littlefs/.gitattributes","size":193,"hash":"50ef1c298084dd4074e25dd4b3cb62879261541ffab67f5f13efa9036b7a3e37"},{"path":"src/littlefs/.gitignore","size":385,"hash":"6e209d9a8fab8b03bcac4708e7d1d6bf7b3a332c326834dbab761cc483a80683"},{"path":"src/littlefs/DESIGN.md","size":96235,"hash":"2a8b3459d74b9fa8bdd947738522c46331739405b1fc3d70f057f636a1dea768"},{"path":"src/littlefs/LICENSE.md","size":1523,"hash":"0cb4ff1daf5fdc1359c6a6ee3116092f08fc100c9d58b1b77ab17bfd801f856d"},{"path":"src/littlefs/Makefile","size":14916,"hash":"23ccc0a3332fa6d4a260b5cde929e340265d4eb2c81282147414c44451340f2b"},{"path":"src/littlefs/README.md","size":13677,"hash":"ccd5e4620280560dd982c3e4b2870fa31febb77600e0a9d6446f29d924529406"},{"path":"src/littlefs/SPEC.md","size":33698,"hash":"6dd74dfc58ac93589f8e002bb73c0830a59206912cfe8c951c07eb2bd5fccad0"},{"path":"src/littlefs/lfs.c","size":197434,"hash":"41b7aa8d6e1601ef26129399cc2e5988e736c82a4b89e31183be787722aebf07"},{"path":"src/littlefs/lfs.h","size":26733,"hash":"51c8215142691c2c177ef757ad09c95d96d75e5260eb403368d59ed14d5f11c0"},{"path":"src/littlefs/lfs_util.c","size":988,"hash":"f2fbde533670560434bd9f5a547174cc7c5a4670a02c47b4bd85180dced8b2ec"},{"path":"src/littlefs/lfs_util.h","size":8049,"hash":"548d46aa524dc7449e16739286c1a422a52f9de727ff0be0c2ffc5593f5ca981"},{"path":"src/littlefs/bd/lfs_emubd.c","size":22788,"hash":"cb4c45c8a5cb99da4ad209f1cdee712d05e737435cb5a8392cd0969424184ba8"},{"path":"src/littlefs/bd/lfs_emubd.h","size":7654,"hash":"9bfa7b73ab748c767495ad7f6039d8790f72d18f622af9f20c08b651ccc1190e"},{"path":"src/littlefs/bd/lfs_filebd.c","size":4934,"hash":"272cd0a62bb8596598bf9c3cb0b1aaf99f2a296e384237afaa4c11b5431b7718"},{"path":"src/littlefs/bd/lfs_filebd.h","size":1880,"hash":"ec1a79181e5cb96bd9a2ae6144ec392cf4f951285393ad41610833eb7dd30628"},{"path":"src/littlefs/bd/lfs_rambd.c","size":3810,"hash":"f25214fe0425a74c5698a2cb65a14e7cf657ca5321cc7d40853d5a06d6c6fa37"},{"path":"src/littlefs/bd/lfs_rambd.h","size":1933,"hash":"f78b559b14e60fd4badf10835dd9072a255c64826866d75008f05d295e7d17c6"},{"path":"src/littlefs/benches/bench_dir.toml","size":7107,"hash":"b40dae2359dff039abc7ed31ab0def12048034a37afaf7c9504aef18f3860be6"},{"path":"src/littlefs/benche
s/bench_file.toml","size":2681,"hash":"a6930fa61f92e4f76c3330b142051c56de7e290ec15389a1e8ecc5f2f8393602"},{"path":"src/littlefs/benches/bench_superblock.toml","size":1237,"hash":"67935ca003b7a7e476370ab8aee3adf75e98f6c80f3f509ddf9f5b94a1981562"},{"path":"src/littlefs/runners/bench_runner.c","size":68370,"hash":"7f8b243201733a5b2b23db3807f77ac06ab4434577461e27cd2ea1b3acd23a55"},{"path":"src/littlefs/runners/bench_runner.h","size":4169,"hash":"566e8ce66568fd2ee1f1c59899df0432d3856eef815a30569d63a8d772b7f711"},{"path":"src/littlefs/runners/test_runner.c","size":93635,"hash":"5c28f45b123ab9ec748186aa963931ddc61ef9576566ba95d68056a61b3623dc"},{"path":"src/littlefs/runners/test_runner.h","size":4070,"hash":"8545e70ee7d0bf6d295d5db78f75f7e12433cf42cc96bafe39dea9de81ad4bef"},{"path":"src/littlefs/scripts/bench.py","size":52237,"hash":"89960e94703bedae48a34525de9abb1c1d7e5ffd4d1faea68ffada9756cfe452"},{"path":"src/littlefs/scripts/changeprefix.py","size":5403,"hash":"403fa4480e3f4474fcfb048a27e8d590d4655417b7aa97e8cc7cfd0082638490"},{"path":"src/littlefs/scripts/code.py","size":23095,"hash":"115948040ac804b9d21d0afca3e82649e636652e4c1f2fd243fb5074cb790f4a"},{"path":"src/littlefs/scripts/cov.py","size":27397,"hash":"aa96789fec445d9403335a4c7cc17c428c2d66044c9234aba495ac0c278b2601"},{"path":"src/littlefs/scripts/data.py","size":22943,"hash":"b4e8ca5a8cd0b7eeab75b5e7e6750469c0c832c4c6cf33e9a87884d108485a55"},{"path":"src/littlefs/scripts/perf.py","size":45193,"hash":"2ebb5063f05c31fd292fe499e10a0daaf21add4747e65e971b7e01cb0a163388"},{"path":"src/littlefs/scripts/perfbd.py","size":44308,"hash":"2844461894231f48213d2a5ca77b16faa9f2861985b5db45d426cbd0f89a6848"},{"path":"src/littlefs/scripts/plot.py","size":54030,"hash":"2a7296d2b91c1d9619039fda3ef32f1f3565b37d50a2a79e9cb0098f33cf3635"},{"path":"src/littlefs/scripts/plotmpl.py","size":41869,"hash":"c9227baed38903db51dd2f04a1d60e9b23f4fc2857bab142646df975d288095c"},{"path":"src/littlefs/scripts/prettyasserts.py","size":16223,"hash":"b815a63e5b2f9f2d647accf24e8bb14b7e48783cce20104c1694c2da8b3208d2"},{"path":"src/littlefs/scripts/readblock.py","size":858,"hash":"bc3794a2b8f072219c402a8973b94e8eeeb4272abfaecc5c6c7c9e1896989d52"},{"path":"src/littlefs/scripts/readmdir.py","size":12631,"hash":"fab4c538130687e7bdb630c0598553d416d9e58855fbbaba696d35b81d873194"},{"path":"src/littlefs/scripts/readtree.py","size":6357,"hash":"5240676c50dd977aebefe115ab6e02e329b21e194934e9f5356c8384d34262e6"},{"path":"src/littlefs/scripts/stack.py","size":24367,"hash":"00a8f29e01d29e15ba6e9b613a0d2e8473f71eb503eb3d4e2840a209f3f216ec"},{"path":"src/littlefs/scripts/structs.py","size":21064,"hash":"eb8598c2ec9357bb74b8498ae18bdc899d7f457e392e3f3cdc536290c04239a4"},{"path":"src/littlefs/scripts/summary.py","size":25393,"hash":"8da904ee0ace70d0912ea5ed8635559522ad11ebf361441d29608c8a293fd5fe"},{"path":"src/littlefs/scripts/tailpipe.py","size":5275,"hash":"97735ead3d7d7c44e5334fe5c54cb4a14f812fa55dfe97ead53e7df66dd0f6d0"},{"path":"src/littlefs/scripts/teepipe.py","size":1993,"hash":"82dd8330dd385ee3a22cee4482c135aa19731fd0509c4cfca83304a3b7534d97"},{"path":"src/littlefs/scripts/test.py","size":54801,"hash":"1afd7cc5a0adaeadbb8aa5bff3c708877059d2fca59fe1c824f17dc7bc2d1bfe"},{"path":"src/littlefs/scripts/tracebd.py","size":31069,"hash":"65063ee26b83674dad19af0a7a55cecbc9f909f0c323437a72c3705ef72bb9fa"},{"path":"src/littlefs/scripts/watch.py","size":8135,"hash":"c6e2371007fa04d4e6fda8dc5687a3d663eb674c80ce78e6015aace639e06074"},{"path":"src/littlefs/tests/test_alloc.toml","size":24138,"hash"
:"816ff6d2ec41f0f585988fdf2555dae1e2fff84c54b5056739b8e245e887dfe3"},{"path":"src/littlefs/tests/test_attrs.toml","size":12219,"hash":"7649b115992f0c8d333b8bc39a8f1842772ac70204cf89daf8aaf052d713c167"},{"path":"src/littlefs/tests/test_badblocks.toml","size":7983,"hash":"4e9761bea21789eed27ab566730ac43ed11e24d22a49ce17c9480e34122ac10b"},{"path":"src/littlefs/tests/test_bd.toml","size":6811,"hash":"d9530de00553c808027732a80b3e9986575822acfa4f05f417410cff9f7eb9b4"},{"path":"src/littlefs/tests/test_compat.toml","size":42929,"hash":"50c97856ca5201640f8234f2d94c275a0f4aee3f15ecf6919ac088f395f2e043"},{"path":"src/littlefs/tests/test_dirs.toml","size":36281,"hash":"fb9b93aaed97832b3a5c92ba21110a928e0cac00cd43d7c7ea0ba1796c8b91b9"},{"path":"src/littlefs/tests/test_entries.toml","size":22546,"hash":"2f4e0cde81bd70513af344eaaf861feb2e2a748c7399bcb88137b3d6b05b9110"},{"path":"src/littlefs/tests/test_evil.toml","size":10596,"hash":"49938a860a01e907237758e51a0d264323783aeca1adca9346e8709ca3060cb5"},{"path":"src/littlefs/tests/test_exhaustion.toml","size":16965,"hash":"b6012493e985c0ba2a203c80b8d404b6ae801730fe57b31bc26b09df415e8ed4"},{"path":"src/littlefs/tests/test_files.toml","size":17620,"hash":"a8b6b0d9691c64650ec96842245fa9828d875411ee8aa25eaad64ac3678a23d2"},{"path":"src/littlefs/tests/test_interspersed.toml","size":8442,"hash":"c3592851cfe9db4cb9379b97937c439a366130b3261a3317a8e5918bebdef62b"},{"path":"src/littlefs/tests/test_move.toml","size":71009,"hash":"4d905f48918736c0630595c2f4cec7f69996bc6b87b98e8697d4981267db0945"},{"path":"src/littlefs/tests/test_orphans.toml","size":11054,"hash":"af6a6eb7a0b2c6c57bfe0df1886140d2b7e96ca99073819c73bbf0f24ad08a12"},{"path":"src/littlefs/tests/test_paths.toml","size":327195,"hash":"6a193e880d743f03da91c155ac6e7064e24d3759d2d59b47c4f120a2dc1d1255"},{"path":"src/littlefs/tests/test_powerloss.toml","size":5842,"hash":"8a533ed184c2dbfef7c79ad1caa386fea97a3ac367710d6b637bda11f40e5eb0"},{"path":"src/littlefs/tests/test_relocations.toml","size":18751,"hash":"8ad7d41467dc23e30f3e8cdcdc4f462d2312687d797884f634443109640fe24c"},{"path":"src/littlefs/tests/test_seek.toml","size":22299,"hash":"69940d9c7a696762e04063d73a6506327650c64941478d0261b5520079143271"},{"path":"src/littlefs/tests/test_shrink.toml","size":3730,"hash":"0973057fc74476d25ba5aa15f4625a7439b59cfa78399a8c00406a81d5f67355"},{"path":"src/littlefs/tests/test_superblocks.toml","size":20012,"hash":"7e32658faa899534607a2633f9d979ad361a30b74444005d43d9ef00410d6586"},{"path":"src/littlefs/tests/test_truncate.toml","size":17553,"hash":"de8a4e04961051c8c2471e16b54c29d115d72e830e78d5c751337b35c14cd98d"},{"path":"example/flash_data/example.txt","size":84,"hash":"ac44c81c7fecee476f2aff6097b510a4ead51505611ec3af730d49ad44430fcd"},{"path":"example/main/CMakeLists.txt","size":301,"hash":"c492cc65e508131fcc7e1e5e948e922dfa0490d0cd08d0619e415dfd04c4a99d"},{"path":"example/main/component.mk","size":146,"hash":"05313064022a3e181ef2a67bb839e6a186b887b44627453a0e5f84583e45d256"},{"path":"example/main/demo_esp_littlefs.c","size":5206,"hash":"4547078dc428ad7ece9e68199a0bf5c29cbd9a031067c57f9f0cddc6ffa753b4"}]} \ No newline at end of file diff --git a/components/joltwallet__littlefs/CMakeLists.txt b/components/joltwallet__littlefs/CMakeLists.txt new file mode 100644 index 0000000..d2b3e1f --- /dev/null +++ b/components/joltwallet__littlefs/CMakeLists.txt @@ -0,0 +1,47 @@ +cmake_minimum_required(VERSION 3.10) + +file(GLOB SOURCES src/littlefs/*.c) +list(APPEND SOURCES src/esp_littlefs.c src/littlefs_esp_part.c src/lfs_config.c) 
+ +if(IDF_TARGET STREQUAL "esp8266") + # ESP8266 configuration here +else() + # non-ESP8266 configuration + list(APPEND pub_requires sdmmc) + + if(CONFIG_LITTLEFS_SDMMC_SUPPORT) + list(APPEND SOURCES src/littlefs_sdmmc.c) + endif() +endif() + +list(APPEND pub_requires esp_partition) +list(APPEND priv_requires esptool_py spi_flash vfs) + +idf_component_register( + SRCS ${SOURCES} + INCLUDE_DIRS include + PRIV_INCLUDE_DIRS src + REQUIRES ${pub_requires} + PRIV_REQUIRES ${priv_requires} +) + +set_source_files_properties( + ${SOURCES} + PROPERTIES COMPILE_FLAGS "-DLFS_CONFIG=lfs_config.h" +) + +if(CONFIG_LITTLEFS_FCNTL_GET_PATH) + target_compile_definitions(${COMPONENT_LIB} PUBLIC -DF_GETPATH=${CONFIG_LITTLEFS_FCNTL_F_GETPATH_VALUE}) +endif() + +if(CONFIG_LITTLEFS_MULTIVERSION) + target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_MULTIVERSION) +endif() + +if(CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE) + target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_NO_MALLOC) +endif() + +if(NOT CONFIG_LITTLEFS_ASSERTS) + target_compile_definitions(${COMPONENT_LIB} PUBLIC -DLFS_NO_ASSERT) +endif() diff --git a/components/joltwallet__littlefs/Kconfig b/components/joltwallet__littlefs/Kconfig new file mode 100644 index 0000000..e023fdc --- /dev/null +++ b/components/joltwallet__littlefs/Kconfig @@ -0,0 +1,273 @@ +menu "LittleFS" + + config LITTLEFS_SDMMC_SUPPORT + bool "SDMMC support (requires ESP-IDF v5+)" + default n + help + Toggle SD card support. + This requires IDF v5+, as older ESP-IDF versions do not support SD card erase. + + config LITTLEFS_MAX_PARTITIONS + int "Maximum Number of Partitions" + default 3 + range 1 10 + help + Define the maximum number of partitions that can be mounted. + + config LITTLEFS_PAGE_SIZE + int "LITTLEFS logical page size" + default 256 + range 256 1024 + help + Logical page size of the LITTLEFS partition, in bytes. Must be a multiple + of the flash page size (which is usually 256 bytes). + Larger page sizes reduce overhead when storing large files, and + improve filesystem performance when reading large files. + Smaller page sizes reduce overhead when storing small (< page size) + files. + + config LITTLEFS_OBJ_NAME_LEN + int "Maximum object name length including NULL terminator." + default 64 + range 16 1022 + help + Includes the NULL terminator. If flashing a prebuilt filesystem image, + rebuild the filesystem image if this value changes. + mklittlefs, the tool that generates the image, will automatically be rebuilt. + If downloading a pre-built release of mklittlefs, it was most likely + built with LFS_NAME_MAX=32 and should not be used. + + config LITTLEFS_READ_SIZE + int "Minimum size of a block read." + default 128 + help + Minimum size of a block read. All read operations will be a + multiple of this value. + + config LITTLEFS_WRITE_SIZE + int "Minimum size of a block write." + default 128 + help + Minimum size of a block program. All write operations will be a + multiple of this value. + + config LITTLEFS_LOOKAHEAD_SIZE + int "Look ahead size." + default 128 + help + Look ahead size. Must be a multiple of 8. + + config LITTLEFS_CACHE_SIZE + int "Cache Size" + default 512 + help + Size of block caches. Each cache buffers a portion of a block in RAM. + The littlefs needs a read cache, a program cache, and one additional + cache per file. Larger caches can improve performance by storing more + data and reducing the number of disk accesses. Must be a multiple of + the read and program sizes, and a factor of the block size (4096).
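The four size options above (read, write, lookahead, cache) constrain one another. Below is a minimal standalone C sketch, not part of this commit, that encodes the documented constraints as compile-time checks; the values mirror the Kconfig defaults shown above and should be adjusted to match your sdkconfig.

```
/* Sketch: compile-time checks for the documented LittleFS size constraints. */
#include <assert.h>

#define BLOCK_SIZE     4096  /* fixed: ESP32 flash erase block */
#define READ_SIZE       128  /* CONFIG_LITTLEFS_READ_SIZE */
#define WRITE_SIZE      128  /* CONFIG_LITTLEFS_WRITE_SIZE */
#define LOOKAHEAD_SIZE  128  /* CONFIG_LITTLEFS_LOOKAHEAD_SIZE */
#define CACHE_SIZE      512  /* CONFIG_LITTLEFS_CACHE_SIZE */

static_assert(CACHE_SIZE % READ_SIZE == 0,  "cache must be a multiple of read size");
static_assert(CACHE_SIZE % WRITE_SIZE == 0, "cache must be a multiple of write size");
static_assert(BLOCK_SIZE % CACHE_SIZE == 0, "cache must be a factor of the block size");
static_assert(LOOKAHEAD_SIZE % 8 == 0,      "lookahead must be a multiple of 8");
```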
+ + config LITTLEFS_BLOCK_CYCLES + int "LittleFS wear-leveling block cycles" + default 512 + range -1 1024 + help + Number of erase cycles before littlefs evicts metadata logs and moves + the metadata to another block. Suggested values are in the + range 100-1000, with large values having better performance at the cost + of less consistent wear distribution. + Set to -1 to disable block-level wear-leveling. + + config LITTLEFS_USE_MTIME + bool "Save file modification time" + default "y" + help + Saves a timestamp on modification. Uses an additional 4 bytes. + + config LITTLEFS_USE_ONLY_HASH + bool "Don't store filepath in the file descriptor" + default "n" + help + Records the filepath only as a 32-bit hash in the file descriptor instead + of the entire filepath. Saves approximately `sizeof(filepath)` bytes + per file descriptor. + If enabled, functionality (like fstat) that requires the file path + from the file descriptor will not work. + In rare cases, may cause unlinking or renaming issues (unlikely) if + there's a hash collision between an open filepath and a filepath + to be modified. + + config LITTLEFS_HUMAN_READABLE + bool "Make errno human-readable" + default "n" + help + Converts LittleFS error codes into human-readable strings. + May increase binary size depending on logging level. + + choice LITTLEFS_MTIME + prompt "mtime attribute options" + depends on LITTLEFS_USE_MTIME + default LITTLEFS_MTIME_USE_SECONDS + help + Save an additional 4-byte attribute. Options listed below. + + config LITTLEFS_MTIME_USE_SECONDS + bool "Use Seconds" + help + Saves a timestamp on modification. + + config LITTLEFS_MTIME_USE_NONCE + bool "Use Nonce" + help + Saves a nonce on modification; intended for detecting file changes + on systems without access to an RTC. + + A file whose nonce is the same as it was at a previous time has + a high probability of not having been modified. + + Upon file modification, the nonce is incremented by one. Upon file + creation, a random nonce is assigned. + + There is a very slim chance that a file will have the same nonce if + it is deleted and created again (approx 1 in 4 billion). + + endchoice + + config LITTLEFS_SPIFFS_COMPAT + bool "Improve SPIFFS drop-in compatibility" + default "n" + help + Enabling this feature allows for greater drop-in compatibility + when replacing SPIFFS. Since SPIFFS doesn't have folders, and + folders are just considered part of a file name, enabling this + will automatically create folders as necessary to create a file + instead of throwing an error. Similarly, upon the deletion of the + last file in a folder, the folder will be deleted. It is recommended + to only enable this flag as a stop-gap solution. + + config LITTLEFS_FLUSH_FILE_EVERY_WRITE + bool "Flush file to flash after each write operation" + default "n" + help + Enabling this feature matches SPIFFS's fflush() behavior. + In SPIFFS, data is written immediately to flash storage when fflush() is called. + In LittleFS, fflush() does not write data to the flash; a subsequent fsync() call is needed. + With this feature, fflush() will write data to the storage.
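Since `LITTLEFS_FLUSH_FILE_EVERY_WRITE` defaults to off, portable code should not assume SPIFFS's fflush() semantics. A hedged sketch, not part of this commit, of the write pattern the help text above implies:

```
/* Sketch: with LITTLEFS_FLUSH_FILE_EVERY_WRITE disabled (the default),
 * fflush() only drains the stdio buffer; fsync() is what commits the
 * data to flash, per the help text above. */
#include <stdio.h>
#include <unistd.h>

static void write_durably(const char *path, const char *text)
{
    FILE *f = fopen(path, "w");
    if (f == NULL) {
        return;
    }
    fputs(text, f);
    fflush(f);          /* stdio buffer -> VFS */
    fsync(fileno(f));   /* VFS -> flash */
    fclose(f);
}
```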
+ + config LITTLEFS_OPEN_DIR + bool "Support opening directories" + default "n" + depends on !LITTLEFS_USE_ONLY_HASH && LITTLEFS_SPIFFS_COMPAT + help + Support opening a directory via the following API: + + int fd = open("my_directory", O_DIRECTORY); + + config LITTLEFS_FCNTL_GET_PATH + bool "Support getting a file or directory path" + default "n" + depends on !LITTLEFS_USE_ONLY_HASH + help + Support getting a file or directory path via the following APIs: + + char buffer[MAXPATHLEN]; + + int fd = open("my_file", flags); + fcntl(fd, F_GETPATH, buffer); + + config LITTLEFS_FCNTL_F_GETPATH_VALUE + int "Value of command F_GETPATH" + default 20 + depends on LITTLEFS_FCNTL_GET_PATH + help + ESP-IDF's "fcntl.h" header doesn't provide the macro "F_GETPATH", + so we define this macro here. + + config LITTLEFS_MULTIVERSION + bool "Support selecting the LittleFS minor version to write to disk" + default "n" + help + LittleFS 2.6 bumps the on-disk minor version of littlefs from lfs2.0 -> lfs2.1. + + This change is backwards-compatible, but after the first write with the new version, + the image on disk will no longer be mountable by older versions of littlefs. + + Enabling LITTLEFS_MULTIVERSION allows selecting the on-disk version + to use when writing, in the form of a 16-bit major version + + a 16-bit minor version. This limits metadata to what is supported by + older minor versions; note that some features will be lost. Defaults + to the most recent minor version when zero. + + choice LITTLEFS_DISK_VERSION + prompt "LITTLEFS_DISK_VERSION" + depends on LITTLEFS_MULTIVERSION + default LITTLEFS_DISK_VERSION_MOST_RECENT + help + See LITTLEFS_MULTIVERSION for details. + + config LITTLEFS_DISK_VERSION_MOST_RECENT + bool "Write the most recent LittleFS version" + + config LITTLEFS_DISK_VERSION_2_1 + bool "Write LittleFS 2.1" + + config LITTLEFS_DISK_VERSION_2_0 + bool "Write LittleFS 2.0 (no forward-looking erase-state CRCs)" + + endchoice + + choice LITTLEFS_MALLOC_STRATEGY + prompt "Buffer allocation strategy" + default LITTLEFS_MALLOC_STRATEGY_DEFAULT + help + Maps lfs_malloc to the ESP-IDF capabilities-based memory allocator, or + disables dynamic allocation in favour of user-provided static buffers. + + config LITTLEFS_MALLOC_STRATEGY_DISABLE + bool "Static buffers only" + help + Disallow dynamic allocation; static buffers must be provided by the calling application. + + config LITTLEFS_MALLOC_STRATEGY_DEFAULT + bool "Default heap selection" + help + Uses an automatic allocation strategy. On systems with heap in SPIRAM, if + the allocation size does not exceed SPIRAM_MALLOC_ALWAYSINTERNAL, then internal + heap allocation is preferred; otherwise, allocation will be attempted from the SPIRAM + heap. + + config LITTLEFS_MALLOC_STRATEGY_INTERNAL + bool "Internal heap" + help + Uses ESP-IDF heap_caps_malloc to allocate from the internal heap. + + config LITTLEFS_MALLOC_STRATEGY_SPIRAM + bool "SPIRAM heap" + depends on SPIRAM_USE_MALLOC || SPIRAM_USE_CAPS_ALLOC + help + Uses ESP-IDF heap_caps_malloc to allocate from the SPIRAM heap. + + endchoice + + config LITTLEFS_ASSERTS + bool "Enable asserts" + default "y" + help + Selects whether littlefs performs runtime assert checks. + + config LITTLEFS_MMAP_PARTITION + bool "Memory map LITTLEFS partitions" + default "n" + help + Use esp_partition_mmap to map the partitions to memory, which can provide a significant + performance boost in some cases. Make sure the chip you're using has enough available address + space to map the partition (for the ESP32 there is 4MB available).
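Because the component's CMakeLists.txt (shown earlier) publicly defines `F_GETPATH` from `CONFIG_LITTLEFS_FCNTL_F_GETPATH_VALUE`, application code can use the command directly. A small self-contained sketch, not part of this commit, expanding the fcntl() fragment from the help text above:

```
/* Sketch: recover a file's path from its descriptor when
 * LITTLEFS_FCNTL_GET_PATH is enabled. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/param.h>  /* MAXPATHLEN, as in the help text above */

static void print_fd_path(int fd)
{
    char buffer[MAXPATHLEN];
    if (fcntl(fd, F_GETPATH, buffer) != -1) {
        printf("fd %d is %s\n", fd, buffer);
    }
}
```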
+ + config LITTLEFS_WDT_RESET + bool "Reset task watchdog during flash operations" + default "n" + help + Enable calling esp_task_wdt_reset() during flash read/write/erase operations + to prevent task watchdog timeouts during long-running filesystem operations. + +endmenu diff --git a/components/joltwallet__littlefs/LICENSE b/components/joltwallet__littlefs/LICENSE new file mode 100644 index 0000000..2101ea9 --- /dev/null +++ b/components/joltwallet__littlefs/LICENSE @@ -0,0 +1,7 @@ +Copyright 2020 Brian Pugh + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/components/joltwallet__littlefs/Makefile b/components/joltwallet__littlefs/Makefile new file mode 100644 index 0000000..76183ee --- /dev/null +++ b/components/joltwallet__littlefs/Makefile @@ -0,0 +1,28 @@ +PROJECT_NAME := littlefs + +EXTRA_COMPONENT_DIRS := \ + $(abspath .) \ + $(abspath unit_tester) \ + $(IDF_PATH)/tools/unit-test-app/components/ + +CFLAGS += \ + -Werror + +include $(IDF_PATH)/make/project.mk + +.PHONY: tests + +tests-build: + $(MAKE) \ + TEST_COMPONENTS='src' + +tests: + $(MAKE) \ + TEST_COMPONENTS='src' \ + flash monitor; + +tests-enc: + $(MAKE) \ + TEST_COMPONENTS='src' \ + encrypted-flash monitor; + diff --git a/components/joltwallet__littlefs/README.md b/components/joltwallet__littlefs/README.md new file mode 100644 index 0000000..99af2a8 --- /dev/null +++ b/components/joltwallet__littlefs/README.md @@ -0,0 +1,267 @@ +LittleFS for ESP-IDF. + +# What is LittleFS? + +[LittleFS](https://github.com/ARMmbed/littlefs) is a small fail-safe filesystem +for microcontrollers. We ported LittleFS to esp-idf (specifically, the ESP32) +because SPIFFS was too slow, and FAT was too fragile. + +# How to Use + +## ESP-IDF + +There are two ways to add this component to your project: + +1. As an ESP-IDF managed component: In your project directory, run + +``` +idf.py add-dependency joltwallet/littlefs==1.20.3 +``` + +2. As a submodule: In your project, add this as a submodule to your `components/` directory. + +``` +git submodule add https://github.com/joltwallet/esp_littlefs.git +git submodule update --init --recursive +``` + +The library can be configured via `idf.py menuconfig` under `Component config->LittleFS`. + +#### Example +User @wreyford has kindly provided a [demo repo](https://github.com/wreyford/demo_esp_littlefs) showing the use of `esp_littlefs`. A modified copy exists in the `example/` directory.
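Whichever way the component is added, mounting comes down to a single call. A condensed sketch follows; the commit's own demo (`example/main/demo_esp_littlefs.c`, shown later) is the authoritative version:

```
#include "esp_err.h"
#include "esp_littlefs.h"
#include "esp_log.h"

void mount_littlefs(void)
{
    esp_vfs_littlefs_conf_t conf = {
        .base_path = "/littlefs",       /* VFS mount point */
        .partition_label = "littlefs",  /* entry in the partition table */
        .format_if_mount_failed = true,
    };
    esp_err_t err = esp_vfs_littlefs_register(&conf);
    if (err != ESP_OK) {
        ESP_LOGE("littlefs", "mount failed: %s", esp_err_to_name(err));
    }
}
```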
+ +## PlatformIO +Add the following line to your project's `platformio.ini` file: + +``` +lib_deps = https://github.com/joltwallet/esp_littlefs.git +``` + +Example `platformio.ini` file: + +``` +[env] +platform = espressif32 +framework = espidf +monitor_speed = 115200 + +[common] +lib_deps = https://github.com/joltwallet/esp_littlefs.git + +[env:nodemcu-32s] +board = nodemcu-32s +board_build.filesystem = littlefs +board_build.partitions = min_littlefs.csv +lib_deps = ${common.lib_deps} +``` + +Example `min_littlefs.csv` flash partition layout: +``` +# Name, Type, SubType, Offset, Size, Flags +nvs, data, nvs, 0x9000, 0x5000, +otadata, data, ota, 0xe000, 0x2000, +app0, app, ota_0, 0x10000, 0x1E0000, +app1, app, ota_1, 0x1F0000,0x1E0000, +littlefs, data, littlefs, 0x3D0000,0x20000, +coredump, data, coredump, 0x3F0000,0x10000, +``` + +[Currently, it is required](https://github.com/platformio/platform-espressif32/issues/479) to modify `CMakeLists.txt`. Add the following 2 lines to your project's `CMakeLists.txt`: + +``` +get_filename_component(configName "${CMAKE_BINARY_DIR}" NAME) +list(APPEND EXTRA_COMPONENT_DIRS "${CMAKE_SOURCE_DIR}/.pio/libdeps/${configName}/esp_littlefs") +``` + +Example `CMakeLists.txt`: + +``` +cmake_minimum_required(VERSION 3.16.0) +include($ENV{IDF_PATH}/tools/cmake/project.cmake) + +get_filename_component(configName "${CMAKE_BINARY_DIR}" NAME) +list(APPEND EXTRA_COMPONENT_DIRS "${CMAKE_SOURCE_DIR}/.pio/libdeps/${configName}/esp_littlefs") + +project(my_project_name_here) +``` + +To configure LittleFS from PlatformIO, run the following command: + +```console +$ pio run -t menuconfig +``` +An entry `Component config->LittleFS` should be available for configuration. If not, check your `CMakeLists.txt` configuration. + + +# Documentation + +See the official [ESP-IDF SPIFFS documentation](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/storage/spiffs.html). Basically, all the functionality is the +same; just replace `spiffs` with `littlefs` in all function calls. + +Also see the comments in `include/esp_littlefs.h`. + +The slight differences between this configuration and SPIFFS's are in the `esp_vfs_littlefs_conf_t`: + +1. The `max_files` field doesn't exist, since we removed the file limit (thanks to @X-Ryl669). +2. `grow_on_mount` will expand an existing filesystem to fill the partition. Defaults to `false`. + * LittleFS filesystems can only grow; they cannot shrink. + +### Filesystem Image Creation + +At compile time, a filesystem image can be created and flashed to the device by adding the following to your project's `CMakeLists.txt` file: + +``` +littlefs_create_partition_image(partition_name path_to_folder_containing_files FLASH_IN_PROJECT) +``` + +If `FLASH_IN_PROJECT` is not specified, the image will still be generated, but you will have to flash it manually using `esptool.py`, `parttool.py`, or a custom build system target.
+ +For example, if your partition table looks like: + +``` +# Name, Type, SubType, Offset, Size, Flags +nvs, data, nvs, 0x9000, 0x6000, +phy_init, data, phy, 0xf000, 0x1000, +factory, app, factory, 0x10000, 1M, +graphics, data, spiffs, , 0xF0000, +``` + +change it to: + +``` +# Name, Type, SubType, Offset, Size, Flags +nvs, data, nvs, 0x9000, 0x6000, +phy_init, data, phy, 0xf000, 0x1000, +factory, app, factory, 0x10000, 1M, +graphics, data, littlefs, , 0xF0000, +``` + + +and your project has a folder called `device_graphics/`, your call should be: + +``` +littlefs_create_partition_image(graphics device_graphics FLASH_IN_PROJECT) +``` + + +# Performance + +Here are some naive benchmarks to give a rough indicator of performance. +Tests were performed with the following configuration: + +* ESP-IDF: v4.4 +* Target: ESP32 +* CPU Clock: 160MHz +* Flash SPI Freq: 80MHz +* Flash SPI Mode: QIO + +In these tests, FAT has a cache size of 4096 bytes, and SPIFFS has a cache size of 256 bytes. + +#### Formatting a 512KB partition + +``` +FAT: 549,494 us +SPIFFS: 10,715,425 us +LittleFS: 110,997 us +``` + +#### Writing 5 88KB files + +``` +FAT: 7,124,812 us +SPIFFS*: 99,138,905 us +LittleFS (cache=128): 8,261,920 us +LittleFS (cache=512 default): 6,356,247 us +LittleFS (cache=4096): 6,026,592 us +*Only wrote 374,784 bytes instead of the benchmark 440,000, so this value is extrapolated +``` + +In the above test, SPIFFS drastically slows down as the filesystem fills up. Below +is the specific breakdown of file write times for SPIFFS. Not sure what happens +on the last file write. + + +``` +SPIFFS: + +88000 bytes written in 2190635 us +88000 bytes written in 2190321 us +88000 bytes written in 5133605 us +88000 bytes written in 16570667 us +22784 bytes written in 73053677 us +``` + +#### Reading 5 88KB files + +``` +FAT: 5,685,230 us +SPIFFS*: 5,162,289 us +LittleFS (cache=128): 6,284,142 us +LittleFS (cache=512 default): 5,874,931 us +LittleFS (cache=4096): 5,731,385 us +*Only read 374,784 bytes instead of the benchmark 440,000, so this value is extrapolated +``` + +#### Deleting 5 88KB files + +``` +FAT: 680,358 us +SPIFFS*: 1,653,500 us +LittleFS (cache=128): 86,090 us +LittleFS (cache=512 default): 53,705 us +LittleFS (cache=4096): 27,709 us +*The 5th file was smaller; the value was not extrapolated. +``` + + +# Tips, Tricks, and Gotchas + +* LittleFS operates on blocks, and blocks have a size of 4096 bytes on the ESP32. + +* A freshly formatted LittleFS will have 2 blocks in use, making it seem like 8KB are in use. + +* The esp32 has [flash concurrency constraints](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/peripherals/spi_flash/spi_flash_concurrency.html#concurrency-constraints-for-flash-on-spi1). + When using UART (either for data transfer or generic logging) at the same time, you *MUST* enable the following option in Kconfig: + `menuconfig > Component config > Driver config > UART > UART ISR in IRAM`. + +# Running Unit Tests + +To flash the unit-tester app and the unit-tests, clone or symbolically link this +component to `$IDF_PATH/tools/unit-test-app/components/littlefs`. Make sure the +folder name is `littlefs`, not `esp_littlefs`.
Then, run the following: + +``` +cd $IDF_PATH/tools/unit-test-app +idf.py menuconfig # See notes +idf.py -T littlefs -p YOUR_PORT_HERE flash monitor +``` + +In `menuconfig`: + +* Set the partition table to `components/littlefs/partition_table_unit_test_app.csv` + +* Double check your crystal frequency `ESP32_XTAL_FREQ_SEL`; my board doesn't work with autodetect. + +To test on an encrypted partition, add the `encrypted` flag to the `flash_test` partition +in `partition_table_unit_test_app.csv`. I.e. + +``` +flash_test, data, spiffs, , 512K, encrypted +``` + +Also make sure that `CONFIG_SECURE_FLASH_ENC_ENABLED=y` in `menuconfig`. + +The unit tester can then be flashed via the command: + +``` +idf.py -T littlefs -p YOUR_PORT_HERE encrypted-flash monitor +``` + +# Breaking Changes + +* July 22, 2020 - Changed attribute type for file timestamp from `0` to `0x74` ('t' ascii value). +* May 3, 2023 - All logging tags have been changed to a unified `esp_littlefs`. + +# Acknowledgement + +This code base was heavily modeled after the SPIFFS esp-idf component. diff --git a/components/joltwallet__littlefs/component.mk b/components/joltwallet__littlefs/component.mk new file mode 100644 index 0000000..d725822 --- /dev/null +++ b/components/joltwallet__littlefs/component.mk @@ -0,0 +1,24 @@ +# +# Component Makefile +# + +COMPONENT_SRCDIRS := src src/littlefs + +COMPONENT_ADD_INCLUDEDIRS := include + +COMPONENT_PRIV_INCLUDEDIRS := src + +COMPONENT_SUBMODULES := src/littlefs + +CFLAGS += \ + -DLFS_CONFIG=lfs_config.h + +ifdef CONFIG_LITTLEFS_FCNTL_GET_PATH + CFLAGS += \ + -DF_GETPATH=$(CONFIG_LITTLEFS_FCNTL_F_GETPATH_VALUE) +endif + +ifdef CONFIG_LITTLEFS_MULTIVERSION + CFLAGS += \ + -DLFS_MULTIVERSION +endif diff --git a/components/joltwallet__littlefs/example/CMakeLists.txt b/components/joltwallet__littlefs/example/CMakeLists.txt new file mode 100644 index 0000000..def3a71 --- /dev/null +++ b/components/joltwallet__littlefs/example/CMakeLists.txt @@ -0,0 +1,9 @@ +# The following lines of boilerplate have to be in your project's +# CMakeLists in this exact order for cmake to work correctly +cmake_minimum_required(VERSION 3.5) + +# Add the root of this git repo to the component search path. +set(EXTRA_COMPONENT_DIRS "../") + +include($ENV{IDF_PATH}/tools/cmake/project.cmake) +project(demo_esp_littlefs) diff --git a/components/joltwallet__littlefs/example/Makefile b/components/joltwallet__littlefs/example/Makefile new file mode 100644 index 0000000..856c047 --- /dev/null +++ b/components/joltwallet__littlefs/example/Makefile @@ -0,0 +1,11 @@ +# +# This is a project Makefile. It is assumed the directory this Makefile resides in is a +# project subdirectory. +# + +PROJECT_NAME := demo_esp_littlefs + +EXTRA_COMPONENT_DIRS := $(realpath ..) + +include $(IDF_PATH)/make/project.mk + diff --git a/components/joltwallet__littlefs/example/README.md b/components/joltwallet__littlefs/example/README.md new file mode 100644 index 0000000..3a3cfdd --- /dev/null +++ b/components/joltwallet__littlefs/example/README.md @@ -0,0 +1,3 @@ +This example is based on [wreyford's](https://github.com/wreyford/demo_esp_littlefs) demo project. + +Modifications were made so that this example project could be built as a part of CI. 
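Beyond register/unregister, the public header added later in this commit (`include/esp_littlefs.h`) also declares label-based helpers for mount state and space usage. A hedged usage sketch, not part of this commit:

```
#include "esp_littlefs.h"
#include "esp_log.h"

/* Sketch: query mount state and usage via the label-based API. */
static void report_usage(const char *label)
{
    if (!esp_littlefs_mounted(label)) {
        ESP_LOGW("littlefs", "partition '%s' is not mounted", label);
        return;
    }
    size_t total = 0, used = 0;
    if (esp_littlefs_info(label, &total, &used) == ESP_OK) {
        ESP_LOGI("littlefs", "'%s': %u of %u bytes used",
                 label, (unsigned)used, (unsigned)total);
    }
}
```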
diff --git a/components/joltwallet__littlefs/example/flash_data/example.txt b/components/joltwallet__littlefs/example/flash_data/example.txt new file mode 100644 index 0000000..aa38d70 --- /dev/null +++ b/components/joltwallet__littlefs/example/flash_data/example.txt @@ -0,0 +1 @@ +Example text to compile into a LittleFS disk image to be flashed to the ESP device. diff --git a/components/joltwallet__littlefs/example/main/CMakeLists.txt b/components/joltwallet__littlefs/example/main/CMakeLists.txt new file mode 100644 index 0000000..ab04fd2 --- /dev/null +++ b/components/joltwallet__littlefs/example/main/CMakeLists.txt @@ -0,0 +1,7 @@ +idf_component_register(SRCS "demo_esp_littlefs.c" + INCLUDE_DIRS "." + ) + +# Note: you must have a partition named the first argument (here it's "littlefs") +# in your partition table csv file. +littlefs_create_partition_image(littlefs ../flash_data FLASH_IN_PROJECT) diff --git a/components/joltwallet__littlefs/example/main/component.mk b/components/joltwallet__littlefs/example/main/component.mk new file mode 100644 index 0000000..0b9d758 --- /dev/null +++ b/components/joltwallet__littlefs/example/main/component.mk @@ -0,0 +1,5 @@ +# +# "main" pseudo-component makefile. +# +# (Uses default behaviour of compiling all source files in directory, adding 'include' to include path.) + diff --git a/components/joltwallet__littlefs/example/main/demo_esp_littlefs.c b/components/joltwallet__littlefs/example/main/demo_esp_littlefs.c new file mode 100644 index 0000000..d85e76c --- /dev/null +++ b/components/joltwallet__littlefs/example/main/demo_esp_littlefs.c @@ -0,0 +1,166 @@ +/* Demo ESP LittleFS Example + + This example code is in the Public Domain (or CC0 licensed, at your option.) + + Unless required by applicable law or agreed to in writing, this + software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, either express or implied. +*/ +#include "esp_err.h" +#include "esp_log.h" +#include "esp_system.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "sdkconfig.h" +#include <stdio.h> +#include <string.h> +#include <sys/unistd.h> +#include <sys/stat.h> +#include "esp_idf_version.h" +#include "esp_flash.h" +#include "esp_chip_info.h" +#include "spi_flash_mmap.h" + + +#include "esp_littlefs.h" + +static const char *TAG = "demo_esp_littlefs"; + +void app_main(void) +{ + printf("Demo LittleFs implementation by esp_littlefs!\n"); + printf(" https://github.com/joltwallet/esp_littlefs\n"); + + /* Print chip information */ + esp_chip_info_t chip_info; + esp_chip_info(&chip_info); + printf("This is %s chip with %d CPU cores, WiFi%s%s, ", + CONFIG_IDF_TARGET, + chip_info.cores, + (chip_info.features & CHIP_FEATURE_BT) ? "/BT" : "", + (chip_info.features & CHIP_FEATURE_BLE) ? "/BLE" : ""); + + printf("silicon revision %d, ", chip_info.revision); + + uint32_t size_flash_chip = 0; + esp_flash_get_size(NULL, &size_flash_chip); + printf("%uMB %s flash\n", (unsigned int)size_flash_chip >> 20, + (chip_info.features & CHIP_FEATURE_EMB_FLASH) ? "embedded" : "external"); + + printf("Free heap: %u\n", (unsigned int) esp_get_free_heap_size()); + + printf("Now we are starting the LittleFs Demo ...\n"); + + ESP_LOGI(TAG, "Initializing LittleFS"); + + esp_vfs_littlefs_conf_t conf = { + .base_path = "/littlefs", + .partition_label = "littlefs", + .format_if_mount_failed = true, + .dont_mount = false, + }; + + // Use settings defined above to initialize and mount LittleFS filesystem. + // Note: esp_vfs_littlefs_register is an all-in-one convenience function.
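+ // Besides .partition_label, the conf struct (see esp_littlefs.h) can also + // select the target via a raw .partition handle, or an .sdcard handle when + // CONFIG_LITTLEFS_SDMMC_SUPPORT is enabled.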
+ esp_err_t ret = esp_vfs_littlefs_register(&conf); + + if (ret != ESP_OK) + { + if (ret == ESP_FAIL) + { + ESP_LOGE(TAG, "Failed to mount or format filesystem"); + } + else if (ret == ESP_ERR_NOT_FOUND) + { + ESP_LOGE(TAG, "Failed to find LittleFS partition"); + } + else + { + ESP_LOGE(TAG, "Failed to initialize LittleFS (%s)", esp_err_to_name(ret)); + } + return; + } + + size_t total = 0, used = 0; + ret = esp_littlefs_info(conf.partition_label, &total, &used); + if (ret != ESP_OK) + { + ESP_LOGE(TAG, "Failed to get LittleFS partition information (%s)", esp_err_to_name(ret)); + } + else + { + ESP_LOGI(TAG, "Partition size: total: %d, used: %d", total, used); + } + + // Use POSIX and C standard library functions to work with files. + // First create a file. + ESP_LOGI(TAG, "Opening file"); + FILE *f = fopen("/littlefs/hello.txt", "w"); + if (f == NULL) + { + ESP_LOGE(TAG, "Failed to open file for writing"); + return; + } + fprintf(f, "LittleFS Rocks!\n"); + fclose(f); + ESP_LOGI(TAG, "File written"); + + // Check if destination file exists before renaming + struct stat st; + if (stat("/littlefs/foo.txt", &st) == 0) + { + // Delete it if it exists + unlink("/littlefs/foo.txt"); + } + + // Rename original file + ESP_LOGI(TAG, "Renaming file"); + if (rename("/littlefs/hello.txt", "/littlefs/foo.txt") != 0) + { + ESP_LOGE(TAG, "Rename failed"); + return; + } + + // Open renamed file for reading + ESP_LOGI(TAG, "Reading file"); + f = fopen("/littlefs/foo.txt", "r"); + if (f == NULL) + { + ESP_LOGE(TAG, "Failed to open file for reading"); + return; + } + + char line[128]; + char *pos; + + fgets(line, sizeof(line), f); + fclose(f); + // strip newline + pos = strchr(line, '\n'); + if (pos) + { + *pos = '\0'; + } + ESP_LOGI(TAG, "Read from file: '%s'", line); + + ESP_LOGI(TAG, "Reading from flashed filesystem example.txt"); + f = fopen("/littlefs/example.txt", "r"); + if (f == NULL) + { + ESP_LOGE(TAG, "Failed to open file for reading"); + return; + } + fgets(line, sizeof(line), f); + fclose(f); + // strip newline + pos = strchr(line, '\n'); + if (pos) + { + *pos = '\0'; + } + ESP_LOGI(TAG, "Read from file: '%s'", line); + + // All done, unmount partition and disable LittleFS + esp_vfs_littlefs_unregister(conf.partition_label); + ESP_LOGI(TAG, "LittleFS unmounted"); +} diff --git a/components/joltwallet__littlefs/example/partitions_demo_esp_littlefs.csv b/components/joltwallet__littlefs/example/partitions_demo_esp_littlefs.csv new file mode 100644 index 0000000..17f7ba1 --- /dev/null +++ b/components/joltwallet__littlefs/example/partitions_demo_esp_littlefs.csv @@ -0,0 +1,6 @@ +# Name, Type, SubType, Offset, Size, Flags +# Note: if you have increased the bootloader size, make sure to update the offsets to avoid overlap +nvs, data, nvs, 0x9000, 0x6000, +phy_init, data, phy, 0xf000, 0x1000, +factory, app, factory, 0x10000, 1M, +littlefs, data, littlefs, , 0xF0000, diff --git a/components/joltwallet__littlefs/example/sdkconfig.defaults b/components/joltwallet__littlefs/example/sdkconfig.defaults new file mode 100644 index 0000000..ab47bde --- /dev/null +++ b/components/joltwallet__littlefs/example/sdkconfig.defaults @@ -0,0 +1,12 @@ +CONFIG_PARTITION_TABLE_CUSTOM=y +CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions_demo_esp_littlefs.csv" + +# +# Serial flasher config +# +CONFIG_ESPTOOLPY_BAUD_921600B=y +CONFIG_ESPTOOLPY_COMPRESSED=y +CONFIG_ESPTOOLPY_MONITOR_BAUD_CONSOLE=y + +# BOOTLOADER +CONFIG_BOOTLOADER_LOG_LEVEL_WARN=y diff --git a/components/joltwallet__littlefs/idf_component.yml 
b/components/joltwallet__littlefs/idf_component.yml new file mode 100644 index 0000000..4dd900b --- /dev/null +++ b/components/joltwallet__littlefs/idf_component.yml @@ -0,0 +1,9 @@ +dependencies: + idf: '>=5.0' +description: LittleFS is a small fail-safe filesystem for micro-controllers. +repository: git://github.com/joltwallet/esp_littlefs.git +repository_info: + commit_sha: 8274371dc5912196f66ac3e71dbb6291760cb8b0 + path: . +url: https://github.com/joltwallet/esp_littlefs +version: 1.20.3 diff --git a/components/joltwallet__littlefs/image-building-requirements.txt b/components/joltwallet__littlefs/image-building-requirements.txt new file mode 100644 index 0000000..b86049e --- /dev/null +++ b/components/joltwallet__littlefs/image-building-requirements.txt @@ -0,0 +1 @@ +littlefs-python==0.15.0 diff --git a/components/joltwallet__littlefs/include/esp_littlefs.h b/components/joltwallet__littlefs/include/esp_littlefs.h new file mode 100644 index 0000000..79a2a7d --- /dev/null +++ b/components/joltwallet__littlefs/include/esp_littlefs.h @@ -0,0 +1,212 @@ +#ifndef ESP_LITTLEFS_H__ +#define ESP_LITTLEFS_H__ + +#include "sdkconfig.h" +#include "esp_err.h" +#include "esp_idf_version.h" +#include <stdbool.h> +#include "esp_partition.h" + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +#include <sdmmc_cmd.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define ESP_LITTLEFS_VERSION_NUMBER "1.20.3" +#define ESP_LITTLEFS_VERSION_MAJOR 1 +#define ESP_LITTLEFS_VERSION_MINOR 20 +#define ESP_LITTLEFS_VERSION_PATCH 3 + +#ifdef ESP8266 +// The ESP8266 RTOS SDK enables VFS DIR support by default +#define CONFIG_VFS_SUPPORT_DIR 1 +#endif + +#if CONFIG_VFS_SUPPORT_DIR +#define ESP_LITTLEFS_ENABLE_FTRUNCATE +#endif // CONFIG_VFS_SUPPORT_DIR + +/** + * Configuration structure for esp_vfs_littlefs_register. + */ +typedef struct { + const char *base_path; /**< Mounting point. */ + const char *partition_label; /**< Label of partition to use. If partition_label, partition, and sdcard are all NULL, + then the first partition with data subtype 'littlefs' will be used. */ + const esp_partition_t* partition; /**< partition to use if partition_label is NULL */ + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + sdmmc_card_t *sdcard; /**< SD card handle to use if both the esp_partition handle & partition label are NULL */ +#endif + + uint8_t format_if_mount_failed:1; /**< Format the file system if it fails to mount. */ + uint8_t read_only : 1; /**< Mount the partition as read-only. */ + uint8_t dont_mount:1; /**< Don't attempt to mount.*/ + uint8_t grow_on_mount:1; /**< Grow filesystem to match partition size on mount.*/ +} esp_vfs_littlefs_conf_t; + +/** + * Register and mount (if configured to) littlefs to VFS with given path prefix. + * + * @param conf Pointer to esp_vfs_littlefs_conf_t configuration structure + * + * @return + * - ESP_OK if success + * - ESP_ERR_NO_MEM if objects could not be allocated + * - ESP_ERR_INVALID_STATE if already mounted or partition is encrypted + * - ESP_ERR_NOT_FOUND if partition for littlefs was not found + * - ESP_FAIL if mount or format fails + */ +esp_err_t esp_vfs_littlefs_register(const esp_vfs_littlefs_conf_t * conf); + +/** + * Unregister and unmount littlefs from VFS + * + * @param partition_label Label of the partition to unregister.
+ * + * @return + * - ESP_OK if successful + * - ESP_ERR_INVALID_STATE if already unregistered + */ +esp_err_t esp_vfs_littlefs_unregister(const char* partition_label); + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +/** + * Unregister and unmount LittleFS from VFS for SD card + * + * @param sdcard SD card to unregister. + * + * @return + * - ESP_OK if successful + * - ESP_ERR_INVALID_STATE if already unregistered + */ +esp_err_t esp_vfs_littlefs_unregister_sdmmc(sdmmc_card_t *sdcard); +#endif + +/** + * Unregister and unmount littlefs from VFS + * + * @param partition partition to unregister. + * + * @return + * - ESP_OK if successful + * - ESP_ERR_INVALID_STATE if already unregistered + */ +esp_err_t esp_vfs_littlefs_unregister_partition(const esp_partition_t* partition); + +/** + * Check if littlefs is mounted + * + * @param partition_label Label of the partition to check. + * + * @return + * - true if mounted + * - false if not mounted + */ +bool esp_littlefs_mounted(const char* partition_label); + +/** + * Check if littlefs is mounted + * + * @param partition partition to check. + * + * @return + * - true if mounted + * - false if not mounted + */ +bool esp_littlefs_partition_mounted(const esp_partition_t* partition); + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +/** + * Check if littlefs is mounted + * + * @param sdcard SD card to check. + * + * @return + * - true if mounted + * - false if not mounted + */ +bool esp_littlefs_sdmmc_mounted(sdmmc_card_t *sdcard); +#endif + +/** + * Format the littlefs partition + * + * @param partition_label Label of the partition to format. + * @return + * - ESP_OK if successful + * - ESP_FAIL on error + */ +esp_err_t esp_littlefs_format(const char* partition_label); + +/** + * Format the littlefs partition + * + * @param partition partition to format. + * @return + * - ESP_OK if successful + * - ESP_FAIL on error + */ +esp_err_t esp_littlefs_format_partition(const esp_partition_t* partition); + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +/** + * Format the LittleFS on an SD card + * + * @param sdcard SD card to format + * @return + * - ESP_OK if successful + * - ESP_FAIL on error + */ +esp_err_t esp_littlefs_format_sdmmc(sdmmc_card_t *sdcard); +#endif + +/** + * Get information for littlefs + * + * @param partition_label Optional, label of the partition to get info for. + * @param[out] total_bytes Size of the file system + * @param[out] used_bytes Current used bytes in the file system + * + * @return + * - ESP_OK if success + * - ESP_ERR_INVALID_STATE if not mounted + */ +esp_err_t esp_littlefs_info(const char* partition_label, size_t* total_bytes, size_t* used_bytes); + +/** + * Get information for littlefs + * + * @param partition the partition to get info for. + * @param[out] total_bytes Size of the file system + * @param[out] used_bytes Current used bytes in the file system + * + * @return + * - ESP_OK if success + * - ESP_ERR_INVALID_STATE if not mounted + */ +esp_err_t esp_littlefs_partition_info(const esp_partition_t* partition, size_t *total_bytes, size_t *used_bytes); + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +/** + * Get information for littlefs on SD card + * + * @param[in] sdcard the SD card to get info for.
+ * @param[out] total_bytes Size of the file system + * @param[out] used_bytes Current used bytes in the file system + * + * @return + * - ESP_OK if success + * - ESP_ERR_INVALID_STATE if not mounted + */ +esp_err_t esp_littlefs_sdmmc_info(sdmmc_card_t *sdcard, size_t *total_bytes, size_t *used_bytes); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/components/joltwallet__littlefs/library.json b/components/joltwallet__littlefs/library.json new file mode 100644 index 0000000..e6419e8 --- /dev/null +++ b/components/joltwallet__littlefs/library.json @@ -0,0 +1,14 @@ +{ + "name": "esp_littlefs", + "version": "1.20.3", + "description": "LittleFS is a small fail-safe filesystem for micro-controllers.", + "frameworks": "espidf", + "platforms": "*", + "build": { + "srcFilter": "+<*> - - -", + "flags": [ + "-I ./src/littlefs/", + "-DLFS_CONFIG=lfs_config.h" + ] + } +} diff --git a/components/joltwallet__littlefs/partition_table_unit_test_app.csv b/components/joltwallet__littlefs/partition_table_unit_test_app.csv new file mode 100644 index 0000000..3c75a5d --- /dev/null +++ b/components/joltwallet__littlefs/partition_table_unit_test_app.csv @@ -0,0 +1,17 @@ +# Special partition table for unit test app +# +# Name, Type, SubType, Offset, Size, Flags +# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild +nvs, data, nvs, 0x9000, 0x4000 +otadata, data, ota, 0xd000, 0x2000 +phy_init, data, phy, 0xf000, 0x1000 +factory, 0, 0, 0x10000, 2M +# these OTA partitions are used for tests, but can't fit real OTA apps in them +# (done this way so tests can run in 2MB of flash.) +ota_0, 0, ota_0, , 64K +ota_1, 0, ota_1, , 64K +# flash_test partition used for SPI flash tests, WL FAT tests, and SPIFFS tests +fat_store, data, fat, , 528K +spiffs_store, data, spiffs, , 512K +flash_test, data, spiffs, , 512K +named_part, data, littlefs, , 64K diff --git a/components/joltwallet__littlefs/project_include.cmake b/components/joltwallet__littlefs/project_include.cmake new file mode 100644 index 0000000..b03d042 --- /dev/null +++ b/components/joltwallet__littlefs/project_include.cmake @@ -0,0 +1,83 @@ + +# littlefs_create_partition_image +# +# Create a littlefs image of the specified directory on the host during build and optionally +# have the created image flashed using `idf.py flash` + +set(littlefs_py_venv "${CMAKE_CURRENT_BINARY_DIR}/littlefs_py_venv") +set(littlefs_py_requirements "${CMAKE_CURRENT_LIST_DIR}/image-building-requirements.txt") + +set_directory_properties(PROPERTIES + ADDITIONAL_CLEAN_FILES "${littlefs_py_venv}" +) + +function(littlefs_create_partition_image partition base_dir) + set(options FLASH_IN_PROJECT) + set(multi DEPENDS) + cmake_parse_arguments(arg "${options}" "" "${multi}" "${ARGN}") + + idf_build_get_property(idf_path IDF_PATH) + + get_filename_component(base_dir_full_path ${base_dir} ABSOLUTE) + + partition_table_get_partition_info(size "--partition-name ${partition}" "size") + partition_table_get_partition_info(offset "--partition-name ${partition}" "offset") + + if("${size}" AND "${offset}") + set(image_file ${CMAKE_BINARY_DIR}/${partition}.bin) + + if(CMAKE_HOST_WIN32) + set(littlefs_py "${littlefs_py_venv}/Scripts/littlefs-python.exe") + add_custom_command( + OUTPUT ${littlefs_py_venv} + COMMAND ${PYTHON} -m venv ${littlefs_py_venv} && ${littlefs_py_venv}/Scripts/pip.exe install -r ${littlefs_py_requirements} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${littlefs_py_requirements} + ) + 
else() + set(littlefs_py "${littlefs_py_venv}/bin/littlefs-python") + add_custom_command( + OUTPUT ${littlefs_py_venv} + COMMAND ${PYTHON} -m venv ${littlefs_py_venv} && ${littlefs_py_venv}/bin/pip install -r ${littlefs_py_requirements} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${littlefs_py_requirements} + ) + endif() + + # Execute LittleFS image generation; this always executes as there is no way to specify for CMake to watch for + # contents of the base dir changing. + + add_custom_target(littlefs_${partition}_bin ALL + COMMAND ${littlefs_py} create ${base_dir_full_path} ${image_file} -v --fs-size=${size} --name-max=${CONFIG_LITTLEFS_OBJ_NAME_LEN} --block-size=4096 + DEPENDS ${arg_DEPENDS} ${littlefs_py_venv} + ) + + set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" APPEND PROPERTY + ADDITIONAL_MAKE_CLEAN_FILES + ${image_file}) + + set(IDF_VER_NO_V "${IDF_VERSION_MAJOR}.${IDF_VERSION_MINOR}") + + if(${IDF_VER_NO_V} VERSION_LESS 5.0) + message(WARNING "Unsupported/unmaintained/deprecated ESP-IDF version ${IDF_VER}") + endif() + + idf_component_get_property(main_args esptool_py FLASH_ARGS) + idf_component_get_property(sub_args esptool_py FLASH_SUB_ARGS) + esptool_py_flash_target(${partition}-flash "${main_args}" "${sub_args}") + esptool_py_flash_target_image(${partition}-flash "${partition}" "${offset}" "${image_file}") + + add_dependencies(${partition}-flash littlefs_${partition}_bin) + + if(arg_FLASH_IN_PROJECT) + esptool_py_flash_target_image(flash "${partition}" "${offset}" "${image_file}") + add_dependencies(flash littlefs_${partition}_bin) + endif() + + else() + set(message "Failed to create littlefs image for partition '${partition}'. " + "Check project configuration if using the correct partition table file." + ) + fail_at_build_time(littlefs_${partition}_bin "${message}") + endif() +endfunction() diff --git a/components/joltwallet__littlefs/sdkconfig.defaults b/components/joltwallet__littlefs/sdkconfig.defaults new file mode 100644 index 0000000..2c2f92a --- /dev/null +++ b/components/joltwallet__littlefs/sdkconfig.defaults @@ -0,0 +1,130 @@ +# +# Partition Table +# +CONFIG_PARTITION_TABLE_CUSTOM=y +CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partition_table_unit_test_app.csv" +CONFIG_PARTITION_TABLE_FILENAME="partition_table_unit_test_app.csv" +CONFIG_PARTITION_TABLE_OFFSET=0x8000 +CONFIG_PARTITION_TABLE_MD5=y + +# +# Heap +# +CONFIG_HEAP_POISONING_COMPREHENSIVE=y + +# +# Watchdog +# +CONFIG_ESP_TASK_WDT=y +CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU0=n +CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1=n + +# +# ESP32-specific +# +CONFIG_IDF_TARGET_ESP32=y +CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y +CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ=240 + +CONFIG_ESP32_XTAL_FREQ_AUTO=y + +# +# Serial flasher config +# +CONFIG_ESPTOOLPY_BAUD_921600B=y +CONFIG_ESPTOOLPY_COMPRESSED=y +CONFIG_ESPTOOLPY_FLASHMODE_QIO=y +CONFIG_ESPTOOLPY_FLASHFREQ_80M=y +CONFIG_ESPTOOLPY_FLASHFREQ="80m" +CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y +CONFIG_ESPTOOLPY_FLASHSIZE="4MB" +CONFIG_ESPTOOLPY_BEFORE_RESET=y +CONFIG_ESPTOOLPY_BEFORE="default_reset" +CONFIG_ESPTOOLPY_AFTER_RESET=y +CONFIG_ESPTOOLPY_AFTER_NORESET=n +CONFIG_ESPTOOLPY_AFTER="hard_reset" +CONFIG_ESPTOOLPY_MONITOR_BAUD_CONSOLE=y +CONFIG_ESPTOOLPY_FLASHSIZE="4MB" +CONFIG_ESPTOOLPY_FLASHSIZE_DETECT=n + +CONFIG_ESP_CONSOLE_UART_NUM=0 + +# +# SPI Flash driver +# +CONFIG_SPI_FLASH_VERIFY_WRITE=n +CONFIG_SPI_FLASH_ENABLE_COUNTERS=n +CONFIG_SPI_FLASH_ROM_DRIVER_PATCH=y +CONFIG_SPI_FLASH_DANGEROUS_WRITE_ABORTS=y +CONFIG_SPI_FLASH_DANGEROUS_WRITE_FAILS=n 
+CONFIG_SPI_FLASH_DANGEROUS_WRITE_ALLOWED=n + +# +# SPIFFS Configuration +# +CONFIG_SPIFFS_MAX_PARTITIONS=3 + +# +# SPIFFS Cache Configuration +# +CONFIG_SPIFFS_CACHE=y +CONFIG_SPIFFS_CACHE_WR=y +CONFIG_SPIFFS_CACHE_STATS=n +CONFIG_SPIFFS_PAGE_CHECK=y +CONFIG_SPIFFS_GC_MAX_RUNS=10 +CONFIG_SPIFFS_GC_STATS=n +CONFIG_SPIFFS_PAGE_SIZE=256 +CONFIG_SPIFFS_OBJ_NAME_LEN=32 +CONFIG_SPIFFS_USE_MAGIC=y +CONFIG_SPIFFS_USE_MAGIC_LENGTH=y +CONFIG_SPIFFS_META_LENGTH=4 +CONFIG_SPIFFS_USE_MTIME=n + +# +# FAT Filesystem support +# +CONFIG_FATFS_CODEPAGE_DYNAMIC=n +CONFIG_FATFS_CODEPAGE_437=y +CONFIG_FATFS_CODEPAGE_720=n +CONFIG_FATFS_CODEPAGE_737=n +CONFIG_FATFS_CODEPAGE_771=n +CONFIG_FATFS_CODEPAGE_775=n +CONFIG_FATFS_CODEPAGE_850=n +CONFIG_FATFS_CODEPAGE_852=n +CONFIG_FATFS_CODEPAGE_855=n +CONFIG_FATFS_CODEPAGE_857=n +CONFIG_FATFS_CODEPAGE_860=n +CONFIG_FATFS_CODEPAGE_861=n +CONFIG_FATFS_CODEPAGE_862=n +CONFIG_FATFS_CODEPAGE_863=n +CONFIG_FATFS_CODEPAGE_864=n +CONFIG_FATFS_CODEPAGE_865=n +CONFIG_FATFS_CODEPAGE_866=n +CONFIG_FATFS_CODEPAGE_869=n +CONFIG_FATFS_CODEPAGE_932=n +CONFIG_FATFS_CODEPAGE_936=n +CONFIG_FATFS_CODEPAGE_949=n +CONFIG_FATFS_CODEPAGE_950=n +CONFIG_FATFS_CODEPAGE=437 +CONFIG_FATFS_LFN_NONE=y +CONFIG_FATFS_LFN_HEAP=n +CONFIG_FATFS_LFN_STACK=n +CONFIG_FATFS_FS_LOCK=0 +CONFIG_FATFS_TIMEOUT_MS=10000 +CONFIG_FATFS_PER_FILE_CACHE=y + +CONFIG_UNITY_FREERTOS_PRIORITY=5 +CONFIG_UNITY_FREERTOS_CPU=0 +CONFIG_UNITY_FREERTOS_STACK_SIZE=12000 +CONFIG_UNITY_WARN_LEAK_LEVEL_GENERAL=255 +CONFIG_UNITY_CRITICAL_LEAK_LEVEL_GENERAL=1024 +CONFIG_UNITY_CRITICAL_LEAK_LEVEL_LWIP=4095 +CONFIG_UNITY_ENABLE_FLOAT=y +CONFIG_UNITY_ENABLE_DOUBLE=y +CONFIG_UNITY_ENABLE_COLOR=y +CONFIG_UNITY_ENABLE_IDF_TEST_RUNNER=y +CONFIG_UNITY_ENABLE_FIXTURE=y +CONFIG_UNITY_ENABLE_BACKTRACE_ON_FAIL=y + +# BOOTLOADER +CONFIG_BOOTLOADER_LOG_LEVEL_WARN=y diff --git a/components/joltwallet__littlefs/src/esp_littlefs.c b/components/joltwallet__littlefs/src/esp_littlefs.c new file mode 100644 index 0000000..b0a3fcc --- /dev/null +++ b/components/joltwallet__littlefs/src/esp_littlefs.c @@ -0,0 +1,2573 @@ +/** + * @file esp_littlefs.c + * @brief Maps LittleFS <-> ESP_VFS + * @author Brian Pugh + */ + +#ifndef LOG_LOCAL_LEVEL +#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL +#endif // LOG_LOCAL_LEVEL + +#include "esp_littlefs.h" +#include "littlefs/lfs.h" +#include "sdkconfig.h" +#include "esp_log.h" +#include "esp_system.h" +#include "freertos/FreeRTOS.h" +#include "freertos/semphr.h" +#include "freertos/task.h" +#include "littlefs_api.h" +#include <errno.h> +#include <fcntl.h> +#include <string.h> +#include <stdlib.h> +#include <dirent.h> +#include <sys/param.h> +#include <sys/stat.h> +#include "esp_random.h" + +#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 0, 0) +#error "esp_littlefs requires esp-idf >=5.0" +#endif + + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +#include <sdmmc_cmd.h> +#endif + +#include "spi_flash_mmap.h" + +#if CONFIG_IDF_TARGET_ESP32 +#include "esp32/rom/spi_flash.h" +#elif CONFIG_IDF_TARGET_ESP32S2 +#include "esp32s2/rom/spi_flash.h" +#elif CONFIG_IDF_TARGET_ESP32S3 +#include "esp32s3/rom/spi_flash.h" +#elif CONFIG_IDF_TARGET_ESP32C3 +#include "esp32c3/rom/spi_flash.h" +#elif CONFIG_IDF_TARGET_ESP32H2 +#include "esp32h2/rom/spi_flash.h" +#elif CONFIG_IDF_TARGET_ESP8684 +#include "esp8684/rom/spi_flash.h" +#elif __has_include("esp32/rom/spi_flash.h") +#include "esp32/rom/spi_flash.h" //IDF 4 +#else +#include "rom/spi_flash.h" //IDF 3 +#endif + +#define CONFIG_LITTLEFS_BLOCK_SIZE 4096 /* ESP32 can only operate at 4kb */ + +/* File Descriptor Caching Params */ +#define CONFIG_LITTLEFS_FD_CACHE_REALLOC_FACTOR 2 /*
Amount to resize FD cache by */ +#define CONFIG_LITTLEFS_FD_CACHE_MIN_SIZE 4 /* Minimum size of FD cache */ +#define CONFIG_LITTLEFS_FD_CACHE_HYST 4 /* When shrinking, leave this many trailing FD slots available */ + +/** + * @brief Last Modified Time + * + * Use 't' for ESP_LITTLEFS_ATTR_MTIME to match example: + * https://github.com/ARMmbed/littlefs/issues/23#issuecomment-482293539 + * And to match other external tools such as: + * https://github.com/earlephilhower/mklittlefs + */ +#define ESP_LITTLEFS_ATTR_MTIME ((uint8_t) 't') + +// ESP_PARTITION_SUBTYPE_DATA_LITTLEFS was introduced in later patch versions of esp-idf. +// * v5.0.7 +// * v5.1.4 +// * v5.2.0 +#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 2, 0) +#ifndef ESP_PARTITION_SUBTYPE_DATA_LITTLEFS +#define ESP_PARTITION_SUBTYPE_DATA_LITTLEFS 0x83 +#endif +#endif + +/** + * @brief littlefs DIR structure + */ +typedef struct { + DIR dir; /*!< VFS DIR struct */ + lfs_dir_t d; /*!< littlefs DIR struct */ + struct dirent e; /*!< Last open dirent */ + long offset; /*!< Offset of the current dirent */ + char *path; /*!< Requested directory name */ +} vfs_littlefs_dir_t; + +static int vfs_littlefs_open(void* ctx, const char * path, int flags, int mode); +static ssize_t vfs_littlefs_write(void* ctx, int fd, const void * data, size_t size); +static ssize_t vfs_littlefs_read(void* ctx, int fd, void * dst, size_t size); +static ssize_t vfs_littlefs_pwrite(void *ctx, int fd, const void *src, size_t size, off_t offset); +static ssize_t vfs_littlefs_pread(void *ctx, int fd, void *dst, size_t size, off_t offset); +static int vfs_littlefs_close(void* ctx, int fd); +static off_t vfs_littlefs_lseek(void* ctx, int fd, off_t offset, int mode); +static int vfs_littlefs_fsync(void* ctx, int fd); +#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) +static esp_vfs_t vfs_littlefs_create_struct(bool writeable); +#endif // ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) + +#ifdef CONFIG_VFS_SUPPORT_DIR +static int vfs_littlefs_stat(void* ctx, const char * path, struct stat * st); +static int vfs_littlefs_unlink(void* ctx, const char *path); +static int vfs_littlefs_rename(void* ctx, const char *src, const char *dst); +static DIR* vfs_littlefs_opendir(void* ctx, const char* name); +static int vfs_littlefs_closedir(void* ctx, DIR* pdir); +static struct dirent* vfs_littlefs_readdir(void* ctx, DIR* pdir); +static int vfs_littlefs_readdir_r(void* ctx, DIR* pdir, + struct dirent* entry, struct dirent** out_dirent); +static long vfs_littlefs_telldir(void* ctx, DIR* pdir); +static void vfs_littlefs_seekdir(void* ctx, DIR* pdir, long offset); +static int vfs_littlefs_mkdir(void* ctx, const char* name, mode_t mode); +static int vfs_littlefs_rmdir(void* ctx, const char* name); +static ssize_t vfs_littlefs_truncate( void *ctx, const char *path, off_t size); + +#ifdef ESP_LITTLEFS_ENABLE_FTRUNCATE +static int vfs_littlefs_ftruncate(void *ctx, int fd, off_t size); +#endif // ESP_LITTLEFS_ENABLE_FTRUNCATE + +static void esp_littlefs_dir_free(vfs_littlefs_dir_t *dir); +#endif + +static void esp_littlefs_take_efs_lock(void); +static esp_err_t esp_littlefs_init_efs(esp_littlefs_t** efs, const esp_partition_t* partition, bool read_only); +static esp_err_t esp_littlefs_init(const esp_vfs_littlefs_conf_t* conf, int *index); + +static esp_err_t esp_littlefs_by_label(const char* label, int * index); +static esp_err_t esp_littlefs_by_partition(const esp_partition_t* part, int*index); +static int esp_littlefs_file_sync(esp_littlefs_t *efs, vfs_littlefs_file_t *file); + +#ifdef 
CONFIG_LITTLEFS_SDMMC_SUPPORT +static esp_err_t esp_littlefs_by_sdmmc_handle(sdmmc_card_t *handle, int *index); +#endif + +static esp_err_t esp_littlefs_get_empty(int *index); +static void esp_littlefs_free(esp_littlefs_t ** efs); +static int esp_littlefs_flags_conv(int m); + +#if CONFIG_LITTLEFS_USE_MTIME +static int vfs_littlefs_utime(void *ctx, const char *path, const struct utimbuf *times); +static int esp_littlefs_update_mtime_attr(esp_littlefs_t *efs, const char *path, time_t t); +static time_t esp_littlefs_get_mtime_attr(esp_littlefs_t *efs, const char *path); +static time_t esp_littlefs_get_updated_time(esp_littlefs_t *efs, vfs_littlefs_file_t *file, const char *path); +#endif + +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH +/* The only way in LittleFS to get info is via a path (lfs_stat), so it cannot + * be done if the path isn't stored. */ +static int vfs_littlefs_fstat(void* ctx, int fd, struct stat * st); +#endif + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT +static void mkdirs(esp_littlefs_t * efs, const char *dir); +static void rmdirs(esp_littlefs_t * efs, const char *dir); +#endif // CONFIG_LITTLEFS_SPIFFS_COMPAT + +static int vfs_littlefs_fcntl(void* ctx, int fd, int cmd, int arg); + +static int sem_take(esp_littlefs_t *efs); +static int sem_give(esp_littlefs_t *efs); +static esp_err_t format_from_efs(esp_littlefs_t *efs); +static void get_total_and_used_bytes(esp_littlefs_t *efs, size_t *total_bytes, size_t *used_bytes); + +static SemaphoreHandle_t _efs_lock = NULL; +static esp_littlefs_t * _efs[CONFIG_LITTLEFS_MAX_PARTITIONS] = { 0 }; + +/******************** + * Helper Functions * + ********************/ + + +#if CONFIG_LITTLEFS_HUMAN_READABLE +/** + * @brief converts an enumerated lfs error into a string. + * @param lfs_errno The enumerated littlefs error. + */ +static const char * esp_littlefs_errno(enum lfs_error lfs_errno); +#endif + +static inline void * esp_littlefs_calloc(size_t __nmemb, size_t __size) { + /* Used internally by this wrapper only */ +#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) + return heap_caps_calloc(__nmemb, __size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL); +#elif defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM) + return heap_caps_calloc(__nmemb, __size, MALLOC_CAP_8BIT | MALLOC_CAP_SPIRAM); +#else /* CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE, CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT or not defined */ + return calloc(__nmemb, __size); +#endif +} + +static void esp_littlefs_free_fds(esp_littlefs_t * efs) { + /* Need to free all files that were opened */ + while (efs->file) { + vfs_littlefs_file_t * next = efs->file->next; + free(efs->file); + efs->file = next; + } + free(efs->cache); + efs->cache = 0; + efs->cache_size = efs->fd_count = 0; +} + +static int lfs_errno_remap(enum lfs_error err) { + switch(err){ + case LFS_ERR_OK: return 0; + case LFS_ERR_IO: return EIO; + case LFS_ERR_CORRUPT: return EBADMSG; // This is a bit opinionated. 
+ case LFS_ERR_NOENT: return ENOENT; + case LFS_ERR_EXIST: return EEXIST; + case LFS_ERR_NOTDIR: return ENOTDIR; + case LFS_ERR_ISDIR: return EISDIR; + case LFS_ERR_NOTEMPTY: return ENOTEMPTY; + case LFS_ERR_BADF: return EBADF; + case LFS_ERR_FBIG: return EFBIG; + case LFS_ERR_INVAL: return EINVAL; + case LFS_ERR_NOSPC: return ENOSPC; + case LFS_ERR_NOMEM: return ENOMEM; + case LFS_ERR_NOATTR: return ENODATA; + case LFS_ERR_NAMETOOLONG: return ENAMETOOLONG; + } + return EINVAL; // Need some default value +} + +esp_err_t format_from_efs(esp_littlefs_t *efs) +{ + assert( efs ); + bool was_mounted = false; + + /* Unmount if mounted */ + if(efs->cache_size > 0){ + int res; + ESP_LOGV(ESP_LITTLEFS_TAG, "Partition was mounted. Unmounting..."); + was_mounted = true; + res = lfs_unmount(efs->fs); + if(res != LFS_ERR_OK){ + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to unmount."); + return ESP_FAIL; + } + esp_littlefs_free_fds(efs); + } + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + /* Format the SD card too */ + if (efs->sdcard) { + esp_err_t ret = sdmmc_full_erase(efs->sdcard); + if (ret != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to format SD card: 0x%x %s", ret, esp_err_to_name(ret)); + return ret; + } + + ESP_LOGI(ESP_LITTLEFS_TAG, "SD card formatted!"); + } +#endif + + /* Format */ + { + esp_err_t res = ESP_OK; + ESP_LOGV(ESP_LITTLEFS_TAG, "Formatting filesystem"); + + /* Need to write explicit block_count to cfg; but skip if it's the SD card */ +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + if (efs->sdcard) { + res = lfs_format(efs->fs, &efs->cfg); + } else +#endif + { + efs->cfg.block_count = efs->partition->size / efs->cfg.block_size; + res = lfs_format(efs->fs, &efs->cfg); + efs->cfg.block_count = 0; + } + + if( res != LFS_ERR_OK ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to format filesystem"); + return ESP_FAIL; + } + } + + /* Mount filesystem */ + if( was_mounted ) { + int res; + /* Remount the partition */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Remounting formatted partition"); + res = lfs_mount(efs->fs, &efs->cfg); + if( res != LFS_ERR_OK ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to re-mount filesystem"); + return ESP_FAIL; + } + efs->cache_size = CONFIG_LITTLEFS_FD_CACHE_MIN_SIZE; // Initial size of cache; will resize on demand + efs->cache = esp_littlefs_calloc(efs->cache_size, sizeof(*efs->cache)); + } + ESP_LOGV(ESP_LITTLEFS_TAG, "Format Success!"); + + return ESP_OK; +} + +void get_total_and_used_bytes(esp_littlefs_t *efs, size_t *total_bytes, size_t *used_bytes) { + sem_take(efs); + size_t total_bytes_local = efs->cfg.block_size * efs->fs->block_count; + if(total_bytes) *total_bytes = total_bytes_local; + + /* lfs_fs_size may return a size larger than the actual filesystem size.
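The clamped total/used pair computed here is exactly what the public info calls report. A minimal caller-side sketch (the "storage" label and "app" log tag are hypothetical, and the partition is assumed to already be mounted):

    size_t total = 0, used = 0;
    esp_err_t err = esp_littlefs_info("storage", &total, &used);  /* hypothetical partition label */
    if (err == ESP_OK) {
        ESP_LOGI("app", "LittleFS usage: %u of %u bytes", (unsigned) used, (unsigned) total);
    }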
+ * https://github.com/littlefs-project/littlefs/blob/9c7e232086f865cff0bb96fe753deb66431d91fd/lfs.h#L658 + */ + if(used_bytes) *used_bytes = MIN(total_bytes_local, efs->cfg.block_size * lfs_fs_size(efs->fs)); + sem_give(efs); +} + +/******************** + * Public Functions * + ********************/ + +bool esp_littlefs_mounted(const char* partition_label) { + int index; + esp_err_t err; + + err = esp_littlefs_by_label(partition_label, &index); + if(err != ESP_OK) return false; + return _efs[index]->cache_size > 0; +} + +bool esp_littlefs_partition_mounted(const esp_partition_t* partition) { + int index; + esp_err_t err = esp_littlefs_by_partition(partition, &index); + + if(err != ESP_OK) return false; + return _efs[index]->cache_size > 0; +} + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +bool esp_littlefs_sdmmc_mounted(sdmmc_card_t *sdcard) +{ + int index; + esp_err_t err = esp_littlefs_by_sdmmc_handle(sdcard, &index); + + if(err != ESP_OK) return false; + return _efs[index]->cache_size > 0; +} +#endif + +esp_err_t esp_littlefs_info(const char* partition_label, size_t *total_bytes, size_t *used_bytes){ + int index; + esp_err_t err; + + err = esp_littlefs_by_label(partition_label, &index); + if(err != ESP_OK) return err; + get_total_and_used_bytes(_efs[index], total_bytes, used_bytes); + + return ESP_OK; +} + +esp_err_t esp_littlefs_partition_info(const esp_partition_t* partition, size_t *total_bytes, size_t *used_bytes){ + int index; + esp_err_t err; + + err = esp_littlefs_by_partition(partition, &index); + if(err != ESP_OK) return err; + get_total_and_used_bytes(_efs[index], total_bytes, used_bytes); + + return ESP_OK; +} + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +esp_err_t esp_littlefs_sdmmc_info(sdmmc_card_t *sdcard, size_t *total_bytes, size_t *used_bytes) +{ + int index; + esp_err_t err; + + err = esp_littlefs_by_sdmmc_handle(sdcard, &index); + if(err != ESP_OK) return err; + get_total_and_used_bytes(_efs[index], total_bytes, used_bytes); + + return ESP_OK; +} +#endif + +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 4, 0) + +#ifdef CONFIG_VFS_SUPPORT_DIR +static esp_vfs_dir_ops_t s_vfs_littlefs_dir = { + .stat_p = &vfs_littlefs_stat, + .link_p = NULL, /* Not Supported */ + .unlink_p = &vfs_littlefs_unlink, + .rename_p = &vfs_littlefs_rename, + .opendir_p = &vfs_littlefs_opendir, + .readdir_p = &vfs_littlefs_readdir, + .readdir_r_p = &vfs_littlefs_readdir_r, + .telldir_p = &vfs_littlefs_telldir, + .seekdir_p = &vfs_littlefs_seekdir, + .closedir_p = &vfs_littlefs_closedir, + .mkdir_p = &vfs_littlefs_mkdir, + .rmdir_p = &vfs_littlefs_rmdir, + // access_p + .truncate_p = &vfs_littlefs_truncate, +#ifdef ESP_LITTLEFS_ENABLE_FTRUNCATE + .ftruncate_p = &vfs_littlefs_ftruncate, +#endif // ESP_LITTLEFS_ENABLE_FTRUNCATE +#if CONFIG_LITTLEFS_USE_MTIME + .utime_p = &vfs_littlefs_utime, +#endif // CONFIG_LITTLEFS_USE_MTIME +}; +#endif // CONFIG_VFS_SUPPORT_DIR + +static esp_vfs_fs_ops_t s_vfs_littlefs = { + .write_p = &vfs_littlefs_write, + .pwrite_p = &vfs_littlefs_pwrite, + .lseek_p = &vfs_littlefs_lseek, + .read_p = &vfs_littlefs_read, + .pread_p = &vfs_littlefs_pread, + .open_p = &vfs_littlefs_open, + .close_p = &vfs_littlefs_close, + .fsync_p = &vfs_littlefs_fsync, + .fcntl_p = &vfs_littlefs_fcntl, +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + .fstat_p = &vfs_littlefs_fstat, +#endif +#ifdef CONFIG_VFS_SUPPORT_DIR + .dir = &s_vfs_littlefs_dir, +#endif // CONFIG_VFS_SUPPORT_DIR +}; + +#endif // ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 4, 0) +esp_err_t esp_vfs_littlefs_register(const 
esp_vfs_littlefs_conf_t * conf) +{ + int index; + assert(conf->base_path); +#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) + const esp_vfs_t vfs = vfs_littlefs_create_struct(!conf->read_only); +#endif // ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) + + esp_err_t err = esp_littlefs_init(conf, &index); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to initialize LittleFS"); + return err; + } + + strlcat(_efs[index]->base_path, conf->base_path, ESP_VFS_PATH_MAX + 1); +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 4, 0) + int flags = ESP_VFS_FLAG_CONTEXT_PTR | ESP_VFS_FLAG_STATIC; + if (conf->read_only) { + flags |= ESP_VFS_FLAG_READONLY_FS; + } + err = esp_vfs_register_fs(conf->base_path, &s_vfs_littlefs, flags, _efs[index]); +#else + err = esp_vfs_register(conf->base_path, &vfs, _efs[index]); +#endif // ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 4, 0) + if (err != ESP_OK) { + esp_littlefs_free(&_efs[index]); + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to register Littlefs to \"%s\"", conf->base_path); + return err; + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Successfully registered LittleFS to \"%s\"", conf->base_path); + return ESP_OK; +} + +esp_err_t esp_vfs_littlefs_unregister(const char* partition_label) +{ + int index; + if (esp_littlefs_by_label(partition_label, &index) != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Partition was never registered."); + return ESP_ERR_INVALID_STATE; + } + ESP_LOGV(ESP_LITTLEFS_TAG, "Unregistering \"%s\"", partition_label); + esp_err_t err = esp_vfs_unregister(_efs[index]->base_path); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to unregister \"%s\"", partition_label); + return err; + } + esp_littlefs_free(&_efs[index]); + _efs[index] = NULL; + return ESP_OK; +} + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +esp_err_t esp_vfs_littlefs_unregister_sdmmc(sdmmc_card_t *sdcard) +{ + assert(sdcard); + int index; + if (esp_littlefs_by_sdmmc_handle(sdcard, &index) != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Partition was never registered."); + return ESP_ERR_INVALID_STATE; + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Unregistering SD card \"%p\"", sdcard); + esp_err_t err = esp_vfs_unregister(_efs[index]->base_path); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to unregister SD card \"%p\"", sdcard); + return err; + } + + esp_littlefs_free(&_efs[index]); + _efs[index] = NULL; + return ESP_OK; +} +#endif + +esp_err_t esp_vfs_littlefs_unregister_partition(const esp_partition_t* partition) { + assert(partition); + int index; + if (esp_littlefs_by_partition(partition, &index) != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Partition was never registered."); + return ESP_ERR_INVALID_STATE; + } + ESP_LOGV(ESP_LITTLEFS_TAG, "Unregistering \"0x%08"PRIX32"\"", partition->address); + esp_err_t err = esp_vfs_unregister(_efs[index]->base_path); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to unregister \"0x%08"PRIX32"\"", partition->address); + return err; + } + esp_littlefs_free(&_efs[index]); + _efs[index] = NULL; + return ESP_OK; +} + +esp_err_t esp_littlefs_format(const char* partition_label) { + bool efs_free = false; + int index = -1; + esp_err_t err; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Formatting \"%s\"", partition_label); + + /* Get a context */ + err = esp_littlefs_by_label(partition_label, &index); + + if( err != ESP_OK ){ + /* Create a tmp context */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Temporarily creating EFS context."); + efs_free = true; + const esp_vfs_littlefs_conf_t conf = { + /* base_name not necessary for initializing */ + .dont_mount = true, 
+ .partition_label = partition_label, + }; + err = esp_littlefs_init(&conf, &index); + if( err != ESP_OK ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to initialize to format."); + goto exit; + } + } + + err = format_from_efs(_efs[index]); + +exit: + if(efs_free && index>=0) esp_littlefs_free(&_efs[index]); + return err; +} + +esp_err_t esp_littlefs_format_partition(const esp_partition_t* partition) { + assert( partition ); + + bool efs_free = false; + int index = -1; + esp_err_t err; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Formatting partition at \"0x%08"PRIX32"\"", partition->address); + + /* Get a context */ + err = esp_littlefs_by_partition(partition, &index); + + if( err != ESP_OK ){ + /* Create a tmp context */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Temporarily creating EFS context."); + efs_free = true; + const esp_vfs_littlefs_conf_t conf = { + /* base_name not necessary for initializing */ + .dont_mount = true, + .partition_label = NULL, + .partition = partition, + }; + err = esp_littlefs_init(&conf, &index); + if( err != ESP_OK ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to initialize to format."); + goto exit; + } + } + + err = format_from_efs(_efs[index]); + +exit: + if(efs_free && index>=0) esp_littlefs_free(&_efs[index]); + return err; +} + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +esp_err_t esp_littlefs_format_sdmmc(sdmmc_card_t *sdcard) +{ + assert(sdcard); + + bool efs_free = false; + int index = -1; + esp_err_t err; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Formatting sdcard %p", sdcard); + + /* Get a context */ + err = esp_littlefs_by_sdmmc_handle(sdcard, &index); + + if( err != ESP_OK ){ + /* Create a tmp context */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Temporarily creating EFS context."); + efs_free = true; + const esp_vfs_littlefs_conf_t conf = { + /* base_name not necessary for initializing */ + .dont_mount = true, + .partition_label = NULL, + .partition = NULL, + .sdcard = sdcard, + }; + + err = esp_littlefs_init(&conf, &index); + if( err != ESP_OK ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed to initialize to format."); + goto exit; + } + } + + err = format_from_efs(_efs[index]); + +exit: + if(efs_free && index>=0) esp_littlefs_free(&_efs[index]); + return err; +} +#endif + +/******************** + * Static Functions * + ********************/ + +/*** Helpers ***/ + +#if CONFIG_LITTLEFS_HUMAN_READABLE +/** + * @brief converts an enumerated lfs error into a string. + * @param lfs_error The littlefs error. 
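The three format entry points above differ only in how the target is located; the label-based one is the common case. A minimal recovery sketch under the assumption that the partition label is "storage" (invented for illustration) and that the caller remounts afterwards:

    /* Reformat a corrupted LittleFS data partition by label (hypothetical label). */
    esp_err_t err = esp_littlefs_format("storage");
    if (err != ESP_OK) {
        ESP_LOGE("app", "format failed: %s", esp_err_to_name(err));
    }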
+ */ +static const char * esp_littlefs_errno(enum lfs_error lfs_errno) { + switch(lfs_errno){ + case LFS_ERR_OK: return "LFS_ERR_OK"; + case LFS_ERR_IO: return "LFS_ERR_IO"; + case LFS_ERR_CORRUPT: return "LFS_ERR_CORRUPT"; + case LFS_ERR_NOENT: return "LFS_ERR_NOENT"; + case LFS_ERR_EXIST: return "LFS_ERR_EXIST"; + case LFS_ERR_NOTDIR: return "LFS_ERR_NOTDIR"; + case LFS_ERR_ISDIR: return "LFS_ERR_ISDIR"; + case LFS_ERR_NOTEMPTY: return "LFS_ERR_NOTEMPTY"; + case LFS_ERR_BADF: return "LFS_ERR_BADF"; + case LFS_ERR_FBIG: return "LFS_ERR_FBIG"; + case LFS_ERR_INVAL: return "LFS_ERR_INVAL"; + case LFS_ERR_NOSPC: return "LFS_ERR_NOSPC"; + case LFS_ERR_NOMEM: return "LFS_ERR_NOMEM"; + case LFS_ERR_NOATTR: return "LFS_ERR_NOATTR"; + case LFS_ERR_NAMETOOLONG: return "LFS_ERR_NAMETOOLONG"; + default: return "LFS_ERR_UNDEFINED"; + } + return ""; +} +#else +#define esp_littlefs_errno(x) "" +#endif + +#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) +static esp_vfs_t vfs_littlefs_create_struct(bool writeable) { + esp_vfs_t vfs = { + .flags = ESP_VFS_FLAG_CONTEXT_PTR, + .write_p = &vfs_littlefs_write, + .pwrite_p = &vfs_littlefs_pwrite, + .lseek_p = &vfs_littlefs_lseek, + .read_p = &vfs_littlefs_read, + .pread_p = &vfs_littlefs_pread, + .open_p = &vfs_littlefs_open, + .close_p = &vfs_littlefs_close, + .fsync_p = &vfs_littlefs_fsync, + .fcntl_p = &vfs_littlefs_fcntl, +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + .fstat_p = &vfs_littlefs_fstat, +#endif +#ifdef CONFIG_VFS_SUPPORT_DIR + .stat_p = &vfs_littlefs_stat, + .link_p = NULL, /* Not Supported */ + .unlink_p = &vfs_littlefs_unlink, + .rename_p = &vfs_littlefs_rename, + .opendir_p = &vfs_littlefs_opendir, + .readdir_p = &vfs_littlefs_readdir, + .readdir_r_p = &vfs_littlefs_readdir_r, + .telldir_p = &vfs_littlefs_telldir, + .seekdir_p = &vfs_littlefs_seekdir, + .closedir_p = &vfs_littlefs_closedir, + .mkdir_p = &vfs_littlefs_mkdir, + .rmdir_p = &vfs_littlefs_rmdir, + // access_p + .truncate_p = &vfs_littlefs_truncate, +#ifdef ESP_LITTLEFS_ENABLE_FTRUNCATE + .ftruncate_p = &vfs_littlefs_ftruncate, +#endif // ESP_LITTLEFS_ENABLE_FTRUNCATE +#if CONFIG_LITTLEFS_USE_MTIME + .utime_p = &vfs_littlefs_utime, +#endif // CONFIG_LITTLEFS_USE_MTIME +#endif // CONFIG_VFS_SUPPORT_DIR +}; + if(!writeable) { +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 2, 0) + vfs.flags |= ESP_VFS_FLAG_READONLY_FS; +#endif // ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 2, 0) + vfs.write_p = NULL; + vfs.pwrite_p = NULL; + vfs.fsync_p = NULL; + vfs.link_p = NULL; + vfs.unlink_p = NULL; + vfs.rename_p = NULL; + vfs.mkdir_p = NULL; + vfs.rmdir_p = NULL; + } + return vfs; +} +#endif // ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(5, 4, 0) + +/** + * @brief Free and clear a littlefs definition structure. + * @param efs Pointer to pointer to struct. Done this way so we can also zero + * out the pointer. + */ +static void esp_littlefs_free(esp_littlefs_t ** efs) +{ + esp_littlefs_t * e = *efs; + if (e == NULL) return; + *efs = NULL; + + if (e->fs) { + if(e->cache_size > 0) lfs_unmount(e->fs); + free(e->fs); + } + if(e->lock) vSemaphoreDelete(e->lock); + +#ifdef CONFIG_LITTLEFS_MMAP_PARTITION + esp_partition_munmap(e->mmap_handle); +#endif + + esp_littlefs_free_fds(e); + free(e); +} + +#ifdef CONFIG_VFS_SUPPORT_DIR +/** + * @brief Free a vfs_littlefs_dir_t struct. + */ +static void esp_littlefs_dir_free(vfs_littlefs_dir_t *dir){ + if(dir == NULL) return; + if(dir->path) free(dir->path); + free(dir); +} +#endif + +/** + * Find index of an already mounted littlefs filesystem by partition.
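One detail worth noting from vfs_littlefs_create_struct above: when registered with read_only, the write-family hooks are removed from the VFS table on older IDF versions, and on 5.2+/5.4+ the read-only flag does the same job, so writes fail at the VFS layer before ever reaching LittleFS. A hypothetical read-only registration (base path and label invented for illustration):

    esp_vfs_littlefs_conf_t conf = {
        .base_path = "/ro",            /* hypothetical mount point */
        .partition_label = "storage",  /* hypothetical partition label */
        .read_only = true,
    };
    esp_err_t err = esp_vfs_littlefs_register(&conf);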
+ * @param[in] part partition to search for + * @param[out] index index into _efs + * @return ESP_OK on success + */ +static esp_err_t esp_littlefs_by_partition(const esp_partition_t* part, int * index){ + int i; + esp_littlefs_t * p; + + if(!part || !index) return ESP_ERR_INVALID_ARG; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Searching for existing filesystem for partition \"0x%08"PRIX32"\"", part->address); + + for (i = 0; i < CONFIG_LITTLEFS_MAX_PARTITIONS; i++) { + p = _efs[i]; + if (!p) continue; + if (!p->partition) continue; + if (part->address == p->partition->address) { + *index = i; + ESP_LOGV(ESP_LITTLEFS_TAG, "Found existing filesystem \"0x%08"PRIX32"\" at index %d", part->address, *index); + return ESP_OK; + } + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Existing filesystem \"0x%08"PRIX32"\" not found", part->address); + return ESP_ERR_NOT_FOUND; +} + +/** + * @brief Find index of an already mounted littlefs filesystem by label. + * @param[in] label + * @param[out] index + */ +static esp_err_t esp_littlefs_by_label(const char* label, int * index){ + int i; + esp_littlefs_t * p; + const esp_partition_t *partition; + + if(!index) return ESP_ERR_INVALID_ARG; + if(!label){ + // Search for first data partition with subtype "littlefs" + partition = esp_partition_find_first( + ESP_PARTITION_TYPE_DATA, + ESP_PARTITION_SUBTYPE_DATA_LITTLEFS, + NULL + ); + if(!partition){ + ESP_LOGE(ESP_LITTLEFS_TAG, "No data partition with subtype \"littlefs\" found"); + return ESP_ERR_NOT_FOUND; + } + label = partition->label; + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Searching for existing filesystem for partition \"%s\"", label); + + for (i = 0; i < CONFIG_LITTLEFS_MAX_PARTITIONS; i++) { + p = _efs[i]; + if (!p) continue; + if (!p->partition) continue; + if (strncmp(label, p->partition->label, 17) == 0) { + *index = i; + ESP_LOGV(ESP_LITTLEFS_TAG, "Found existing filesystem \"%s\" at index %d", label, *index); + return ESP_OK; + } + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Existing filesystem \"%s\" not found", label); + return ESP_ERR_NOT_FOUND; +} + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +static esp_err_t esp_littlefs_by_sdmmc_handle(sdmmc_card_t *handle, int *index) +{ + if(!handle || !index) return ESP_ERR_INVALID_ARG; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Searching for existing filesystem for SD handle %p", handle); + + for (int i = 0; i < CONFIG_LITTLEFS_MAX_PARTITIONS; i++) { + esp_littlefs_t *p = _efs[i]; + if (!p) continue; + if (!p->sdcard) continue; + if (p->sdcard == handle) { + *index = i; + ESP_LOGV(ESP_LITTLEFS_TAG, "Found existing filesystem %p at index %d", handle, *index); + return ESP_OK; + } + } + + ESP_LOGV(ESP_LITTLEFS_TAG, "Existing filesystem %p not found", handle); + return ESP_ERR_NOT_FOUND; +} +#endif + +/** + * @brief Get the index of an unallocated LittleFS slot. + * @param[out] index Index of a free LittleFS slot + * @return ESP_OK on success + */ +static esp_err_t esp_littlefs_get_empty(int *index) { + assert(index); + for(uint8_t i=0; i < CONFIG_LITTLEFS_MAX_PARTITIONS; i++){ + if( _efs[i] == NULL ){ + *index = i; + return ESP_OK; + } + } + ESP_LOGE(ESP_LITTLEFS_TAG, "No more free partitions available."); + return ESP_FAIL; +} + +/** + * @brief Convert fcntl flags to littlefs flags + * @param m fcntl flags + * @return lfs flags + */ +static int esp_littlefs_flags_conv(int m) { + int lfs_flags = 0; + + // Mask out unsupported flags; they can cause internal LFS issues.
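A practical consequence of the mask that follows: open flags outside the supported set, such as O_NONBLOCK or O_SYNC, are silently discarded rather than forwarded, since LittleFS has no equivalents. For example (hypothetical path, illustrative only), these two opens behave identically at this layer:

    int a = open("/littlefs/f.txt", O_WRONLY | O_CREAT, 0);
    int b = open("/littlefs/f.txt", O_WRONLY | O_CREAT | O_SYNC, 0);  /* O_SYNC silently dropped */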
+ m &= (O_APPEND | O_WRONLY | O_RDWR | O_EXCL | O_CREAT | O_TRUNC); + + // O_RDONLY is 0 and not a flag, so must be explicitly checked + if (m == O_RDONLY) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_RDONLY"); lfs_flags |= LFS_O_RDONLY;} + + if (m & O_APPEND) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_APPEND"); lfs_flags |= LFS_O_APPEND;} + if (m & O_WRONLY) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_WRONLY"); lfs_flags |= LFS_O_WRONLY;} + if (m & O_RDWR) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_RDWR"); lfs_flags |= LFS_O_RDWR;} + if (m & O_EXCL) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_EXCL"); lfs_flags |= LFS_O_EXCL;} + if (m & O_CREAT) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_CREAT"); lfs_flags |= LFS_O_CREAT;} + if (m & O_TRUNC) {ESP_LOGV(ESP_LITTLEFS_TAG, "O_TRUNC"); lfs_flags |= LFS_O_TRUNC;} + return lfs_flags; +} + +static void esp_littlefs_take_efs_lock(void) { + if( _efs_lock == NULL ){ +#ifdef ESP8266 + taskENTER_CRITICAL(); +#else + static portMUX_TYPE mux = portMUX_INITIALIZER_UNLOCKED; + portENTER_CRITICAL(&mux); +#endif + if( _efs_lock == NULL ){ + _efs_lock = xSemaphoreCreateMutex(); + assert(_efs_lock); + } +#ifdef ESP8266 + taskEXIT_CRITICAL(); +#else + portEXIT_CRITICAL(&mux); +#endif + } + + xSemaphoreTake(_efs_lock, portMAX_DELAY); +} + + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT +static esp_err_t esp_littlefs_init_sdcard(esp_littlefs_t** efs, sdmmc_card_t* sdcard, bool read_only) +{ + /* Allocate Context */ + *efs = esp_littlefs_calloc(1, sizeof(esp_littlefs_t)); + if (*efs == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "esp_littlefs could not be malloced"); + return ESP_ERR_NO_MEM; + } + (*efs)->sdcard = sdcard; + + { /* LittleFS Configuration */ + (*efs)->cfg.context = *efs; + (*efs)->read_only = read_only; + + // block device operations + (*efs)->cfg.read = littlefs_sdmmc_read; + (*efs)->cfg.prog = littlefs_sdmmc_write; + (*efs)->cfg.erase = littlefs_sdmmc_erase; + (*efs)->cfg.sync = littlefs_sdmmc_sync; + + // block device configuration + (*efs)->cfg.read_size = sdcard->csd.sector_size; + (*efs)->cfg.prog_size = sdcard->csd.sector_size; + (*efs)->cfg.block_size = sdcard->csd.sector_size; + (*efs)->cfg.block_count = sdcard->csd.capacity; + (*efs)->cfg.cache_size = MAX(CONFIG_LITTLEFS_CACHE_SIZE, sdcard->csd.sector_size); // Must not be smaller than SD sector size + (*efs)->cfg.lookahead_size = CONFIG_LITTLEFS_LOOKAHEAD_SIZE; + (*efs)->cfg.block_cycles = CONFIG_LITTLEFS_BLOCK_CYCLES; +#if CONFIG_LITTLEFS_MULTIVERSION + #if CONFIG_LITTLEFS_DISK_VERSION_MOST_RECENT + (*efs)->cfg.disk_version = 0; +#elif CONFIG_LITTLEFS_DISK_VERSION_2_1 + (*efs)->cfg.disk_version = 0x00020001; +#elif CONFIG_LITTLEFS_DISK_VERSION_2_0 + (*efs)->cfg.disk_version = 0x00020000; +#else +#error "CONFIG_LITTLEFS_MULTIVERSION enabled but no or unknown disk version selected!" 
+#endif +#endif + } + + (*efs)->lock = xSemaphoreCreateRecursiveMutex(); + if ((*efs)->lock == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "mutex lock could not be created"); + return ESP_ERR_NO_MEM; + } + + (*efs)->fs = esp_littlefs_calloc(1, sizeof(lfs_t)); + if ((*efs)->fs == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "littlefs could not be malloced"); + return ESP_ERR_NO_MEM; + } + + return ESP_OK; +} +#endif // CONFIG_LITTLEFS_SDMMC_SUPPORT + +static esp_err_t esp_littlefs_init_efs(esp_littlefs_t** efs, const esp_partition_t* partition, bool read_only) +{ + /* Allocate Context */ + *efs = esp_littlefs_calloc(1, sizeof(esp_littlefs_t)); + if (*efs == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "esp_littlefs could not be malloced"); + return ESP_ERR_NO_MEM; + } + (*efs)->partition = partition; + +#ifdef CONFIG_LITTLEFS_MMAP_PARTITION + esp_err_t err = esp_partition_mmap(partition, 0, partition->size, SPI_FLASH_MMAP_DATA, &(*efs)->mmap_data, &(*efs)->mmap_handle); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "esp_littlefs could not map data"); + return err; + } +#endif + + { /* LittleFS Configuration */ + (*efs)->cfg.context = *efs; + (*efs)->read_only = read_only; + + // block device operations +#ifdef CONFIG_LITTLEFS_MMAP_PARTITION + (*efs)->cfg.read = littlefs_esp_part_read_mmap; +#else + (*efs)->cfg.read = littlefs_esp_part_read; +#endif + (*efs)->cfg.prog = littlefs_esp_part_write; + (*efs)->cfg.erase = littlefs_esp_part_erase; + (*efs)->cfg.sync = littlefs_esp_part_sync; + + // block device configuration + (*efs)->cfg.read_size = CONFIG_LITTLEFS_READ_SIZE; + (*efs)->cfg.prog_size = CONFIG_LITTLEFS_WRITE_SIZE; + (*efs)->cfg.block_size = CONFIG_LITTLEFS_BLOCK_SIZE; + (*efs)->cfg.block_count = 0; // Autodetect ``block_count`` + (*efs)->cfg.cache_size = CONFIG_LITTLEFS_CACHE_SIZE; + (*efs)->cfg.lookahead_size = CONFIG_LITTLEFS_LOOKAHEAD_SIZE; + (*efs)->cfg.block_cycles = CONFIG_LITTLEFS_BLOCK_CYCLES; +#if CONFIG_LITTLEFS_MULTIVERSION +#if CONFIG_LITTLEFS_DISK_VERSION_MOST_RECENT + (*efs)->cfg.disk_version = 0; +#elif CONFIG_LITTLEFS_DISK_VERSION_2_1 + (*efs)->cfg.disk_version = 0x00020001; +#elif CONFIG_LITTLEFS_DISK_VERSION_2_0 + (*efs)->cfg.disk_version = 0x00020000; +#else +#error "CONFIG_LITTLEFS_MULTIVERSION enabled but no or unknown disk version selected!" +#endif +#endif + } + + (*efs)->lock = xSemaphoreCreateRecursiveMutex(); + if ((*efs)->lock == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "mutex lock could not be created"); + return ESP_ERR_NO_MEM; + } + + (*efs)->fs = esp_littlefs_calloc(1, sizeof(lfs_t)); + if ((*efs)->fs == NULL) { + ESP_LOGE(ESP_LITTLEFS_TAG, "littlefs could not be malloced"); + return ESP_ERR_NO_MEM; + } + + return ESP_OK; +} + +/** + * @brief Initialize and mount littlefs + * @param[in] conf Filesystem Configuration + * @param[out] index On success, index into _efs. 
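For the flash-backed path configured in esp_littlefs_init_efs above, the geometry arithmetic is easy to sanity-check by hand; a sketch with a hypothetical 512 KiB partition:

    /* Hypothetical numbers: a 512 KiB partition with the fixed 4096-byte block size. */
    const size_t part_size   = 512 * 1024;
    const size_t block_count = part_size / 4096;  /* = 128 blocks; cfg.block_count = 0 lets
                                                     LittleFS autodetect this from the superblock */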
+ * @return ESP_OK on success + */ +static esp_err_t esp_littlefs_init(const esp_vfs_littlefs_conf_t* conf, int *index) +{ + esp_err_t err = ESP_FAIL; + const esp_partition_t* partition = NULL; + esp_littlefs_t * efs = NULL; + *index = -1; + + esp_littlefs_take_efs_lock(); + + if (esp_littlefs_get_empty(index) != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "max mounted partitions reached"); + err = ESP_ERR_INVALID_STATE; + goto exit; + } + + if(conf->partition_label) + { + /* Input and Environment Validation */ + if (esp_littlefs_by_label(conf->partition_label, index) == ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Partition already used"); + err = ESP_ERR_INVALID_STATE; + goto exit; + } + partition = esp_partition_find_first( + ESP_PARTITION_TYPE_DATA, + ESP_PARTITION_SUBTYPE_ANY, + conf->partition_label); + if (!partition) { + ESP_LOGE(ESP_LITTLEFS_TAG, "partition \"%s\" could not be found", conf->partition_label); + err = ESP_ERR_NOT_FOUND; + goto exit; + } + + } else if(conf->partition) { + if (esp_littlefs_by_partition(conf->partition, index) == ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Partition already used"); + err = ESP_ERR_INVALID_STATE; + goto exit; + } + partition = conf->partition; +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + } else if (conf->sdcard) { + ESP_LOGV(ESP_LITTLEFS_TAG, "Using SD card handle %p for LittleFS mount", conf->sdcard); + err = sdmmc_get_status(conf->sdcard); + if (err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Failed when checking SD card status: 0x%x", err); + goto exit; + } +#endif + } else { + // Find first partition with "littlefs" subtype. + partition = esp_partition_find_first( + ESP_PARTITION_TYPE_DATA, + ESP_PARTITION_SUBTYPE_DATA_LITTLEFS, + NULL + ); + if (!partition) { + ESP_LOGE(ESP_LITTLEFS_TAG, "No data partition with subtype \"littlefs\" found"); + err = ESP_ERR_NOT_FOUND; + goto exit; + } + } + +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + if (conf->sdcard) { + err = esp_littlefs_init_sdcard(&efs, conf->sdcard, conf->read_only); + if(err != ESP_OK) { + goto exit; + } + } else +#endif + { + uint32_t flash_page_size = g_rom_flashchip.page_size; + uint32_t log_page_size = CONFIG_LITTLEFS_PAGE_SIZE; + if (log_page_size % flash_page_size != 0) { + ESP_LOGE(ESP_LITTLEFS_TAG, "LITTLEFS_PAGE_SIZE is not multiple of flash chip page size (%u)", + (unsigned int) flash_page_size); + err = ESP_ERR_INVALID_ARG; + goto exit; + } + + err = esp_littlefs_init_efs(&efs, partition, conf->read_only); + + if(err != ESP_OK) { + goto exit; + } + } + + // Mount and Error Check + _efs[*index] = efs; + if(!conf->dont_mount){ + int res; + + res = lfs_mount(efs->fs, &efs->cfg); + + if (conf->format_if_mount_failed && res != LFS_ERR_OK) { + ESP_LOGW(ESP_LITTLEFS_TAG, "mount failed, %s (%i). 
formatting...", esp_littlefs_errno(res), res); +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + if (conf->sdcard) { + err = esp_littlefs_format_sdmmc(conf->sdcard); + } else +#endif + { + err = esp_littlefs_format_partition(efs->partition); + } + if(err != ESP_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "format failed"); + err = ESP_FAIL; + goto exit; + } + res = lfs_mount(efs->fs, &efs->cfg); + } + if (res != LFS_ERR_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "mount failed, %s (%i)", esp_littlefs_errno(res), res); + err = ESP_FAIL; + goto exit; + } + efs->cache_size = 4; + efs->cache = esp_littlefs_calloc(efs->cache_size, sizeof(*efs->cache)); + + if(conf->grow_on_mount){ +#ifdef CONFIG_LITTLEFS_SDMMC_SUPPORT + if (efs->sdcard) { + res = lfs_fs_grow(efs->fs, efs->sdcard->csd.capacity); + } else +#endif + { + res = lfs_fs_grow(efs->fs, efs->partition->size / efs->cfg.block_size); + } + if (res != LFS_ERR_OK) { + ESP_LOGE(ESP_LITTLEFS_TAG, "FS grow failed, %s (%i)", esp_littlefs_errno(res), res); + err = ESP_FAIL; + goto exit; + } + } + } + + err = ESP_OK; + +exit: + if(err != ESP_OK){ + if( *index >= 0 ) { + esp_littlefs_free(&_efs[*index]); + } + else{ + esp_littlefs_free(&efs); + } + } + xSemaphoreGive(_efs_lock); + return err; +} + +/** + * @brief + * @parameter efs file system context + */ +static inline int sem_take(esp_littlefs_t *efs) { + int res; +#if LOG_LOCAL_LEVEL >= 5 + ESP_LOGV(ESP_LITTLEFS_TAG, "------------------------ Sem Taking [%s]", pcTaskGetName(NULL)); +#endif + res = xSemaphoreTakeRecursive(efs->lock, portMAX_DELAY); +#if LOG_LOCAL_LEVEL >= 5 + ESP_LOGV(ESP_LITTLEFS_TAG, "--------------------->>> Sem Taken [%s]", pcTaskGetName(NULL)); +#endif + return res; +} + +/** + * @brief + * @parameter efs file system context + */ +static inline int sem_give(esp_littlefs_t *efs) { +#if LOG_LOCAL_LEVEL >= 5 + ESP_LOGV(ESP_LITTLEFS_TAG, "---------------------<<< Sem Give [%s]", pcTaskGetName(NULL)); +#endif + return xSemaphoreGiveRecursive(efs->lock); +} + + +/* We are using a double allocation system here, which an array and a linked list. + The array contains the pointer to the file descriptor (the index in the array is what's returned to the user). + The linked list is used for file descriptors. + This means that position of nodes in the list must stay consistent: + - Allocation is obvious (append to the list from the head, and realloc the pointers array) + There is still a O(N) search in the cache for a free position to store + - Searching is a O(1) process (good) + - Deallocation is more tricky. That is, for example, + if you need to remove node 5 in a 12 nodes list, you'll have to: + 1) Mark the 5th position as freed (if it's the last position of the array realloc smaller) + 2) Walk the list until finding the pointer to the node O(N) and scrub the node so the chained list stays consistent + 3) Deallocate the node +*/ + +/** + * @brief Get a file descriptor + * @param[in,out] efs file system context + * @param[out] file pointer to a file that'll be filled with a file object + * @param[in] path_len the length of the filepath in bytes (including terminating zero byte) + * @return integer file descriptor. Returns -1 if a FD cannot be obtained. 
+ * @warning This must be called with lock taken + */ +static int esp_littlefs_allocate_fd(esp_littlefs_t *efs, vfs_littlefs_file_t ** file +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + , const size_t path_len +#endif + ) +{ + int i = -1; + + assert( efs->fd_count < UINT16_MAX ); + assert( efs->cache_size < UINT16_MAX ); + + /* Make sure there is enough space in the cache to store new fd */ + if (efs->fd_count + 1 > efs->cache_size) { + uint16_t new_size = (uint16_t)MIN(UINT16_MAX, CONFIG_LITTLEFS_FD_CACHE_REALLOC_FACTOR * efs->cache_size); + /* Resize the cache */ + vfs_littlefs_file_t ** new_cache = realloc(efs->cache, new_size * sizeof(*efs->cache)); + if (!new_cache) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Unable to allocate file cache"); + return -1; /* If it fails here, no harm is done to the filesystem, so it's safe */ + } + /* Zero out the new portions of the cache */ + memset(&new_cache[efs->cache_size], 0, (new_size - efs->cache_size) * sizeof(*efs->cache)); + efs->cache = new_cache; + efs->cache_size = new_size; + } + + + /* Allocate file descriptor here now */ +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + *file = esp_littlefs_calloc(1, sizeof(**file) + path_len); +#else + *file = esp_littlefs_calloc(1, sizeof(**file)); +#endif + + if (*file == NULL) { + /* If it fails here, the file system might have a larger cache, but it's harmless, no need to reverse it */ + ESP_LOGE(ESP_LITTLEFS_TAG, "Unable to allocate FD"); + return -1; + } + + /* Starting from here, nothing can fail anymore */ + +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + /* The trick here is to avoid dual allocation so the path pointer + should point to the next byte after it: + file => [ lfs_file | # | next | path | free_space ] + | /\ + |__/ + */ + (*file)->path = (char*)(*file) + sizeof(**file); +#endif + + /* initialize lfs_file_config */ + (*file)->lfs_file_config.buffer = (*file)->lfs_buffer; +#if ESP_LITTLEFS_ATTR_COUNT + (*file)->lfs_file_config.attrs = (*file)->lfs_attr; + (*file)->lfs_attr[0].type = ESP_LITTLEFS_ATTR_MTIME; + (*file)->lfs_attr[0].buffer = &(*file)->lfs_attr_time_buffer; + (*file)->lfs_attr[0].size = sizeof((*file)->lfs_attr_time_buffer); +#endif + (*file)->lfs_file_config.attr_count = ESP_LITTLEFS_ATTR_COUNT; + + /* Now find a free place in cache */ + for(i=0; i < efs->cache_size; i++) { + if (efs->cache[i] == NULL) { + efs->cache[i] = *file; + break; + } + } + /* Save file in the list */ + (*file)->next = efs->file; + efs->file = *file; + efs->fd_count++; + return i; +} + +/** + * @brief Release a file descriptor + * @param[in,out] efs file system context + * @param[in] fd File Descriptor to release + * @return 0 on success. -1 if a FD cannot be obtained. 
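None of this descriptor bookkeeping is visible to applications; once registered, the filesystem is driven through the standard VFS file API. A hypothetical app-level round trip (mount point invented for illustration):

    FILE *f = fopen("/littlefs/hello.txt", "w");  /* hypothetical mount point */
    if (f) {
        fputs("hello\n", f);
        fclose(f);  /* routed to vfs_littlefs_close, which releases the FD slot */
    }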
+ * @warning This must be called with lock taken + */ +static int esp_littlefs_free_fd(esp_littlefs_t *efs, int fd){ + vfs_littlefs_file_t * file, * head; + + if((uint32_t)fd >= efs->cache_size) { + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + return -1; + } + + /* Get the file descriptor to free it */ + file = efs->cache[fd]; + head = efs->file; + /* Search for file in SLL to remove it */ + if (file == head) { + /* Last file, can't fail */ + efs->file = efs->file->next; + } else { + while (head && head->next != file) { + head = head->next; + } + if (!head) { + ESP_LOGE(ESP_LITTLEFS_TAG, "Inconsistent list"); + return -1; + } + /* Transaction starts here and can't fail anymore */ + head->next = file->next; + } + efs->cache[fd] = NULL; + efs->fd_count--; + + ESP_LOGV(ESP_LITTLEFS_TAG, "Clearing FD"); + free(file); + +#if 0 + /* Realloc smaller if it's possible + * * Find and realloc based on number of trailing NULL ptrs in cache + * * Leave some hysteresis to prevent thrashing around resize points + * This is disabled for now because it adds unnecessary complexity + * and binary size increase that outweighs its benefits. + */ + if(efs->cache_size > CONFIG_LITTLEFS_FD_CACHE_MIN_SIZE) { + uint16_t n_free; + uint16_t new_size = efs->cache_size / CONFIG_LITTLEFS_FD_CACHE_REALLOC_FACTOR; + + if(new_size >= CONFIG_LITTLEFS_FD_CACHE_MIN_SIZE) { + /* Count number of trailing NULL ptrs */ + for(n_free=0; n_free < efs->cache_size; n_free++) { + if(efs->cache[efs->cache_size - n_free - 1] != NULL) { + break; + } + } + + if(n_free >= (efs->cache_size - new_size)){ + new_size += CONFIG_LITTLEFS_FD_CACHE_HYST; + ESP_LOGV(ESP_LITTLEFS_TAG, "Reallocating cache %i -> %i", efs->cache_size, new_size); + vfs_littlefs_file_t ** new_cache; + new_cache = realloc(efs->cache, new_size * sizeof(*efs->cache)); + /* No harm on realloc failure, continue using the oversized cache */ + if(new_cache) { + efs->cache = new_cache; + efs->cache_size = new_size; + } + } + } + } +#endif + + return 0; +} + +/** + * @brief Compute the 32-bit DJB2 hash of the given string. + * @param[in] path the path to hash + * @returns the hash for this path + */ +static uint32_t compute_hash(const char * path) { + uint32_t hash = 5381; + char c; + + while ((c = *path++)) + hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ + return hash; +} + +#ifdef CONFIG_VFS_SUPPORT_DIR +/** + * @brief finds an open file descriptor by file name. + * @param[in,out] efs file system context + * @param[in] path File path to check. + * @returns integer file descriptor. Returns -1 if not found. + * @warning This must be called with lock taken + * @warning if CONFIG_LITTLEFS_USE_ONLY_HASH, there is a slim chance an + * erroneous FD may be returned on hash collision. + */ +static int esp_littlefs_get_fd_by_name(esp_littlefs_t *efs, const char *path){ + uint32_t hash = compute_hash(path); + + for(uint16_t i=0, j=0; i < efs->cache_size && j < efs->fd_count; i++){ + if (efs->cache[i]) { + ++j; + + if ( + efs->cache[i]->hash == hash // Faster than strcmp +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + && strcmp(path, efs->cache[i]->path) == 0 // May as well check in case of hash collision. Usually short-circuited.
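compute_hash above is the classic DJB2 string hash (hash = hash * 33 + c, seeded with 5381); collisions are possible, which is why the strcmp confirmation just before this point is kept unless CONFIG_LITTLEFS_USE_ONLY_HASH trades it away for RAM. A worked micro-example:

    /* DJB2 by hand for the two-character path "/a":
     * h0 = 5381
     * h1 = h0 * 33 + '/' = 5381 * 33 + 47   = 177620
     * h2 = h1 * 33 + 'a' = 177620 * 33 + 97 = 5861557 */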
+#endif + ) { + ESP_LOGV(ESP_LITTLEFS_TAG, "Found \"%s\" at FD %d.", path, i); + return i; + } + } + } + ESP_LOGV(ESP_LITTLEFS_TAG, "Unable to find an FD for \"%s\"", path); + return -1; +} +#endif + +/*** Filesystem Hooks ***/ + +static int vfs_littlefs_open(void* ctx, const char * path, int flags, int mode) { + /* Note: mode is currently unused */ + int fd=-1, lfs_flags, res; + esp_littlefs_t *efs = (esp_littlefs_t *)ctx; + vfs_littlefs_file_t *file = NULL; +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + size_t path_len = strlen(path) + 1; // include NULL terminator +#endif +#if CONFIG_LITTLEFS_OPEN_DIR + struct lfs_info info; +#endif + + assert(path); + + ESP_LOGV(ESP_LITTLEFS_TAG, "Opening %s", path); + + /* Convert flags to lfs flags */ + lfs_flags = esp_littlefs_flags_conv(flags); + if(efs->read_only && lfs_flags != LFS_O_RDONLY) { + return LFS_ERR_INVAL; + } + + /* Get a FD */ + sem_take(efs); + +#if CONFIG_LITTLEFS_OPEN_DIR + /* Check if it is a file with same path */ + if (flags & O_DIRECTORY) { + res = lfs_stat(efs->fs, path, &info); + if (res == LFS_ERR_OK) { + if (info.type == LFS_TYPE_REG) { + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Cannot open as directory: path is a regular file"); + errno = ENOTDIR; + return LFS_ERR_INVAL; + } + } + } +#endif + + fd = esp_littlefs_allocate_fd(efs, &file +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + , path_len +#endif + ); + + if(fd < 0) { + errno = lfs_errno_remap(fd); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Error obtaining FD"); + return LFS_ERR_INVAL; + } + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT + /* Create all parent directories (if necessary) */ + ESP_LOGV(ESP_LITTLEFS_TAG, "LITTLEFS_SPIFFS_COMPAT attempting to create all directories for %s", path); + mkdirs(efs, path); +#endif // CONFIG_LITTLEFS_SPIFFS_COMPAT + +#ifndef CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE + /* Open File */ + res = lfs_file_opencfg(efs->fs, &file->file, path, lfs_flags, &file->lfs_file_config); +#if CONFIG_LITTLEFS_MTIME_USE_NONCE + if(!(lfs_flags & LFS_O_RDONLY)){ + // When the READ flag is set, LittleFS will automatically populate attributes. + // If it's not set, it will not populate attributes. + // We want the attributes regardless so that we can properly update them. + file->lfs_attr_time_buffer = esp_littlefs_get_mtime_attr(efs, path); + } +#endif + +#else + #error "The use of static buffers is not currently supported by this VFS wrapper" +#endif + +#if CONFIG_LITTLEFS_OPEN_DIR + if ( flags & O_DIRECTORY && res == LFS_ERR_ISDIR) { + res = LFS_ERR_OK; + file->file.flags = flags; + } +#endif + + if( res < 0 ) { + errno = lfs_errno_remap(res); + esp_littlefs_free_fd(efs, fd); + sem_give(efs); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to open file %s. Error %s (%d)", + path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to open file. Error %s (%d)", + esp_littlefs_errno(res), res); +#endif + return LFS_ERR_INVAL; + } + + /* Sync after opening. If we are overwriting a file, this frees that + * file's blocks in storage, preventing out-of-space errors. + * See TEST_CASE: + * "Rewriting file frees space immediately (#7426)" + */ +#if CONFIG_LITTLEFS_OPEN_DIR + if ( (flags & O_DIRECTORY) == 0 ) { +#endif + if(!efs->read_only && lfs_flags != LFS_O_RDONLY) + { + res = esp_littlefs_file_sync(efs, file); + } + if(res < 0){ + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to sync at opening file \"%s\".
Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to sync at opening file %d. Error %d", fd, res); +#endif + } + +#if CONFIG_LITTLEFS_OPEN_DIR + } +#endif + + file->hash = compute_hash(path); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + memcpy(file->path, path, path_len); +#endif + + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Done opening %s", path); + return fd; +} + +static ssize_t vfs_littlefs_write(void* ctx, int fd, const void * data, size_t size) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + ssize_t res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_file_write(efs->fs, &file->file, data, size); +#ifdef CONFIG_LITTLEFS_FLUSH_FILE_EVERY_WRITE + if(res > 0) { + vfs_littlefs_fsync(ctx, fd); + } +#endif + sem_give(efs); + + if(res < 0){ + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to write FD %d; path \"%s\". Error %s (%d)", + fd, file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to write FD %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + return -1; + } + + return res; +} + +static ssize_t vfs_littlefs_read(void* ctx, int fd, void * dst, size_t size) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + ssize_t res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_file_read(efs->fs, &file->file, dst, size); + sem_give(efs); + + if(res < 0){ + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to read file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to read FD %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + return -1; + } + + return res; +} + +static ssize_t vfs_littlefs_pwrite(void *ctx, int fd, const void *src, size_t size, off_t offset) +{ + esp_littlefs_t *efs = (esp_littlefs_t *)ctx; + ssize_t res, save_res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if ((uint32_t)fd > efs->cache_size) + { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + + off_t old_offset = lfs_file_seek(efs->fs, &file->file, 0, SEEK_CUR); + if (old_offset < (off_t)0) + { + res = old_offset; + goto exit; + } + + /* Set to wanted position. */ + res = lfs_file_seek(efs->fs, &file->file, offset, SEEK_SET); + if (res < (off_t)0) + goto exit; + + /* Write out the data. */ + res = lfs_file_write(efs->fs, &file->file, src, size); + + /* Now we have to restore the position. If this fails we have to + return this as an error. But if the writing also failed we + return writing error. */ + save_res = lfs_file_seek(efs->fs, &file->file, old_offset, SEEK_SET); + if (res >= (ssize_t)0 && save_res < (off_t)0) + { + res = save_res; + } + sem_give(efs); + +exit: + if (res < 0) + { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to write FD %d; path \"%s\". 
Error %s (%d)", + fd, file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to write FD %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + return -1; + } + + return res; +} + +static ssize_t vfs_littlefs_pread(void *ctx, int fd, void *dst, size_t size, off_t offset) +{ + esp_littlefs_t *efs = (esp_littlefs_t *)ctx; + ssize_t res, save_res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if ((uint32_t)fd > efs->cache_size) + { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + + off_t old_offset = lfs_file_seek(efs->fs, &file->file, 0, SEEK_CUR); + if (old_offset < (off_t)0) + { + res = old_offset; + goto exit; + } + + /* Set to wanted position. */ + res = lfs_file_seek(efs->fs, &file->file, offset, SEEK_SET); + if (res < (off_t)0) + goto exit; + + /* Read the data. */ + res = lfs_file_read(efs->fs, &file->file, dst, size); + + /* Now we have to restore the position. If this fails we have to + return this as an error. But if the reading also failed we + return reading error. */ + save_res = lfs_file_seek(efs->fs, &file->file, old_offset, SEEK_SET); + if (res >= (ssize_t)0 && save_res < (off_t)0) + { + res = save_res; + } + sem_give(efs); + +exit: + if (res < 0) + { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to read file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to read FD %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + return -1; + } + + return res; +} + +static int vfs_littlefs_close(void* ctx, int fd) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + int res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + + file = efs->cache[fd]; + +#if CONFIG_LITTLEFS_OPEN_DIR + if ((file->file.flags & O_DIRECTORY) == 0) { +#endif +#if CONFIG_LITTLEFS_USE_MTIME + file->lfs_attr_time_buffer = esp_littlefs_get_updated_time(efs, file, NULL); +#endif + res = lfs_file_close(efs->fs, &file->file); + if(res < 0){ + errno = lfs_errno_remap(res); + sem_give(efs); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to close file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to close Fd %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + return -1; + } + // TODO: update directory containing file's mtime. 
+#if CONFIG_LITTLEFS_OPEN_DIR + } else { + res = 0; + } +#endif + + esp_littlefs_free_fd(efs, fd); + sem_give(efs); + return res; +} + +static off_t vfs_littlefs_lseek(void* ctx, int fd, off_t offset, int mode) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + lfs_soff_t res; + vfs_littlefs_file_t *file = NULL; + int whence; + + switch (mode) { + case SEEK_SET: whence = LFS_SEEK_SET; break; + case SEEK_CUR: whence = LFS_SEEK_CUR; break; + case SEEK_END: whence = LFS_SEEK_END; break; + default: + ESP_LOGE(ESP_LITTLEFS_TAG, "Invalid mode"); + errno = EINVAL; + return -1; + } + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_file_seek(efs->fs, &file->file, offset, whence); + sem_give(efs); + + if(res < 0){ + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to seek file \"%s\" to offset %08x. Error %s (%d)", + file->path, (unsigned int)offset, esp_littlefs_errno(res), (int) res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to seek FD %d to offset %08x. Error (%d)", + fd, (unsigned int)offset, (int) res); +#endif + return -1; + } + + return res; +} + +static int vfs_littlefs_fsync(void* ctx, int fd) +{ + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + ssize_t res; + vfs_littlefs_file_t *file = NULL; + + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = esp_littlefs_file_sync(efs, file); + sem_give(efs); + + if(res < 0){ + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to sync file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to sync file %d. Error %d", fd, res); +#endif + return -1; + } + + return res; +} + +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH +static int vfs_littlefs_fstat(void* ctx, int fd, struct stat * st) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + struct lfs_info info; + int res; + vfs_littlefs_file_t *file = NULL; + + memset(st, 0, sizeof(struct stat)); + st->st_blksize = efs->cfg.block_size; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD must be <%d.", efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_stat(efs->fs, file->path, &info); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to stat file \"%s\". 
Error %s (%d)", + file->path, esp_littlefs_errno(res), res); + return -1; + } + +#if CONFIG_LITTLEFS_USE_MTIME + st->st_mtime = file->lfs_attr_time_buffer; +#endif + + sem_give(efs); + if(info.type==LFS_TYPE_REG){ + // Regular File + st->st_mode = S_IFREG; + st->st_size = info.size; + } + else{ + // Directory + st->st_mode = S_IFDIR; + st->st_size = 0; // info.size is only valid for REG files + } + return 0; +} +#endif + +#ifdef CONFIG_VFS_SUPPORT_DIR +static int vfs_littlefs_stat(void* ctx, const char * path, struct stat * st) { + assert(path); + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + struct lfs_info info; + int res; + + memset(st, 0, sizeof(struct stat)); + st->st_blksize = efs->cfg.block_size; + + sem_take(efs); + res = lfs_stat(efs->fs, path, &info); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + /* Not strictly an error, since stat can be used to check + * if a file exists */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to stat path \"%s\". Error %s (%d)", + path, esp_littlefs_errno(res), res); + return -1; + } +#if CONFIG_LITTLEFS_USE_MTIME + st->st_mtime = esp_littlefs_get_mtime_attr(efs, path); +#endif + sem_give(efs); + if(info.type==LFS_TYPE_REG){ + // Regular File + st->st_mode = S_IFREG; + st->st_size = info.size; + } + else{ + // Directory + st->st_mode = S_IFDIR; + st->st_size = 0; // info.size is only valid for REG files + } + return 0; +} + +static int vfs_littlefs_unlink(void* ctx, const char *path) { +#define fail_str_1 "Failed to unlink path \"%s\"." + assert(path); + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + struct lfs_info info; + int res; + + sem_take(efs); + res = lfs_stat(efs->fs, path, &info); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, fail_str_1 " Error %s (%d)", + path, esp_littlefs_errno(res), res); + return -1; + } + + if(esp_littlefs_get_fd_by_name(efs, path) >= 0) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, fail_str_1 " Has open FD.", path); + errno = EBUSY; + return -1; + } + + if (info.type == LFS_TYPE_DIR) { + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Cannot unlink a directory."); + errno = EISDIR; + return -1; + } + + res = lfs_remove(efs->fs, path); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, fail_str_1 " Error %s (%d)", + path, esp_littlefs_errno(res), res); + return -1; + } + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT + /* Attempt to delete all parent directories that are empty */ + rmdirs(efs, path); +#endif // CONFIG_LITTLEFS_SPIFFS_COMPAT + + sem_give(efs); + + return 0; +#undef fail_str_1 +} + +static int vfs_littlefs_rename(void* ctx, const char *src, const char *dst) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + int res; + + sem_take(efs); + + if(esp_littlefs_get_fd_by_name(efs, src) >= 0){ + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "Cannot rename; src \"%s\" is open.", src); + errno = EBUSY; + return -1; + } + else if(esp_littlefs_get_fd_by_name(efs, dst) >= 0){ + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "Cannot rename; dst \"%s\" is open.", dst); + errno = EBUSY; + return -1; + } + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT + /* Create all parent directories to dst (if necessary) */ + ESP_LOGV(ESP_LITTLEFS_TAG, "LITTLEFS_SPIFFS_COMPAT attempting to create all directories for %s", src); + mkdirs(efs, dst); +#endif + + res = lfs_rename(efs->fs, src, dst); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to rename \"%s\" -> \"%s\". 
Error %s (%d)", + src, dst, esp_littlefs_errno(res), res); + return -1; + } + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT + /* Attempt to delete all parent directories from src that are empty */ + rmdirs(efs, src); +#endif // CONFIG_LITTLEFS_SPIFFS_COMPAT + + sem_give(efs); + + return 0; +} + +static DIR* vfs_littlefs_opendir(void* ctx, const char* name) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + int res; + vfs_littlefs_dir_t *dir = NULL; + + dir = esp_littlefs_calloc(1, sizeof(vfs_littlefs_dir_t)); + if( dir == NULL ) { + ESP_LOGE(ESP_LITTLEFS_TAG, "dir struct could not be malloced"); + errno = ENOMEM; + goto exit; + } + + dir->path = strdup(name); + if(dir->path == NULL){ + errno = ENOMEM; + ESP_LOGE(ESP_LITTLEFS_TAG, "dir path name could not be malloced"); + goto exit; + } + + sem_take(efs); + res = lfs_dir_open(efs->fs, &dir->d, dir->path); + sem_give(efs); + if (res < 0) { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to opendir \"%s\". Error %s (%d)", + dir->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to opendir \"%s\". Error %d", dir->path, res); +#endif + goto exit; + } + + return (DIR *)dir; + +exit: + esp_littlefs_dir_free(dir); + return NULL; +} + +static int vfs_littlefs_closedir(void* ctx, DIR* pdir) { + assert(pdir); + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + vfs_littlefs_dir_t * dir = (vfs_littlefs_dir_t *) pdir; + int res; + + sem_take(efs); + res = lfs_dir_close(efs->fs, &dir->d); + sem_give(efs); + if (res < 0) { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to closedir \"%s\". Error %s (%d)", + dir->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to closedir \"%s\". Error %d", dir->path, res); +#endif + return res; + } + + esp_littlefs_dir_free(dir); + return 0; +} + +static struct dirent* vfs_littlefs_readdir(void* ctx, DIR* pdir) { + assert(pdir); + vfs_littlefs_dir_t * dir = (vfs_littlefs_dir_t *) pdir; + int res; + struct dirent* out_dirent; + + res = vfs_littlefs_readdir_r(ctx, pdir, &dir->e, &out_dirent); + if (res != 0) return NULL; + return out_dirent; +} + +static int vfs_littlefs_readdir_r(void* ctx, DIR* pdir, + struct dirent* entry, struct dirent** out_dirent) { + assert(pdir); + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + vfs_littlefs_dir_t * dir = (vfs_littlefs_dir_t *) pdir; + int res; + struct lfs_info info = { 0 }; + + sem_take(efs); + do{ /* Read until we get a real object name */ + res = lfs_dir_read(efs->fs, &dir->d, &info); + }while( res>0 && (strcmp(info.name, ".") == 0 || strcmp(info.name, "..") == 0)); + sem_give(efs); + if (res < 0) { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to readdir \"%s\". Error %s (%d)", + dir->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to readdir \"%s\". Error %d", dir->path, res); +#endif + return -1; + } + + if(info.type == LFS_TYPE_REG) { + ESP_LOGV(ESP_LITTLEFS_TAG, "readdir a file of size %u named \"%s\"", + (unsigned int) info.size, info.name); + } + else { + ESP_LOGV(ESP_LITTLEFS_TAG, "readdir a dir named \"%s\"", info.name); + } + + if(res == 0) { + /* End of Objs */ + ESP_LOGV(ESP_LITTLEFS_TAG, "Reached the end of the directory."); + *out_dirent = NULL; + } + else { + entry->d_ino = 0; + entry->d_type = info.type == LFS_TYPE_REG ? 
DT_REG : DT_DIR; + strncpy(entry->d_name, info.name, sizeof(entry->d_name)); + *out_dirent = entry; + } + dir->offset++; + + return 0; +} + +static long vfs_littlefs_telldir(void* ctx, DIR* pdir) { + assert(pdir); + vfs_littlefs_dir_t * dir = (vfs_littlefs_dir_t *) pdir; + return dir->offset; +} + +static void vfs_littlefs_seekdir(void* ctx, DIR* pdir, long offset) { + assert(pdir); + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + vfs_littlefs_dir_t * dir = (vfs_littlefs_dir_t *) pdir; + int res; + + if (offset < dir->offset) { + /* close and re-open dir to rewind to beginning */ + sem_take(efs); + res = lfs_dir_rewind(efs->fs, &dir->d); + sem_give(efs); + if (res < 0) { + errno = lfs_errno_remap(res); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to rewind dir \"%s\". Error %s (%d)", + dir->path, esp_littlefs_errno(res), res); + return; + } + dir->offset = 0; + } + + while(dir->offset < offset){ + struct dirent *out_dirent; + res = vfs_littlefs_readdir_r(ctx, pdir, &dir->e, &out_dirent); + if( res != 0 ){ + ESP_LOGE(ESP_LITTLEFS_TAG, "Error readdir_r"); + return; + } + } +} + +static int vfs_littlefs_mkdir(void* ctx, const char* name, mode_t mode) { + /* Note: mode is currently unused */ + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + int res; + ESP_LOGV(ESP_LITTLEFS_TAG, "mkdir \"%s\"", name); + + sem_take(efs); + res = lfs_mkdir(efs->fs, name); + sem_give(efs); + if (res < 0) { + errno = lfs_errno_remap(res); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to mkdir \"%s\". Error %s (%d)", + name, esp_littlefs_errno(res), res); + return -1; + } + return 0; +} + +static int vfs_littlefs_rmdir(void* ctx, const char* name) { + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + struct lfs_info info; + int res; + + /* Error Checking */ + sem_take(efs); + res = lfs_stat(efs->fs, name, &info); + if (res < 0) { + errno = lfs_errno_remap(res); + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "\"%s\" doesn't exist.", name); + return -1; + } + + if (info.type != LFS_TYPE_DIR) { + sem_give(efs); + ESP_LOGV(ESP_LITTLEFS_TAG, "\"%s\" is not a directory.", name); + errno = ENOTDIR; + return -1; + } + + /* Unlink the dir */ + res = lfs_remove(efs->fs, name); + sem_give(efs); + if ( res < 0) { + errno = lfs_errno_remap(res); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to unlink path \"%s\". Error %s (%d)", + name, esp_littlefs_errno(res), res); + return -1; + } + + return 0; +} + +static ssize_t vfs_littlefs_truncate( void *ctx, const char *path, off_t size ) +{ + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + ssize_t res = -1; + vfs_littlefs_file_t *file = NULL; + + int fd = vfs_littlefs_open( ctx, path, LFS_O_RDWR, 438 ); + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) + { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_file_truncate( efs->fs, &file->file, size ); + sem_give(efs); + + if(res < 0) + { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to truncate file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to truncate FD %d. 
Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + res = -1; + } + else + { + ESP_LOGV( ESP_LITTLEFS_TAG, "Truncated file %s to %u bytes", path, (unsigned int) size ); + } + vfs_littlefs_close( ctx, fd ); + return res; +} + +#ifdef ESP_LITTLEFS_ENABLE_FTRUNCATE +static int vfs_littlefs_ftruncate(void *ctx, int fd, off_t size) +{ + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + ssize_t res; + vfs_littlefs_file_t *file = NULL; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + file = efs->cache[fd]; + res = lfs_file_truncate( efs->fs, &file->file, size ); + sem_give(efs); + + if(res < 0) + { + errno = lfs_errno_remap(res); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to truncate file \"%s\". Error %s (%d)", + file->path, esp_littlefs_errno(res), res); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to truncate FD %d. Error %s (%d)", + fd, esp_littlefs_errno(res), res); +#endif + res = -1; + } + else + { +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV( ESP_LITTLEFS_TAG, "Truncated file %s to %u bytes", file->path, (unsigned int) size ); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Truncated FD %d to %u bytes", fd, (unsigned int) size ); +#endif + } + return res; +} +#endif // ESP_LITTLEFS_ENABLE_FTRUNCATE +#endif //CONFIG_VFS_SUPPORT_DIR + +/** + * Syncs file while also updating mtime (if necessary) + */ +static int esp_littlefs_file_sync(esp_littlefs_t *efs, vfs_littlefs_file_t *file) +{ + int res; +#if CONFIG_LITTLEFS_USE_MTIME + if((file->file.flags & 0x3) != LFS_O_RDONLY){ + file->lfs_attr_time_buffer = esp_littlefs_get_updated_time(efs, file, NULL); + } +#endif + res = lfs_file_sync(efs->fs, &file->file); + return res; +} + +#if CONFIG_LITTLEFS_USE_MTIME +/** + * Sets the mtime attr to t. + */ +static int esp_littlefs_update_mtime_attr(esp_littlefs_t *efs, const char *path, time_t t) +{ + int res; + res = lfs_setattr(efs->fs, path, ESP_LITTLEFS_ATTR_MTIME, + &t, sizeof(t)); + if( res < 0 ) { + errno = lfs_errno_remap(res); + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to update mtime (%d)", res); + return -1; + } + + return res; +} + +/** + * @brief Only to be used when calcualting what time we should write to disk. + * @param file If non-null, use this file's attribute to get previous file's time (if use nonce). + * @param path If non-null, use this path to read in the previous file's time (if use nonce). + */ +static time_t esp_littlefs_get_updated_time(esp_littlefs_t *efs, vfs_littlefs_file_t *file, const char *path) +{ + time_t t; +#if CONFIG_LITTLEFS_MTIME_USE_SECONDS + // use current time + t = time(NULL); +#elif CONFIG_LITTLEFS_MTIME_USE_NONCE + assert( sizeof(time_t) == 8 ); + if(path){ + t = esp_littlefs_get_mtime_attr(efs, path); + } + else if(file){ + t = file->lfs_attr_time_buffer; + } + else{ + // Invalid input arguments. 
+ assert(0); + } + if( 0 == t ) t = esp_random(); + else t += 1; + + if( 0 == t ) t = 1; +#else +#error "Invalid MTIME configuration" +#endif + return t; +} + +static int vfs_littlefs_utime(void *ctx, const char *path, const struct utimbuf *times) +{ + esp_littlefs_t * efs = (esp_littlefs_t *)ctx; + time_t t; + + assert(path); + + sem_take(efs); + if (times) { + t = times->modtime; + } else { + t = esp_littlefs_get_updated_time(efs, NULL, path); + } + + int ret = esp_littlefs_update_mtime_attr(efs, path, t); + sem_give(efs); + return ret; +} + +static time_t esp_littlefs_get_mtime_attr(esp_littlefs_t *efs, const char *path) +{ + time_t t; + int size; + size = lfs_getattr(efs->fs, path, ESP_LITTLEFS_ATTR_MTIME, + &t, sizeof(t)); + if( size < 0 ) { + errno = lfs_errno_remap(size); +#ifndef CONFIG_LITTLEFS_USE_ONLY_HASH + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to get mtime attribute %s (%d)", + esp_littlefs_errno(size), size); +#else + ESP_LOGV(ESP_LITTLEFS_TAG, "Failed to get mtime attribute %d", size); +#endif + return -1; + } + return t; +} +#endif //CONFIG_LITTLEFS_USE_MTIME + +#if CONFIG_LITTLEFS_SPIFFS_COMPAT +/** + * @brief Recursively make all parent directories for a file. + * @param[in] dir Path of directories to make up to. The last element + * of the path is assumed to be the file and IS NOT created. + * e.g. + * "foo/bar/baz" + * will create directories "foo" and "bar" + */ +static void mkdirs(esp_littlefs_t * efs, const char *dir) { + char tmp[CONFIG_LITTLEFS_OBJ_NAME_LEN]; + char *p = NULL; + + strlcpy(tmp, dir, sizeof(tmp)); + for(p = tmp + 1; *p; p++) { + if(*p == '/') { + *p = '\0'; + vfs_littlefs_mkdir((void*)efs, tmp, S_IRWXU); + *p = '/'; + } + } +} + +/** + * @brief Recursively attempt to delete all empty directories for a file. + * @param[in] dir Path of directories to delete. The last element of the path + * is assumed to be the file and IS NOT deleted. + * e.g. + * "foo/bar/baz" + * will attempt to delete directories (in order): + * 1. "foo/bar/baz" + * 2. "foo/bar" + * 3. 
"foo" + */ + +static void rmdirs(esp_littlefs_t * efs, const char *dir) { + char tmp[CONFIG_LITTLEFS_OBJ_NAME_LEN]; + char *p = NULL; + + strlcpy(tmp, dir, sizeof(tmp)); + for(p = tmp + strlen(tmp) - 1; p != tmp; p--) { + if(*p == '/') { + *p = '\0'; + vfs_littlefs_rmdir((void*)efs, tmp); + *p = '/'; + } + } +} + +#endif // CONFIG_LITTLEFS_SPIFFS_COMPAT + +static int vfs_littlefs_fcntl(void* ctx, int fd, int cmd, int arg) +{ + int result = 0; + esp_littlefs_t *efs = (esp_littlefs_t *)ctx; + lfs_file_t *lfs_file = NULL; + vfs_littlefs_file_t *file = NULL; + const uint32_t flags_mask = LFS_O_WRONLY | LFS_O_RDONLY | LFS_O_RDWR; + + sem_take(efs); + if((uint32_t)fd > efs->cache_size) { + sem_give(efs); + ESP_LOGE(ESP_LITTLEFS_TAG, "FD %d must be <%d.", fd, efs->cache_size); + errno = EBADF; + return -1; + } + + file = efs->cache[fd]; + if (file) { + lfs_file = &efs->cache[fd]->file; + } else { + sem_give(efs); + errno = EBADF; + return -1; + } + + if (cmd == F_GETFL) { + if ((lfs_file->flags & flags_mask) == LFS_O_WRONLY) { + result = O_WRONLY; + } else if ((lfs_file->flags & flags_mask) == LFS_O_RDONLY) { + result = O_RDONLY; + } else if ((lfs_file->flags & flags_mask) == LFS_O_RDWR) { + result = O_RDWR; + } + } +#ifdef CONFIG_LITTLEFS_FCNTL_GET_PATH + else if (cmd == F_GETPATH) { + char *buffer = (char *)(uintptr_t)arg; + + assert(buffer); + + if (snprintf(buffer, MAXPATHLEN, "%s%s", efs->base_path, file->path) > 0) { + result = 0; + } else { + result = -1; + errno = EINVAL; + } + } +#endif + else { + result = -1; + errno = ENOSYS; + } + + sem_give(efs); + + return result; +} diff --git a/components/joltwallet__littlefs/src/lfs_config.c b/components/joltwallet__littlefs/src/lfs_config.c new file mode 100644 index 0000000..6d9e49b --- /dev/null +++ b/components/joltwallet__littlefs/src/lfs_config.c @@ -0,0 +1,28 @@ +/* + * lfs util functions + * + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "lfs_config.h" + +const char ESP_LITTLEFS_TAG[] = "esp_littlefs"; + +// Software CRC implementation with small lookup table +uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) { + static const uint32_t rtable[16] = { + 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac, + 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, + 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, + 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c, + }; + + const uint8_t *data = buffer; + + for (size_t i = 0; i < size; i++) { + crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf]; + crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf]; + } + + return crc; +} diff --git a/components/joltwallet__littlefs/src/lfs_config.h b/components/joltwallet__littlefs/src/lfs_config.h new file mode 100644 index 0000000..1a2a506 --- /dev/null +++ b/components/joltwallet__littlefs/src/lfs_config.h @@ -0,0 +1,244 @@ +/* + * lfs utility functions + * + * Copyright (c) 2017, Arm Limited. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_CFG_H +#define LFS_CFG_H + +// System includes +#include +#include +#include +#include +#include "sdkconfig.h" +#include "esp_log.h" + + +#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT) || \ + defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) || \ + defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM) +#include +#include "esp_heap_caps.h" +#endif + +#ifdef CONFIG_LITTLEFS_ASSERTS +#include +#endif + +#if !defined(LFS_NO_DEBUG) || \ + !defined(LFS_NO_WARN) || \ + !defined(LFS_NO_ERROR) || \ + defined(LFS_YES_TRACE) +#include +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// Macros, may be replaced by system specific wrappers. Arguments to these +// macros must not have side-effects as the macros can be removed for a smaller +// code footprint +extern const char ESP_LITTLEFS_TAG[]; + +// Logging functions +#ifndef LFS_TRACE +#ifdef LFS_YES_TRACE +#define LFS_TRACE_(fmt, ...) \ + ESP_LOGV(ESP_LITTLEFS_TAG, "%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "") +#else +#define LFS_TRACE(...) +#endif +#endif + +#ifndef LFS_DEBUG +#ifndef LFS_NO_DEBUG +#define LFS_DEBUG_(fmt, ...) \ + ESP_LOGD(ESP_LITTLEFS_TAG, "%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "") +#else +#define LFS_DEBUG(...) +#endif +#endif + +#ifndef LFS_WARN +#ifndef LFS_NO_WARN +#define LFS_WARN_(fmt, ...) \ + ESP_LOGW(ESP_LITTLEFS_TAG, "%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "") +#else +#define LFS_WARN(...) +#endif +#endif + +#ifndef LFS_ERROR +#ifndef LFS_NO_ERROR +#define LFS_ERROR_(fmt, ...) \ + ESP_LOGE(ESP_LITTLEFS_TAG, "%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "") +#else +#define LFS_ERROR(...) +#endif +#endif + +// Runtime assertions +#ifdef CONFIG_LITTLEFS_ASSERTS +#define LFS_ASSERT(test) assert(test) +#else +#define LFS_ASSERT(test) +#endif + + +// Builtin functions, these may be replaced by more efficient +// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more +// expensive basic C implementation for debugging purposes + +// Min/max functions for unsigned 32-bit numbers +static inline uint32_t lfs_max(uint32_t a, uint32_t b) { + return (a > b) ? a : b; +} + +static inline uint32_t lfs_min(uint32_t a, uint32_t b) { + return (a < b) ? 
a : b; +} + +// Align to nearest multiple of a size +static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) { + return a - (a % alignment); +} + +static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) { + return lfs_aligndown(a + alignment-1, alignment); +} + +// Find the smallest power of 2 greater than or equal to a +static inline uint32_t lfs_npw2(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return 32 - __builtin_clz(a-1); +#else + uint32_t r = 0; + uint32_t s; + a -= 1; + s = (a > 0xffff) << 4; a >>= s; r |= s; + s = (a > 0xff ) << 3; a >>= s; r |= s; + s = (a > 0xf ) << 2; a >>= s; r |= s; + s = (a > 0x3 ) << 1; a >>= s; r |= s; + return (r | (a >> 1)) + 1; +#endif +} + +// Count the number of trailing binary zeros in a +// lfs_ctz(0) may be undefined +static inline uint32_t lfs_ctz(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__) + return __builtin_ctz(a); +#else + return lfs_npw2((a & -a) + 1) - 1; +#endif +} + +// Count the number of binary ones in a +static inline uint32_t lfs_popc(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return __builtin_popcount(a); +#else + a = a - ((a >> 1) & 0x55555555); + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); + return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24; +#endif +} + +// Find the sequence comparison of a and b, this is the distance +// between a and b ignoring overflow +static inline int lfs_scmp(uint32_t a, uint32_t b) { + return (int)(unsigned)(a - b); +} + +// Convert between 32-bit little-endian and native order +static inline uint32_t lfs_fromle32(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + return a; +#elif !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + return __builtin_bswap32(a); +#else + return (((uint8_t*)&a)[0] << 0) | + (((uint8_t*)&a)[1] << 8) | + (((uint8_t*)&a)[2] << 16) | + (((uint8_t*)&a)[3] << 24); +#endif +} + +static inline uint32_t lfs_tole32(uint32_t a) { + return lfs_fromle32(a); +} + +// Convert between 32-bit big-endian and native order +static inline uint32_t lfs_frombe32(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + return __builtin_bswap32(a); +#elif !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + return a; +#else + return 
(((uint8_t*)&a)[0] << 24) | + (((uint8_t*)&a)[1] << 16) | + (((uint8_t*)&a)[2] << 8) | + (((uint8_t*)&a)[3] << 0); +#endif +} + +static inline uint32_t lfs_tobe32(uint32_t a) { + return lfs_frombe32(a); +} + +// Calculate CRC-32 with polynomial = 0x04c11db7 +uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size); + +// Allocate memory, only used if buffers are not provided to littlefs +// For the lookahead buffer, memory must be 32-bit aligned +static inline void *lfs_malloc(size_t size) { +#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT) + return malloc(size); // Equivalent to heap_caps_malloc_default(size); +#elif defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) + return heap_caps_malloc(size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL); +#elif defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM) + return heap_caps_malloc(size, MALLOC_CAP_8BIT | MALLOC_CAP_SPIRAM); +#else // CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE or not defined + (void)size; + return NULL; +#endif +} + +// Deallocate memory, only used if buffers are not provided to littlefs +static inline void lfs_free(void *p) { +#if defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_DEFAULT) || \ + defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_INTERNAL) || \ + defined(CONFIG_LITTLEFS_MALLOC_STRATEGY_SPIRAM) + free(p); +#else // CONFIG_LITTLEFS_MALLOC_STRATEGY_DISABLE or not defined + (void)p; +#endif +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/.gitattributes b/components/joltwallet__littlefs/src/littlefs/.gitattributes new file mode 100644 index 0000000..26d0425 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/.gitattributes @@ -0,0 +1,4 @@ +# GitHub really wants to mark littlefs as a python project, telling it to +# reclassify our test .toml files as C code (which they are 95% of anyways) +# remedies this +*.toml linguist-language=c diff --git a/components/joltwallet__littlefs/src/littlefs/.gitignore b/components/joltwallet__littlefs/src/littlefs/.gitignore new file mode 100644 index 0000000..09707c6 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/.gitignore @@ -0,0 +1,34 @@ +# Compilation output +*.o +*.d +*.a +*.ci +*.csv +*.t.* +*.b.* +*.gcno +*.gcda +*.perf +lfs +liblfs.a + +# Testing things +runners/test_runner +runners/bench_runner +lfs.code.csv +lfs.data.csv +lfs.stack.csv +lfs.structs.csv +lfs.cov.csv +lfs.perf.csv +lfs.perfbd.csv +lfs.test.csv +lfs.bench.csv + +# Misc +tags +.gdb_history +scripts/__pycache__ + +# Historical, probably should remove at some point +tests/*.toml.* diff --git a/components/joltwallet__littlefs/src/littlefs/DESIGN.md b/components/joltwallet__littlefs/src/littlefs/DESIGN.md new file mode 100644 index 0000000..9c9703a --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/DESIGN.md @@ -0,0 +1,2173 @@ +## The design of littlefs + +A little fail-safe filesystem designed for microcontrollers. + +``` + | | | .---._____ + .-----. | | +--|o |---| littlefs | +--| |---| | + '-----' '----------' + | | | +``` + +littlefs was originally built as an experiment to learn about filesystem design +in the context of microcontrollers. The question was: How would you build a +filesystem that is resilient to power-loss and flash wear without using +unbounded memory? + +This document covers the high-level design of littlefs, how it is different +than other filesystems, and the design decisions that got us here. For the +low-level details covering every bit on disk, check out [SPEC.md](SPEC.md). 
+
+## The problem
+
+The embedded systems littlefs targets are usually 32-bit microcontrollers with
+around 32 KiB of RAM and 512 KiB of ROM. These are often paired with SPI NOR
+flash chips with about 4 MiB of flash storage. These devices are too small for
+Linux and most existing filesystems, requiring code written specifically with
+size in mind.
+
+Flash itself is an interesting piece of technology with its own quirks and
+nuance. Unlike other forms of storage, writing to flash requires two
+operations: erasing and programming. Programming (setting bits to 0) is
+relatively cheap and can be very granular. Erasing, however (setting bits to
+1), requires an expensive and destructive operation, which gives flash its
+name. [Wikipedia][wikipedia-flash] has more information on how exactly flash
+works.
+
+To make the situation more annoying, it's very common for these embedded
+systems to lose power at any time. Usually, microcontroller code is simple and
+reactive, with no concept of a shutdown routine. This presents a big challenge
+for persistent storage, where an unlucky power loss can corrupt the storage and
+leave a device unrecoverable.
+
+This leaves us with three major requirements for an embedded filesystem.
+
+1. **Power-loss resilience** - On these systems, power can be lost at any time.
+   If a power loss corrupts any persistent data structures, this can cause the
+   device to become unrecoverable. An embedded filesystem must be designed to
+   recover from a power loss during any write operation.
+
+1. **Wear leveling** - Writing to flash is destructive. If a filesystem
+   repeatedly writes to the same block, eventually that block will wear out.
+   Filesystems that don't take wear into account can easily burn through blocks
+   used to store frequently updated metadata and cause a device's early death.
+
+1. **Bounded RAM/ROM** - If the above requirements weren't enough, these
+   systems also have very limited amounts of memory. This rules out many
+   existing filesystem designs, which can lean on relatively large amounts of
+   RAM to temporarily store filesystem metadata.
+
+   For ROM, this means we need to keep our design simple and reuse code paths
+   where possible. For RAM we have a stronger requirement: all RAM usage is
+   bounded. This means RAM usage does not grow as the filesystem changes in
+   size or number of files. This creates a unique challenge as even presumably
+   simple operations, such as traversing the filesystem, become surprisingly
+   difficult.
+
+## Existing designs?
+
+So, what's already out there? There are, of course, many different filesystems;
+however, they often share and borrow features from each other. If we look at
+power-loss resilience and wear leveling, we can narrow these down to a handful
+of designs.
+
+1. First we have the non-resilient, block based filesystems, such as [FAT] and
+   [ext2]. These are the earliest filesystem designs and often the simplest.
+   Here storage is divided into blocks, with each file being stored in a
+   collection of blocks. Without modifications, these filesystems are not
+   power-loss resilient, so updating a file is as simple as rewriting the
+   blocks in place.
+
+   ```
+               .--------.
+               |  root  |
+               |        |
+               |        |
+               '--------'
+            .-'          '-.
+           v                v
+      .--------.        .--------.
+      |   A    |        |   B    |
+      |        |        |        |
+      |        |        |        |
+      '--------'        '--------'
+      .-'            .-'        '-.
+     v              v              v
+   .--------.   .--------.   .--------.
+   |   C    |   |   D    |   |   E    |
+   |        |   |        |   |        |
+   |        |   |        |   |        |
+   '--------'   '--------'   '--------'
+   ```
+
+   Because of their simplicity, these filesystems are usually both the fastest
+   and smallest. However, the lack of power resilience is not great, and the
+   binding relationship of storage location and data removes the filesystem's
+   ability to manage wear.
+
+2. In a completely different direction, we have logging filesystems, such as
+   [JFFS], [YAFFS], and [SPIFFS]. Here, storage location is not bound to a
+   piece of data; instead the entire storage is used for a circular log which
+   is appended to with every change made to the filesystem. Writing appends new
+   changes, while reading requires traversing the log to reconstruct a file.
+   Some logging filesystems cache files to avoid the read cost, but this comes
+   at a tradeoff of RAM.
+
+   ```
+                                 v
+   .--------.--------.--------.--------.--------.--------.--------.--------.
+   |  C     | new B  | new A  |                          |  A     |  B     |
+   |        |        |        |->                        |        |        |
+   |        |        |        |                          |        |        |
+   '--------'--------'--------'--------'--------'--------'--------'--------'
+   ```
+
+   Logging filesystems are beautifully elegant. With a checksum, we can easily
+   detect power-loss and fall back to the previous state by ignoring failed
+   appends. And if that wasn't good enough, their cyclic nature means that
+   logging filesystems distribute wear across storage perfectly.
+
+   The main downside is performance. If we look at garbage collection, the
+   process of cleaning up outdated data from the end of the log, I've yet to
+   see a pure logging filesystem that does not have one of these two costs:
+
+   1. _O(n²)_ runtime
+   2. _O(n)_ RAM
+
+   SPIFFS is a very interesting case here, as it uses the fact that repeated
+   programs to NOR flash are both atomic and masking. This is a very neat
+   solution, however it limits the type of storage you can support.
+
+3. Perhaps the most common type of filesystem, a journaling filesystem is the
+   offspring that happens when you mate a block based filesystem with a logging
+   filesystem. [ext4] and [NTFS] are good examples. Here, we take a normal
+   block based filesystem and add a bounded log where we note every change
+   before it occurs.
+
+   ```
+                               journal
+                              .--------.--------.
+      .--------.              | C'| D' | | E'|  |
+      |  root  |------------->|   |    | |   |->|
+      |        |              |   |    | |   |  |
+      |        |              '--------'--------'
+      '--------'
+      .-'    '-.
+     v          v
+    .--------.  .--------.
+    |   A    |  |   B    |
+    |        |  |        |
+    |        |  |        |
+    '--------'  '--------'
+    .-'        .-'    '-.
+   v          v          v
+   .--------.  .--------.  .--------.
+   |   C    |  |   D    |  |   E    |
+   |        |  |        |  |        |
+   |        |  |        |  |        |
+   '--------'  '--------'  '--------'
+   ```
+
+   This sort of filesystem takes the best from both worlds. Performance can be
+   as fast as a block based filesystem (though updating the journal does have
+   a small cost), and atomic updates to the journal allow the filesystem to
+   recover in the event of a power loss.
+
+   Unfortunately, journaling filesystems have a couple of problems. They are
+   fairly complex, since there are effectively two filesystems running in
+   parallel, which comes with a code size cost. They also offer no protection
+   against wear because of the strong relationship between storage location
+   and data.
+
+4. Last but not least we have copy-on-write (COW) filesystems, such as
+   [btrfs] and [ZFS]. These are very similar to other block based filesystems,
+   but instead of updating blocks in place, all updates are performed by
+   creating a copy with the changes and replacing any references to the old
+   block with our new block.
This recursively pushes all of our problems upwards until we + reach the root of our filesystem, which is often stored in a very small log. + + ``` + .--------. .--------. + | root | write |new root| + | | ==> | | + | | | | + '--------' '--------' + .-' '-. | '-. + | .-------|------------------' v + v v v .--------. + .--------. .--------. | new B | + | A | | B | | | + | | | | | | + | | | | '--------' + '--------' '--------' .-' | + .-' .-' '-. .------------|------' + | | | | v + v v v v .--------. + .--------. .--------. .--------. | new D | + | C | | D | | E | | | + | | | | | | | | + | | | | | | '--------' + '--------' '--------' '--------' + ``` + + COW filesystems are interesting. They offer very similar performance to + block based filesystems while managing to pull off atomic updates without + storing data changes directly in a log. They even disassociate the storage + location of data, which creates an opportunity for wear leveling. + + Well, almost. The unbounded upwards movement of updates causes some + problems. Because updates to a COW filesystem don't stop until they've + reached the root, an update can cascade into a larger set of writes than + would be needed for the original data. On top of this, the upward motion + focuses these writes into the block, which can wear out much earlier than + the rest of the filesystem. + +## littlefs + +So what does littlefs do? + +If we look at existing filesystems, there are two interesting design patterns +that stand out, but each have their own set of problems. Logging, which +provides independent atomicity, has poor runtime performance. And COW data +structures, which perform well, push the atomicity problem upwards. + +Can we work around these limitations? + +Consider logging. It has either a _O(n²)_ runtime or _O(n)_ RAM cost. We +can't avoid these costs, _but_ if we put an upper bound on the size we can at +least prevent the theoretical cost from becoming problem. This relies on the +super secret computer science hack where you can pretend any algorithmic +complexity is _O(1)_ by bounding the input. + +In the case of COW data structures, we can try twisting the definition a bit. +Let's say that our COW structure doesn't copy after a single write, but instead +copies after _n_ writes. This doesn't change most COW properties (assuming you +can write atomically!), but what it does do is prevent the upward motion of +wear. This sort of copy-on-bounded-writes (CObW) still focuses wear, but at +each level we divide the propagation of wear by _n_. With a sufficiently +large _n_ (> branching factor) wear propagation is no longer a problem. + +See where this is going? Separate, logging and COW are imperfect solutions and +have weaknesses that limit their usefulness. But if we merge the two they can +mutually solve each other's limitations. + +This is the idea behind littlefs. At the sub-block level, littlefs is built +out of small, two block logs that provide atomic updates to metadata anywhere +on the filesystem. At the super-block level, littlefs is a CObW tree of blocks +that can be evicted on demand. + +``` + root + .--------.--------. + | A'| B'| | + | | |-> | + | | | | + '--------'--------' + .----' '--------------. + A v B v + .--------.--------. .--------.--------. + | C'| D'| | | E'|new| | + | | |-> | | | E'|-> | + | | | | | | | | + '--------'--------' '--------'--------' + .-' '--. | '------------------. + v v .-' v +.--------. .--------. v .--------. +| C | | D | .--------. 
write | new E | +| | | | | E | ==> | | +| | | | | | | | +'--------' '--------' | | '--------' + '--------' .-' | + .-' '-. .-------------|------' + v v v v + .--------. .--------. .--------. + | F | | G | | new F | + | | | | | | + | | | | | | + '--------' '--------' '--------' +``` + +There are still some minor issues. Small logs can be expensive in terms of +storage, in the worst case a small log costs 4x the size of the original data. +CObW structures require an efficient block allocator since allocation occurs +every _n_ writes. And there is still the challenge of keeping the RAM usage +constant. + +## Metadata pairs + +Metadata pairs are the backbone of littlefs. These are small, two block logs +that allow atomic updates anywhere in the filesystem. + +Why two blocks? Well, logs work by appending entries to a circular buffer +stored on disk. But remember that flash has limited write granularity. We can +incrementally program new data onto erased blocks, but we need to erase a full +block at a time. This means that in order for our circular buffer to work, we +need more than one block. + +We could make our logs larger than two blocks, but the next challenge is how +do we store references to these logs? Because the blocks themselves are erased +during writes, using a data structure to track these blocks is complicated. +The simple solution here is to store a two block addresses for every metadata +pair. This has the added advantage that we can change out blocks in the +metadata pair independently, and we don't reduce our block granularity for +other operations. + +In order to determine which metadata block is the most recent, we store a +revision count that we compare using [sequence arithmetic][wikipedia-sna] +(very handy for avoiding problems with integer overflow). Conveniently, this +revision count also gives us a rough idea of how many erases have occurred on +the block. + +``` +metadata pair pointer: {block 0, block 1} + | '--------------------. + '-. | +disk v v +.--------.--------.--------.--------.--------.--------.--------.--------. +| | |metadata| |metadata| | +| | |block 0 | |block 1 | | +| | | | | | | +'--------'--------'--------'--------'--------'--------'--------'--------' + '--. .----' + v v + metadata pair .----------------.----------------. + | revision 11 | revision 12 | + block 1 is |----------------|----------------| + most recent | A | A'' | + |----------------|----------------| + | checksum | checksum | + |----------------|----------------| + | B | A''' | <- most recent A + |----------------|----------------| + | A'' | checksum | + |----------------|----------------| + | checksum | | | + |----------------| v | + '----------------'----------------' +``` + +So how do we atomically update our metadata pairs? Atomicity (a type of +power-loss resilience) requires two parts: redundancy and error detection. +Error detection can be provided with a checksum, and in littlefs's case we +use a 32-bit [CRC][wikipedia-crc]. Maintaining redundancy, on the other hand, +requires multiple stages. + +1. If our block is not full and the program size is small enough to let us + append more entries, we can simply append the entries to the log. Because + we don't overwrite the original entries (remember rewriting flash requires + an erase), we still have the original entries if we lose power during the + append. + + ``` + commit A + .----------------.----------------. .----------------.----------------. 
+ | revision 1 | revision 0 | => | revision 1 | revision 0 | + |----------------|----------------| |----------------|----------------| + | | | | | A | | + | v | | |----------------| | + | | | | checksum | | + | | | |----------------| | + | | | | | | | + | | | | v | | + | | | | | | + | | | | | | + | | | | | | + | | | | | | + '----------------'----------------' '----------------'----------------' + ``` + + Note that littlefs doesn't maintain a checksum for each entry. Many logging + filesystems do this, but it limits what you can update in a single atomic + operation. What we can do instead is group multiple entries into a commit + that shares a single checksum. This lets us update multiple unrelated pieces + of metadata as long as they reside on the same metadata pair. + + ``` + commit B and A' + .----------------.----------------. .----------------.----------------. + | revision 1 | revision 0 | => | revision 1 | revision 0 | + |----------------|----------------| |----------------|----------------| + | A | | | A | | + |----------------| | |----------------| | + | checksum | | | checksum | | + |----------------| | |----------------| | + | | | | | B | | + | v | | |----------------| | + | | | | A' | | + | | | |----------------| | + | | | | checksum | | + | | | |----------------| | + '----------------'----------------' '----------------'----------------' + ``` + +2. If our block _is_ full of entries, we need to somehow remove outdated + entries to make space for new ones. This process is called garbage + collection, but because littlefs has multiple garbage collectors, we + also call this specific case compaction. + + Compared to other filesystems, littlefs's garbage collector is relatively + simple. We want to avoid RAM consumption, so we use a sort of brute force + solution where for each entry we check to see if a newer entry has been + written. If the entry is the most recent we append it to our new block. This + is where having two blocks becomes important, if we lose power we still have + everything in our original block. + + During this compaction step we also erase the metadata block and increment + the revision count. Because we can commit multiple entries at once, we can + write all of these changes to the second block without worrying about power + loss. It's only when the commit's checksum is written that the compacted + entries and revision count become committed and readable. + + ``` + commit B', need to compact + .----------------.----------------. .----------------.----------------. + | revision 1 | revision 0 | => | revision 1 | revision 2 | + |----------------|----------------| |----------------|----------------| + | A | | | A | A' | + |----------------| | |----------------|----------------| + | checksum | | | checksum | B' | + |----------------| | |----------------|----------------| + | B | | | B | checksum | + |----------------| | |----------------|----------------| + | A' | | | A' | | | + |----------------| | |----------------| v | + | checksum | | | checksum | | + |----------------| | |----------------| | + '----------------'----------------' '----------------'----------------' + ``` + +3. If our block is full of entries _and_ we can't find any garbage, then what? + At this point, most logging filesystems would return an error indicating no + more space is available, but because we have small logs, overflowing a log + isn't really an error condition. 
+
+   Instead, we split our original metadata pair into two metadata pairs, each
+   containing half of the entries, connected by a tail pointer. Rather than
+   increase the size of the log and deal with the scalability issues
+   associated with larger logs, we form a linked list of small bounded logs.
+   This is a tradeoff, as this approach does use more storage space, but with
+   the benefit of improved scalability.
+
+   Despite writing to two metadata pairs, we can still maintain power
+   resilience during this split step by first preparing the new metadata pair,
+   and then inserting the tail pointer during the commit to the original
+   metadata pair.
+
+   ```
+   commit C and D, need to split
+   .----------------.----------------.          .----------------.----------------.
+   |   revision 1   |   revision 2   |    =>    |   revision 3   |   revision 2   |
+   |----------------|----------------|          |----------------|----------------|
+   |       A        |       A'       |          |       A'       |       A'       |
+   |----------------|----------------|          |----------------|----------------|
+   |    checksum    |       B'       |          |       B'       |       B'       |
+   |----------------|----------------|          |----------------|----------------|
+   |       B        |    checksum    |          |      tail ---------------------.
+   |----------------|----------------|          |----------------|----------------| |
+   |       A'       |       |        |          |    checksum    |                | |
+   |----------------|       v        |          |----------------|                | |
+   |    checksum    |                |          |       |        |                | |
+   |----------------|                |          |       v        |                | |
+   '----------------'----------------'          '----------------'----------------' |
+                                                       .----------------.----------'
+                                                      v                  v
+                                                 .----------------.----------------.
+                                                 |   revision 1   |   revision 0   |
+                                                 |----------------|----------------|
+                                                 |       C        |                |
+                                                 |----------------|                |
+                                                 |       D        |                |
+                                                 |----------------|                |
+                                                 |    checksum    |                |
+                                                 |----------------|                |
+                                                 |       |        |                |
+                                                 |       v        |                |
+                                                 |                |                |
+                                                 |                |                |
+                                                 '----------------'----------------'
+   ```
+
+There is another complexity that crops up when dealing with small logs. The
+amortized runtime cost of garbage collection depends not only on its one-time
+cost (_O(n²)_ for littlefs), but also on how often garbage collection occurs.
+
+Consider two extremes:
+
+1. Log is empty, garbage collection occurs once every _n_ updates
+2. Log is full, garbage collection occurs **every** update
+
+Clearly we need to be more aggressive than waiting for our metadata pair to
+be full. As the metadata pair approaches fullness, the frequency of compactions
+grows very rapidly.
+
+Looking at the problem generically, consider a log with ![n] bytes for each
+entry, ![d] dynamic entries (entries that are outdated during garbage
+collection), and ![s] static entries (entries that need to be copied during
+garbage collection). If we look at the amortized runtime complexity of updating
+this log we get this formula:
+
+![cost = n + n (s / d+1)][metadata-formula1]
+
+If we let ![r] be the ratio of static space to the size of our log in bytes, we
+find an alternative representation of the number of static and dynamic entries:
+
+![s = r (size/n)][metadata-formula2]
+
+![d = (1 - r) (size/n)][metadata-formula3]
+
+Substituting these in for ![d] and ![s] gives us a nice formula for the cost of
+updating an entry given how full the log is:
+
+![cost = n + n (r (size/n) / ((1-r) (size/n) + 1))][metadata-formula4]
+
+Assuming 100 byte entries in a 4 KiB log, we can graph this using the entry
+size to find a multiplicative cost:
+
+![Metadata pair update cost graph][metadata-cost-graph]
+
+So at 50% usage, we're seeing an average of 2x cost per update, and at 75%
+usage, we're already at an average of 4x cost per update.
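+
+To make these numbers concrete, here is a small, hypothetical C sketch (not
+part of littlefs) that simply evaluates the cost formula above for 100 byte
+entries in a 4 KiB log:
+
+```
+#include <stdio.h>
+
+int main(void) {
+    const double n = 100;     // bytes per entry
+    const double size = 4096; // log size in bytes
+    for (double r = 0.0; r < 1.0; r += 0.25) {
+        // s/d are the static/dynamic entry counts from the formulas above
+        double s = r * (size / n);
+        double d = (1.0 - r) * (size / n);
+        double cost = n + n * (s / (d + 1.0));
+        printf("r = %.2f -> %.1fx cost per update\n", r, cost / n);
+    }
+    return 0;
+}
+```
+
+At r = 0.50 this prints roughly 2.0x, and at r = 0.75 roughly 3.7x, matching
+the graph above.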
+
+To avoid this exponential growth, instead of waiting for our metadata pair
+to be full, we split the metadata pair once we exceed 50% capacity. We do this
+lazily, waiting until we need to compact before checking if we fit in our 50%
+limit. This limits the overhead of garbage collection to 2x the runtime cost,
+giving us an amortized runtime complexity of _O(1)_.
+
+---
+
+If we look at metadata pairs and linked-lists of metadata pairs at a high
+level, they have fairly nice runtime costs. Assuming _n_ metadata pairs,
+each containing _m_ metadata entries, the _lookup_ cost for a specific
+entry has a worst case runtime complexity of _O(nm)_. For _updating_ a specific
+entry, the worst case complexity is _O(nm²)_, with an amortized complexity
+of only _O(nm)_.
+
+However, splitting at 50% capacity does mean that in the worst case our
+metadata pairs will only be 1/2 full. If we include the overhead of the second
+block in our metadata pair, each metadata entry has an effective storage cost
+of 4x the original size. I imagine users would not be happy if they found
+that they can only use a quarter of their original storage. Metadata pairs
+provide a mechanism for performing atomic updates, but we need a separate
+mechanism for storing the bulk of our data.
+
+## CTZ skip-lists
+
+Metadata pairs provide efficient atomic updates but unfortunately have a large
+storage cost. Fortunately, we can work around this storage cost by only using
+the metadata pairs to store references to more dense, copy-on-write (COW) data
+structures.
+
+[Copy-on-write data structures][wikipedia-cow], also called purely functional
+data structures, are a category of data structures where the underlying
+elements are immutable. Making changes to the data requires creating new
+elements containing a copy of the updated data and replacing any references
+with references to the new elements. Generally, the performance of a COW data
+structure depends on how many old elements can be reused after replacing parts
+of the data.
+
+littlefs has several requirements of its COW structures. They need to be
+efficient to read and write, but, most frustratingly, they need to be
+traversable with a constant amount of RAM. Notably, this rules out
+[B-trees][wikipedia-B-tree], which cannot be traversed with constant RAM, and
+[B+-trees][wikipedia-B+-tree], which are not possible to update with COW
+operations.
+
+---
+
+So, what can we do? First let's consider storing files in a simple COW
+linked-list. Appending a block, which is the basis for writing files, means we
+have to update the last block to point to our new block. This requires a COW
+operation, which means we need to update the second-to-last block, and then the
+third-to-last, and so on until we've copied out the entire file.
+
+```
+A linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |->| data 1 |->| data 2 |->| data 3 |->| data 4 |->| data 5 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+To avoid a full copy during appends, we can store the data backwards. Appending
+blocks just requires adding the new block and no other blocks need to be
+updated. If we update a block in the middle, we still need to copy the
+following blocks, but can reuse any blocks before it. Since most file writes
+are linear, this design gambles that appends are the most common type of data
+update.
+
+```
+A backwards linked-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+However, a backwards linked-list does have a rather glaring problem. Iterating
+over a file _in order_ has a runtime cost of _O(n²)_. A quadratic runtime
+just to read a file! That's awful.
+
+Fortunately we can do better. Instead of a singly linked list, littlefs
+uses a multilayered linked-list often called a
+[skip-list][wikipedia-skip-list]. However, unlike the most common type of
+skip-list, littlefs's skip-lists are strictly deterministic, built around some
+interesting properties of the count-trailing-zeros (CTZ) instruction.
+
+The rules CTZ skip-lists follow are that for every _n_‍th block where _n_
+is divisible by 2‍_ˣ_, that block contains a pointer to block
+_n_-2‍_ˣ_. This means that each block contains anywhere from 1 to
+log₂_n_ pointers that skip to different preceding elements of the
+skip-list.
+
+The name comes from heavy use of the [CTZ instruction][wikipedia-ctz], which
+lets us calculate the power-of-two factors efficiently. For a given block _n_,
+that block contains ctz(_n_)+1 pointers.
+
+```
+A backwards CTZ skip-list
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 |<-| data 1 |<-| data 2 |<-| data 3 |<-| data 4 |<-| data 5 |
+| |<-| |--| |<-| |--| | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The additional pointers let us navigate the data-structure on disk much more
+efficiently than in a singly linked list.
+
+Consider a path from data block 5 to data block 1. You can see how data block 3
+was completely skipped:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 |<-| data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | |<-| |--| | | |
+| | | | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+The path to data block 0 is even faster, requiring only two jumps:
+```
+.--------. .--------. .--------. .--------. .--------. .--------.
+| data 0 | | data 1 | | data 2 | | data 3 | | data 4 |<-| data 5 |
+| | | | | | | | | | | |
+| |<-| |--| |--| |--| | | |
+'--------' '--------' '--------' '--------' '--------' '--------'
+```
+
+We can find the runtime complexity by looking at the path to any block from
+the block containing the most pointers. Every step along the path divides
+the search space for the block in half, giving us a runtime of _O(log n)_.
+To get _to_ the block with the most pointers, we can perform the same steps
+backwards, which puts the runtime at _O(2 log n)_ = _O(log n)_. An interesting
+note is that this optimal path occurs naturally if we greedily choose the
+pointer that covers the most distance without passing our target.
+
+So now we have a [COW] data structure that is cheap to append with a runtime
+of _O(1)_, and can be read with a worst case runtime of _O(n log n)_. Given
+that this runtime is also divided by the amount of data we can store in a
+block, this cost is fairly reasonable.
+
+---
+
+This is a new data structure, so we still have several questions. What is the
+storage overhead? Can the number of pointers exceed the size of a block? How do
+we store a CTZ skip-list in our metadata pairs?
+
+To find the storage overhead, we can look at the data structure as multiple
+linked-lists. Each linked-list skips twice as many blocks as the previous,
+or from another perspective, each linked-list uses half as much storage as
+the previous. As we approach infinity, the storage overhead forms a geometric
+series. Solving this tells us that on average our storage overhead is only
+2 pointers per block.
+
+![lim,n->inf((1/n)sum,i,0->n(ctz(i)+1)) = sum,i,0->inf(1/2^i) = 2][ctz-formula1]
+
+Because our file size is limited by the word width we use to store sizes, we
+can also solve for the maximum number of pointers we would ever need to store
+in a block. If we set the overhead of pointers equal to the block size, we get
+the following equation. Note that both a smaller block size (![B][bigB]) and
+larger word width (![w]) result in more storage overhead.
+
+![B = (w/8)ceil(log2(2^w / (B-2w/8)))][ctz-formula2]
+
+Solving the equation for ![B][bigB] gives us the minimum block size for some
+common word widths:
+
+1. 32-bit CTZ skip-list => minimum block size of 104 bytes
+2. 64-bit CTZ skip-list => minimum block size of 448 bytes
+
+littlefs uses a 32-bit word width, so our blocks can only overflow with
+pointers if they are smaller than 104 bytes. This is an easy requirement, as
+in practice, most block sizes start at 512 bytes. As long as our block size
+is larger than 104 bytes, we can avoid the extra logic needed to handle
+pointer overflow.
+
+The last question is: how do we store CTZ skip-lists? We need a pointer to the
+head block, the size of the skip-list, the index of the head block, and our
+offset in the head block. But it's worth noting that each size maps to a unique
+index + offset pair. So in theory we can store only a single pointer and size.
+
+However, calculating the index + offset pair from the size is a bit
+complicated. We can start with a summation that loops through all of the blocks
+up until our given size. Let ![B][bigB] be the block size in bytes, ![w] be the
+word width in bits, ![n] be the index of the block in the skip-list, and
+![N][bigN] be the file size in bytes:
+
+![N = sum,i,0->n(B-(w/8)(ctz(i)+1))][ctz-formula3]
+
+This works quite well, but requires _O(n)_ to compute, which brings the full
+runtime of reading a file up to _O(n² log n)_. Fortunately, that summation
+doesn't need to touch the disk, so the practical impact is minimal.
+
+However, despite the integration of a bitwise operation, we can actually reduce
+this equation to an _O(1)_ form. While browsing the amazing resource that is
+the [On-Line Encyclopedia of Integer Sequences (OEIS)][oeis], I managed to find
+[A001511], which matches the iteration of the CTZ instruction,
+and [A005187], which matches its partial summation. Much to my
+surprise, these both result from simple equations, leading us to a rather
+unintuitive property that ties together two seemingly unrelated bitwise
+instructions:
+
+![sum,i,0->n(ctz(i)+1) = 2n-popcount(n)][ctz-formula4]
+
+where:
+
+1. ctz(![x]) = the number of trailing bits that are 0 in ![x]
+2. popcount(![x]) = the number of bits that are 1 in ![x]
+
+Initial tests of this surprising property seem to hold. As ![n] approaches
+infinity, we end up with an average overhead of 2 pointers, which matches our
+assumption from earlier. During iteration, the popcount function seems to
+handle deviations from this average. Of course, just to make sure I wrote a
+quick script that verified this property for all 32-bit integers.
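+
+For the curious, here is one way such a check might look (not the author's
+original script), bounded to the first million integers so it finishes
+quickly; __builtin_ctz and __builtin_popcount are the GCC/Clang intrinsics
+for the two instructions:
+
+```c
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void) {
+    // running sum of ctz(i)+1 for i = 1..n, compared against
+    // the closed form 2n - popcount(n)
+    uint64_t sum = 0;
+    for (uint32_t n = 1; n <= 1000000; n++) {
+        sum += __builtin_ctz(n) + 1;
+        assert(sum == 2*(uint64_t)n - __builtin_popcount(n));
+    }
+    printf("property holds\n");
+    return 0;
+}
+```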
+ +Now we can substitute into our original equation to find a more efficient +equation for file size: + +![N = Bn - (w/8)(2n-popcount(n))][ctz-formula5] + +Unfortunately, the popcount function is non-injective, so we can't solve this +equation for our index. But what we can do is solve for an ![n'] index that +is greater than ![n] with error bounded by the range of the popcount function. +We can repeatedly substitute ![n'] into the original equation until the error +is smaller than our integer resolution. As it turns out, we only need to +perform this substitution once, which gives us this formula for our index: + +![n = floor((N-(w/8)popcount(N/(B-2w/8))) / (B-2w/8))][ctz-formula6] + +Now that we have our index ![n], we can just plug it back into the above +equation to find the offset. We run into a bit of a problem with integer +overflow, but we can avoid this by rearranging the equation a bit: + +![off = N - (B-2w/8)n - (w/8)popcount(n)][ctz-formula7] + +Our solution requires quite a bit of math, but computers are very good at math. +Now we can find both our block index and offset from a size in _O(1)_, letting +us store CTZ skip-lists with only a pointer and size. + +CTZ skip-lists give us a COW data structure that is easily traversable in +_O(n)_, can be appended in _O(1)_, and can be read in _O(n log n)_. All of +these operations work in a bounded amount of RAM and require only two words of +storage overhead per block. In combination with metadata pairs, CTZ skip-lists +provide power resilience and compact storage of data. + +``` + .--------. + .|metadata| + || | + || | + |'--------' + '----|---' + v +.--------. .--------. .--------. .--------. +| data 0 |<-| data 1 |<-| data 2 |<-| data 3 | +| |<-| |--| | | | +| | | | | | | | +'--------' '--------' '--------' '--------' + +write data to disk, create copies +=> + .--------. + .|metadata| + || | + || | + |'--------' + '----|---' + v +.--------. .--------. .--------. .--------. +| data 0 |<-| data 1 |<-| data 2 |<-| data 3 | +| |<-| |--| | | | +| | | | | | | | +'--------' '--------' '--------' '--------' + ^ ^ ^ + | | | .--------. .--------. .--------. .--------. + | | '----| new |<-| new |<-| new |<-| new | + | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 | + '------------------| |--| |--| | | | + '--------' '--------' '--------' '--------' + +commit to metadata pair +=> + .--------. + .|new | + ||metadata| + || | + |'--------' + '----|---' + | +.--------. .--------. .--------. .--------. | +| data 0 |<-| data 1 |<-| data 2 |<-| data 3 | | +| |<-| |--| | | | | +| | | | | | | | | +'--------' '--------' '--------' '--------' | + ^ ^ ^ v + | | | .--------. .--------. .--------. .--------. + | | '----| new |<-| new |<-| new |<-| new | + | '----------------| data 2 |<-| data 3 |--| data 4 | | data 5 | + '------------------| |--| |--| | | | + '--------' '--------' '--------' '--------' +``` + +## The block allocator + +So we now have the framework for an atomic, wear leveling filesystem. Small two +block metadata pairs provide atomic updates, while CTZ skip-lists provide +compact storage of data in COW blocks. + +But now we need to look at the [elephant] in the room. Where do all these +blocks come from? + +Deciding which block to use next is the responsibility of the block allocator. +In filesystem design, block allocation is often a second-class citizen, but in +a COW filesystem its role becomes much more important as it is needed for +nearly every write to the filesystem. 
+ +Normally, block allocation involves some sort of free list or bitmap stored on +the filesystem that is updated with free blocks. However, with power +resilience, keeping these structures consistent becomes difficult. It doesn't +help that any mistake in updating these structures can result in lost blocks +that are impossible to recover. + +littlefs takes a cautious approach. Instead of trusting a free list on disk, +littlefs relies on the fact that the filesystem on disk is a mirror image of +the free blocks on the disk. The block allocator operates much like a garbage +collector in a scripting language, scanning for unused blocks on demand. + +``` + .----. + |root| + | | + '----' + v-------' '-------v +.----. . . .----. +| A | . . | B | +| | . . | | +'----' . . '----' +. . . . v--' '------------v---------v +. . . .----. . .----. .----. +. . . | C | . | D | | E | +. . . | | . | | | | +. . . '----' . '----' '----' +. . . . . . . . . . +.----.----.----.----.----.----.----.----.----.----.----.----. +| A | |root| C | B | | D | | E | | +| | | | | | | | | | | +'----'----'----'----'----'----'----'----'----'----'----'----' + ^ ^ ^ ^ ^ + '-------------------'----'-------------------'----'-- free blocks +``` + +While this approach may sound complicated, the decision to not maintain a free +list greatly simplifies the overall design of littlefs. Unlike programming +languages, there are only a handful of data structures we need to traverse. +And block deallocation, which occurs nearly as often as block allocation, +is simply a noop. This "drop it on the floor" strategy greatly reduces the +complexity of managing on disk data structures, especially when handling +high-risk error conditions. + +--- + +Our block allocator needs to find free blocks efficiently. You could traverse +through every block on storage and check each one against our filesystem tree; +however, the runtime would be abhorrent. We need to somehow collect multiple +blocks per traversal. + +Looking at existing designs, some larger filesystems that use a similar "drop +it on the floor" strategy store a bitmap of the entire storage in [RAM]. This +works well because bitmaps are surprisingly compact. We can't use the same +strategy here, as it violates our constant RAM requirement, but we may be able +to modify the idea into a workable solution. + +``` +.----.----.----.----.----.----.----.----.----.----.----.----. +| A | |root| C | B | | D | | E | | +| | | | | | | | | | | +'----'----'----'----'----'----'----'----'----'----'----'----' + 1 0 1 1 1 0 0 1 0 1 0 0 + \---------------------------+----------------------------/ + v + bitmap: 0xb94 (0b101110010100) +``` + +The block allocator in littlefs is a compromise between a disk-sized bitmap and +a brute force traversal. Instead of a bitmap the size of storage, we keep track +of a small, fixed-size bitmap called the lookahead buffer. During block +allocation, we take blocks from the lookahead buffer. If the lookahead buffer +is empty, we scan the filesystem for more free blocks, populating our lookahead +buffer. In each scan we use an increasing offset, circling the storage as +blocks are allocated. + +Here's what it might look like to allocate 4 blocks on a decently busy +filesystem with a 32 bit lookahead and a total of 128 blocks (512 KiB +of storage if blocks are 4 KiB): +``` +boot... lookahead: + fs blocks: fffff9fffffffffeffffffffffff0000 +scanning... 
lookahead: fffff9ff + fs blocks: fffff9fffffffffeffffffffffff0000 +alloc = 21 lookahead: fffffdff + fs blocks: fffffdfffffffffeffffffffffff0000 +alloc = 22 lookahead: ffffffff + fs blocks: fffffffffffffffeffffffffffff0000 +scanning... lookahead: fffffffe + fs blocks: fffffffffffffffeffffffffffff0000 +alloc = 63 lookahead: ffffffff + fs blocks: ffffffffffffffffffffffffffff0000 +scanning... lookahead: ffffffff + fs blocks: ffffffffffffffffffffffffffff0000 +scanning... lookahead: ffffffff + fs blocks: ffffffffffffffffffffffffffff0000 +scanning... lookahead: ffff0000 + fs blocks: ffffffffffffffffffffffffffff0000 +alloc = 112 lookahead: ffff8000 + fs blocks: ffffffffffffffffffffffffffff8000 +``` + +This lookahead approach has a runtime complexity of _O(n²)_ to completely +scan storage; however, bitmaps are surprisingly compact, and in practice only +one or two passes are usually needed to find free blocks. Additionally, the +performance of the allocator can be optimized by adjusting the block size or +size of the lookahead buffer, trading either write granularity or RAM for +allocator performance. + +## Wear leveling + +The block allocator has a secondary role: wear leveling. + +Wear leveling is the process of distributing wear across all blocks in the +storage to prevent the filesystem from experiencing an early death due to +wear on a single block in the storage. + +littlefs has two methods of protecting against wear: +1. Detection and recovery from bad blocks +2. Evenly distributing wear across dynamic blocks + +--- + +Recovery from bad blocks doesn't actually have anything to do with the block +allocator itself. Instead, it relies on the ability of the filesystem to detect +and evict bad blocks when they occur. + +In littlefs, it is fairly straightforward to detect bad blocks at write time. +All writes must be sourced by some form of data in RAM, so immediately after we +write to a block, we can read the data back and verify that it was written +correctly. If we find that the data on disk does not match the copy we have in +RAM, a write error has occurred and we most likely have a bad block. + +Once we detect a bad block, we need to recover from it. In the case of write +errors, we have a copy of the corrupted data in RAM, so all we need to do is +evict the bad block, allocate a new, hopefully good block, and repeat the write +that previously failed. + +The actual act of evicting the bad block and replacing it with a new block is +left up to the filesystem's copy-on-bounded-writes (CObW) data structures. One +property of CObW data structures is that any block can be replaced during a +COW operation. The bounded-writes part is normally triggered by a counter, but +nothing prevents us from triggering a COW operation as soon as we find a bad +block. + +``` + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. +| A | | B | +| | | | +'----' '----' +. . v---' . +. . .----. . +. . | C | . +. . | | . +. . '----' . +. . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| | C | B | | +| | | | | | | +'----'----'----'----'----'----'----'----'----'----' + +update C +=> + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. +| A | | B | +| | | | +'----' '----' +. . v---' . +. . .----. . +. . |bad | . +. . |blck| . +. . '----' . +. . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| |bad | B | | +| | | |blck| | | +'----'----'----'----'----'----'----'----'----'----' + +oh no! bad block! relocate C +=> + .----. 
+ |root| + | | + '----' + v--' '----------------------v +.----. .----. +| A | | B | +| | | | +'----' '----' +. . v---' . +. . .----. . +. . |bad | . +. . |blck| . +. . '----' . +. . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| |bad | B |bad | | +| | | |blck| |blck| | +'----'----'----'----'----'----'----'----'----'----' + ---------> +oh no! bad block! relocate C +=> + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. +| A | | B | +| | | | +'----' '----' +. . v---' . +. . .----. . .----. +. . |bad | . | C' | +. . |blck| . | | +. . '----' . '----' +. . . . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| |bad | B |bad | C' | | +| | | |blck| |blck| | | +'----'----'----'----'----'----'----'----'----'----' + --------------> +successfully relocated C, update B +=> + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. +| A | |bad | +| | |blck| +'----' '----' +. . v---' . +. . .----. . .----. +. . |bad | . | C' | +. . |blck| . | | +. . '----' . '----' +. . . . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| |bad |bad |bad | C' | | +| | | |blck|blck|blck| | | +'----'----'----'----'----'----'----'----'----'----' + +oh no! bad block! relocate B +=> + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. .----. +| A | |bad | |bad | +| | |blck| |blck| +'----' '----' '----' +. . v---' . . . +. . .----. . .----. . +. . |bad | . | C' | . +. . |blck| . | | . +. . '----' . '----' . +. . . . . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| |bad |bad |bad | C' |bad | +| | | |blck|blck|blck| |blck| +'----'----'----'----'----'----'----'----'----'----' + --------------> +oh no! bad block! relocate B +=> + .----. + |root| + | | + '----' + v--' '----------------------v +.----. .----. .----. +| A | | B' | |bad | +| | | | |blck| +'----' '----' '----' +. . . | . .---' . +. . . '--------------v-------------v +. . . . .----. . .----. +. . . . |bad | . | C' | +. . . . |blck| . | | +. . . . '----' . '----' +. . . . . . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| B' | |bad |bad |bad | C' |bad | +| | | | |blck|blck|blck| |blck| +'----'----'----'----'----'----'----'----'----'----' +------------> ------------------ +successfully relocated B, update root +=> + .----. + |root| + | | + '----' + v--' '--v +.----. .----. +| A | | B' | +| | | | +'----' '----' +. . . '---------------------------v +. . . . .----. +. . . . | C' | +. . . . | | +. . . . '----' +. . . . . . +.----.----.----.----.----.----.----.----.----.----. +| A |root| B' | |bad |bad |bad | C' |bad | +| | | | |blck|blck|blck| |blck| +'----'----'----'----'----'----'----'----'----'----' +``` + +We may find that the new block is also bad, but hopefully after repeating this +cycle we'll eventually find a new block where a write succeeds. If we don't, +that means that all blocks in our storage are bad, and we've reached the end of +our device's usable life. At this point, littlefs will return an "out of space" +error. This is technically true, as there are no more good blocks, but as an +added benefit it also matches the error condition expected by users of +dynamically sized data. + +--- + +Read errors, on the other hand, are quite a bit more complicated. We don't have +a copy of the data lingering around in RAM, so we need a way to reconstruct the +original data even after it has been corrupted. One such mechanism for this is +[error-correction-codes (ECC)][wikipedia-ecc]. 
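+
+As an aside, the write-error handling illustrated above is simple enough to
+sketch in full. This is a toy model, not littlefs code: a RAM-backed "flash"
+in which one block silently corrupts writes, exercising the write, verify,
+relocate cycle:
+
+```c
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define BLOCK_COUNT 8
+#define BLOCK_SIZE  16
+
+static uint8_t flash[BLOCK_COUNT][BLOCK_SIZE];
+static uint32_t next_free = 2; // blocks 0 and 1 are already in use
+
+// toy block device: block 1 is bad and silently corrupts writes
+static void bd_prog(uint32_t b, const uint8_t *d) {
+    memcpy(flash[b], d, BLOCK_SIZE);
+    if (b == 1) { flash[b][0] ^= 1; }
+}
+
+// write a block, read the data back to verify it, and relocate to a
+// freshly allocated block whenever the data on disk doesn't match RAM
+static int write_verified(uint32_t *b, const uint8_t *d) {
+    for (;;) {
+        bd_prog(*b, d);
+        if (memcmp(flash[*b], d, BLOCK_SIZE) == 0) {
+            return 0; // write succeeded and verified
+        }
+        // bad block! evict it and retry the write on a new block
+        if (next_free == BLOCK_COUNT) {
+            return -1; // all blocks are bad: report "out of space"
+        }
+        *b = next_free++;
+    }
+}
+
+int main(void) {
+    uint8_t data[BLOCK_SIZE] = "hello";
+    uint32_t block = 1; // start on the bad block
+    int err = write_verified(&block, data);
+    printf("err=%d, data ended up in block %lu\n",
+            err, (unsigned long)block);
+    return 0;
+}
+```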
+
+ECC is an extension to the idea of a checksum. Where a checksum such as CRC can
+detect that an error has occurred in the data, ECC can detect and actually
+correct some amount of errors. However, there is a limit to how many errors ECC
+can detect: the [Hamming bound][wikipedia-hamming-bound]. As the number of
+errors approaches the Hamming bound, we may still be able to detect errors, but
+can no longer fix the data. If we've reached this point the block is
+unrecoverable.
+
+littlefs by itself does **not** provide ECC. The block nature and relatively
+large footprint of ECC does not work well with the dynamically sized data of
+filesystems, correcting errors without RAM is complicated, and ECC fits better
+with the geometry of block devices. In fact, several NOR flash chips have extra
+storage intended for ECC, and many NAND chips can even calculate ECC on the
+chip itself.
+
+In littlefs, ECC is entirely optional. Read errors can instead be prevented
+proactively by wear leveling. But it's important to note that ECC can be used
+at the block device level to modestly extend the life of a device. littlefs
+respects any errors reported by the block device, allowing a block device to
+provide additional aggressive error detection.
+
+---
+
+To avoid read errors, we need to be proactive, as opposed to reactive as we
+were with write errors.
+
+One way to do this is to detect when the number of errors in a block exceeds
+some threshold, but is still recoverable. With ECC we can do this at write
+time, and treat the error as a write error, evicting the block before fatal
+read errors have a chance to develop.
+
+A different, more generic strategy is to proactively distribute wear across
+all blocks in the storage, with the hope that no single block fails before the
+rest of storage is approaching the end of its usable life. This is called
+wear leveling.
+
+Generally, wear leveling algorithms fall into one of two categories:
+
+1. [Dynamic wear leveling][wikipedia-dynamic-wear-leveling], where we
+   distribute wear over "dynamic" blocks. This can be accomplished by
+   only considering unused blocks.
+
+2. [Static wear leveling][wikipedia-static-wear-leveling], where we
+   distribute wear over both "dynamic" and "static" blocks. To make this work,
+   we need to consider all blocks, including blocks that already contain data.
+
+As a tradeoff for code size and complexity, littlefs (currently) only provides
+dynamic wear leveling. This is a best effort solution. Wear is not distributed
+perfectly, but it is distributed among the free blocks and greatly extends the
+life of a device.
+
+On top of this, littlefs uses a statistical wear leveling algorithm. What this
+means is that we don't actively track wear; instead we rely on a uniform
+distribution of wear across storage to approximate a dynamic wear leveling
+algorithm. Despite the long name, this is actually a simplification of dynamic
+wear leveling.
+
+The uniform distribution of wear is left up to the block allocator, which
+creates a uniform distribution in two parts. The easy part is when the device
+is powered, in which case we allocate the blocks linearly, circling the device.
+The harder part is what to do when the device loses power. We can't just
+restart the allocator at the beginning of storage, as this would bias the wear.
+Instead, we start the allocator at a random offset every time we mount the
+filesystem. As long as this random offset is uniform, the combined allocation
+pattern is also a uniform distribution.
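+
+The resulting allocation order is easy to sketch. This is a minimal model,
+not littlefs's actual allocator (which batches the scan into the lookahead
+buffer described earlier): is_free stands in for checking a block against
+the filesystem tree, and alloc_off is seeded at mount from a random source
+discussed below:
+
+```c
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t alloc_off; // random offset, reseeded on every mount
+static uint32_t alloc_i;   // blocks scanned since this mount
+
+// stand-in for checking a block against the filesystem tree;
+// here every third block pretends to be in use
+static bool is_free(uint32_t block) { return block % 3 != 0; }
+
+// allocate the next free block, scanning linearly from the random
+// offset and wrapping around the end of storage
+static int next_block(uint32_t block_count, uint32_t *block) {
+    while (alloc_i < block_count) {
+        uint32_t b = (alloc_off + alloc_i++) % block_count;
+        if (is_free(b)) {
+            *block = b;
+            return 0;
+        }
+    }
+    return -1; // scanned every block, storage is full
+}
+
+int main(void) {
+    alloc_off = 42; // imagine this seeded from on-disk entropy
+    for (int i = 0; i < 4; i++) {
+        uint32_t b;
+        if (next_block(128, &b) == 0) {
+            printf("alloc = %lu\n", (unsigned long)b);
+        }
+    }
+    return 0;
+}
+```
+
+Because the offset is uniformly random and the scan itself is linear, the
+allocation pattern across many power cycles averages out to be uniform.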
+
+![Cumulative wear distribution graph][wear-distribution-graph]
+
+Initially, this approach to wear leveling looks like it creates a difficult
+dependency on a power-independent random number generator, which must return
+different random numbers on each boot. However, the filesystem is in a
+relatively unique situation in that it is sitting on top of a large amount
+of entropy that persists across power loss.
+
+We can actually use the data on disk to directly drive our random number
+generator. In practice, this is implemented by xoring the checksums of each
+metadata pair, which are already calculated when we fetch and mount the
+filesystem.
+
+```
+ .--------. \ probably random
+ .|metadata| | ^
+ || | +-> crc ----------------------> xor
+ || | | ^
+ |'--------' / |
+ '---|--|-' |
+ .-' '-------------------------. |
+ | | |
+ | .--------------> xor ------------> xor
+ | | ^ | ^
+ v crc crc v crc
+ .--------. \ ^ .--------. \ ^ .--------. \ ^
+ .|metadata|-|--|-->|metadata| | | .|metadata| | |
+ || | +--' || | +--' || | +--'
+ || | | || | | || | |
+ |'--------' / |'--------' / |'--------' /
+ '---|--|-' '----|---' '---|--|-'
+ .-' '-. | .-' '-.
+ v v v v v
+.--------. .--------. .--------. .--------. .--------.
+| data | | data | | data | | data | | data |
+| | | | | | | | | |
+| | | | | | | | | |
+'--------' '--------' '--------' '--------' '--------'
+```
+
+Note that this random number generator is not perfect. It only returns unique
+random numbers when the filesystem is modified. This is exactly what we want
+for distributing wear in the allocator, but means this random number generator
+is not useful for general use.
+
+---
+
+Together, bad block detection and dynamic wear leveling provide a best effort
+solution for avoiding the early death of a filesystem due to wear. Importantly,
+littlefs's wear leveling algorithm provides a key feature: You can increase the
+life of a device simply by increasing the size of storage. And if more
+aggressive wear leveling is desired, you can always combine littlefs with a
+[flash translation layer (FTL)][wikipedia-ftl] to get a small power resilient
+filesystem with static wear leveling.
+
+## Files
+
+Now that we have our building blocks out of the way, we can start looking at
+our filesystem as a whole.
+
+The first step: How do we actually store our files?
+
+We've determined that CTZ skip-lists are pretty good at storing data compactly,
+so following the precedent found in other filesystems we could give each file
+a skip-list stored in a metadata pair that acts as an inode for the file.
+
+```
+file stored as inode, 4 bytes costs ~12 KiB
+
+ .----------------. \
+.| revision | |
+||----------------| \ |
+|| skiplist ---. 
+- metadata | +||----------------| | / 4x8 bytes | +|| checksum | | 32 bytes | +||----------------| | | +|| | | | +- metadata pair +|| v | | | 2x4 KiB +|| | | | 8 KiB +|| | | | +|| | | | +|| | | | +|'----------------' | | +'----------------' | / + .--------' + v + .----------------. \ \ + | data | +- data | + |----------------| / 4 bytes | + | | | + | | | + | | | + | | +- data block + | | | 4 KiB + | | | + | | | + | | | + | | | + | | | + '----------------' / +``` + +We can make several improvements. First, instead of giving each file its own +metadata pair, we can store multiple files in a single metadata pair. One way +to do this is to directly associate a directory with a metadata pair (or a +linked list of metadata pairs). This makes it easy for multiple files to share +the directory's metadata pair for logging and reduces the collective storage +overhead. + +The strict binding of metadata pairs and directories also gives users +direct control over storage utilization depending on how they organize their +directories. + +``` +multiple files stored in metadata pair, 4 bytes costs ~4 KiB + + .----------------. + .| revision | + ||----------------| + || A name | + || A skiplist -----. + ||----------------| | \ + || B name | | +- metadata + || B skiplist ---. | | 4x8 bytes + ||----------------| | | / 32 bytes + || checksum | | | + ||----------------| | | + || | | | | + || v | | | + |'----------------' | | + '----------------' | | + .----------------' | + v v +.----------------. .----------------. \ \ +| A data | | B data | +- data | +| | |----------------| / 4 bytes | +| | | | | +| | | | | +| | | | | +| | | | + data block +| | | | | 4 KiB +| | | | | +|----------------| | | | +| | | | | +| | | | | +| | | | | +'----------------' '----------------' / +``` + +The second improvement we can make is noticing that for very small files, our +attempts to use CTZ skip-lists for compact storage backfires. Metadata pairs +have a ~4x storage cost, so if our file is smaller than 1/4 the block size, +there's actually no benefit in storing our file outside of our metadata pair. + +In this case, we can store the file directly in our directory's metadata pair. +We call this an inline file, and it allows a directory to store many small +files quite efficiently. Our previous 4 byte file now only takes up a +theoretical 16 bytes on disk. + +``` +inline files stored in metadata pair, 4 bytes costs ~16 bytes + + .----------------. +.| revision | +||----------------| +|| A name | +|| A skiplist ---. +||----------------| | \ +|| B name | | +- data +|| B data | | | 4x4 bytes +||----------------| | / 16 bytes +|| checksum | | +||----------------| | +|| | | | +|| v | | +|'----------------' | +'----------------' | + .---------' + v + .----------------. + | A data | + | | + | | + | | + | | + | | + | | + | | + |----------------| + | | + | | + | | + '----------------' +``` + +Once the file exceeds 1/4 the block size, we switch to a CTZ skip-list. This +means that our files never use more than 4x storage overhead, decreasing as +the file grows in size. + +![File storage cost graph][file-cost-graph] + +## Directories + +Now we just need directories to store our files. As mentioned above we want +a strict binding of directories and metadata pairs, but there are a few +complications we need to sort out. + +On their own, each directory is a linked-list of metadata pairs. This lets us +store an unlimited number of files in each directory, and we don't need to +worry about the runtime complexity of unbounded logs. 
We can store other +directory pointers in our metadata pairs, which gives us a directory tree, much +like what you find on other filesystems. + +``` + .--------. + .| root | + || | + || | + |'--------' + '---|--|-' + .-' '-------------------------. + v v + .--------. .--------. .--------. + .| dir A |------->| dir A | .| dir B | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '---|--|-' '----|---' '---|--|-' + .-' '-. | .-' '-. + v v v v v +.--------. .--------. .--------. .--------. .--------. +| file C | | file D | | file E | | file F | | file G | +| | | | | | | | | | +| | | | | | | | | | +'--------' '--------' '--------' '--------' '--------' +``` + +The main complication is, once again, traversal with a constant amount of +[RAM]. The directory tree is a tree, and the unfortunate fact is you can't +traverse a tree with constant RAM. + +Fortunately, the elements of our tree are metadata pairs, so unlike CTZ +skip-lists, we're not limited to strict COW operations. One thing we can do is +thread a linked-list through our tree, explicitly enabling cheap traversal +over the entire filesystem. + +``` + .--------. + .| root |-. + || | | + .-------|| |-' + | |'--------' + | '---|--|-' + | .-' '-------------------------. + | v v + | .--------. .--------. .--------. + '->| dir A |------->| dir A |------->| dir B | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '---|--|-' '----|---' '---|--|-' + .-' '-. | .-' '-. + v v v v v +.--------. .--------. .--------. .--------. .--------. +| file C | | file D | | file E | | file F | | file G | +| | | | | | | | | | +| | | | | | | | | | +'--------' '--------' '--------' '--------' '--------' +``` + +Unfortunately, not sticking to pure COW operations creates some problems. Now, +whenever we want to manipulate the directory tree, multiple pointers need to be +updated. If you're familiar with designing atomic data structures this should +set off a bunch of red flags. + +To work around this, our threaded linked-list has a bit of leeway. Instead of +only containing metadata pairs found in our filesystem, it is allowed to +contain metadata pairs that have no parent because of a power loss. These are +called orphaned metadata pairs. + +With the possibility of orphans, we can build power loss resilient operations +that maintain a filesystem tree threaded with a linked-list for traversal. + +Adding a directory to our tree: + +``` + .--------. + .| root |-. + || | | +.-------|| |-' +| |'--------' +| '---|--|-' +| .-' '-. +| v v +| .--------. .--------. +'->| dir A |->| dir C | + || | || | + || | || | + |'--------' |'--------' + '--------' '--------' + +allocate dir B +=> + .--------. + .| root |-. + || | | +.-------|| |-' +| |'--------' +| '---|--|-' +| .-' '-. +| v v +| .--------. .--------. +'->| dir A |--->| dir C | + || | .->| | + || | | || | + |'--------' | |'--------' + '--------' | '--------' + | + .--------. | + .| dir B |-' + || | + || | + |'--------' + '--------' + +insert dir B into threaded linked-list, creating an orphan +=> + .--------. + .| root |-. + || | | +.-------|| |-' +| |'--------' +| '---|--|-' +| .-' '-------------. +| v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || orphan!| || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' + +add dir B to parent directory +=> + .--------. + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. 
.--------. +'->| dir A |->| dir B |->| dir C | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' +``` + +Removing a directory: + +``` + .--------. + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' + +remove dir B from parent directory, creating an orphan +=> + .--------. + .| root |-. + || | | +.-------|| |-' +| |'--------' +| '---|--|-' +| .-' '-------------. +| v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || orphan!| || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' + +remove dir B from threaded linked-list, returning dir B to free blocks +=> + .--------. + .| root |-. + || | | +.-------|| |-' +| |'--------' +| '---|--|-' +| .-' '-. +| v v +| .--------. .--------. +'->| dir A |->| dir C | + || | || | + || | || | + |'--------' |'--------' + '--------' '--------' +``` + +In addition to normal directory tree operations, we can use orphans to evict +blocks in a metadata pair when the block goes bad or exceeds its allocated +erases. If we lose power while evicting a metadata block we may end up with +a situation where the filesystem references the replacement block while the +threaded linked-list still contains the evicted block. We call this a +half-orphan. + +``` + .--------. + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' + +try to write to dir B +=> + .--------. + .| root |-. + || | | +.----------------|| |-' +| |'--------' +| '-|-||-|-' +| .--------' || '-----. +| v |v v +| .--------. .--------. .--------. +'->| dir A |---->| dir B |->| dir C | + || |-. | | || | + || | | | | || | + |'--------' | '--------' |'--------' + '--------' | v '--------' + | .--------. + '->| dir B | + | bad | + | block! | + '--------' + +oh no! bad block detected, allocate replacement +=> + .--------. + .| root |-. + || | | +.----------------|| |-' +| |'--------' +| '-|-||-|-' +| .--------' || '-------. +| v |v v +| .--------. .--------. .--------. +'->| dir A |---->| dir B |--->| dir C | + || |-. | | .->| | + || | | | | | || | + |'--------' | '--------' | |'--------' + '--------' | v | '--------' + | .--------. | + '->| dir B | | + | bad | | + | block! | | + '--------' | + | + .--------. | + | dir B |--' + | | + | | + '--------' + +insert replacement in threaded linked-list, creating a half-orphan +=> + .--------. + .| root |-. + || | | +.----------------|| |-' +| |'--------' +| '-|-||-|-' +| .--------' || '-------. +| v |v v +| .--------. .--------. .--------. +'->| dir A |---->| dir B |--->| dir C | + || |-. | | .->| | + || | | | | | || | + |'--------' | '--------' | |'--------' + '--------' | v | '--------' + | .--------. | + | | dir B | | + | | bad | | + | | block! | | + | '--------' | + | | + | .--------. | + '->| dir B |--' + | half | + | orphan!| + '--------' + +fix reference in parent directory +=> + .--------. + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. 
+'->| dir A |->| dir B |->| dir C | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' +``` + +Finding orphans and half-orphans is expensive, requiring a _O(n²)_ +comparison of every metadata pair with every directory entry. But the tradeoff +is a power resilient filesystem that works with only a bounded amount of RAM. +Fortunately, we only need to check for orphans on the first allocation after +boot, and a read-only littlefs can ignore the threaded linked-list entirely. + +If we only had some sort of global state, then we could also store a flag and +avoid searching for orphans unless we knew we were specifically interrupted +while manipulating the directory tree (foreshadowing!). + +## The move problem + +We have one last challenge: the move problem. Phrasing the problem is simple: + +How do you atomically move a file between two directories? + +In littlefs we can atomically commit to directories, but we can't create +an atomic commit that spans multiple directories. The filesystem must go +through a minimum of two distinct states to complete a move. + +To make matters worse, file moves are a common form of synchronization for +filesystems. As a filesystem designed for power-loss, it's important we get +atomic moves right. + +So what can we do? + +- We definitely can't just let power-loss result in duplicated or lost files. + This could easily break users' code and would only reveal itself in extreme + cases. We were only able to be lazy about the threaded linked-list because + it isn't user facing and we can handle the corner cases internally. + +- Some filesystems propagate COW operations up the tree until a common parent + is found. Unfortunately this interacts poorly with our threaded tree and + brings back the issue of upward propagation of wear. + +- In a previous version of littlefs we tried to solve this problem by going + back and forth between the source and destination, marking and unmarking the + file as moving in order to make the move atomic from the user perspective. + This worked, but not well. Finding failed moves was expensive and required + a unique identifier for each file. + +In the end, solving the move problem required creating a new mechanism for +sharing knowledge between multiple metadata pairs. In littlefs this led to the +introduction of a mechanism called "global state". + +--- + +Global state is a small set of state that can be updated from _any_ metadata +pair. Combining global state with metadata pairs' ability to update multiple +entries in one commit gives us a powerful tool for crafting complex atomic +operations. + +How does global state work? + +Global state exists as a set of deltas that are distributed across the metadata +pairs in the filesystem. The actual global state can be built out of these +deltas by xoring together all of the deltas in the filesystem. + +``` + .--------. .--------. .--------. .--------. .--------. +.| |->| gdelta |->| |->| gdelta |->| gdelta | +|| | || 0x23 | || | || 0xff | || 0xce | +|| | || | || | || | || | +|'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '----|---' '--------' '----|---' '----|---' + v v v + 0x00 --> xor ------------------> xor ------> xor --> gstate 0x12 +``` + +To update the global state from a metadata pair, we take the global state we +know and xor it with both our changes and any existing delta in the metadata +pair. Committing this new delta to the metadata pair commits the changes to +the filesystem's global state. 
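+
+The xor algebra is compact enough to sketch directly; the diagram below then
+traces the same update with concrete values. This is a minimal sketch,
+assuming a single 32-bit word of global state (littlefs's real global state
+is larger, but the algebra is the same):
+
+```c
+#include <stddef.h>
+#include <stdint.h>
+
+static uint32_t gstate; // copy of the global state, rebuilt in RAM at mount
+
+// at mount: fold every metadata pair's delta into the global state
+void mount_gstate(const uint32_t *gdeltas, size_t n) {
+    gstate = 0;
+    for (size_t i = 0; i < n; i++) {
+        gstate ^= gdeltas[i];
+    }
+}
+
+// to change the global state from one metadata pair: xor the state we
+// want with the state we know and the pair's existing delta, and commit
+// the result as the pair's new delta
+uint32_t new_delta(uint32_t old_delta, uint32_t wanted) {
+    return old_delta ^ gstate ^ wanted;
+}
+```
+
+Xoring all of the deltas together then yields the wanted state, since the old
+delta and the old global state cancel out.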
+ +``` + .--------. .--------. .--------. .--------. .--------. +.| |->| gdelta |->| |->| gdelta |->| gdelta | +|| | || 0x23 | || | || 0xff | || 0xce | +|| | || | || | || | || | +|'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '----|---' '--------' '--|---|-' '----|---' + v v | v + 0x00 --> xor ----------------> xor -|------> xor --> gstate = 0x12 + | | + | | +change gstate to 0xab --> xor <------------|--------------------------' +=> | v + '------------> xor + | + v + .--------. .--------. .--------. .--------. .--------. +.| |->| gdelta |->| |->| gdelta |->| gdelta | +|| | || 0x23 | || | || 0x46 | || 0xce | +|| | || | || | || | || | +|'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '----|---' '--------' '----|---' '----|---' + v v v + 0x00 --> xor ------------------> xor ------> xor --> gstate = 0xab +``` + +To make this efficient, we always keep a copy of the global state in RAM. We +only need to iterate over our metadata pairs and build the global state when +the filesystem is mounted. + +You may have noticed that global state is very expensive. We keep a copy in +RAM and a delta in an unbounded number of metadata pairs. Even if we reset +the global state to its initial value, we can't easily clean up the deltas on +disk. For this reason, it's very important that we keep the size of global +state bounded and extremely small. But, even with a strict budget, global +state is incredibly valuable. + +--- + +Now we can solve the move problem. We can create global state describing our +move atomically with the creation of the new file, and we can clear this move +state atomically with the removal of the old file. + +``` + .--------. gstate = no move + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || | || | + || | || | || | + |'--------' |'--------' |'--------' + '----|---' '--------' '--------' + v + .--------. + | file D | + | | + | | + '--------' + +begin move, add reference in dir C, change gstate to have move +=> + .--------. gstate = moving file D in dir A (m1) + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || | || | || gdelta | + || | || | || =m1 | + |'--------' |'--------' |'--------' + '----|---' '--------' '----|---' + | .----------------' + v v + .--------. + | file D | + | | + | | + '--------' + +complete move, remove reference in dir A, change gstate to no move +=> + .--------. gstate = no move (m1^~m1) + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || gdelta | || | || gdelta | + || =~m1 | || | || =m1 | + |'--------' |'--------' |'--------' + '--------' '--------' '----|---' + v + .--------. + | file D | + | | + | | + '--------' +``` + + +If, after building our global state during mount, we find information +describing an ongoing move, we know we lost power during a move and the file +is duplicated in both the source and destination directories. If this happens, +we can resolve the move using the information in the global state to remove +one of the files. + +``` + .--------. gstate = moving file D in dir A (m1) + .| root |-. 
^ + || |------------> xor +.---------------|| |-' ^ +| |'--------' | +| '--|-|-|-' | +| .--------' | '---------. | +| | | | | +| | .----------> xor --------> xor +| v | v ^ v ^ +| .--------. | .--------. | .--------. | +'->| dir A |-|->| dir B |-|->| dir C | | + || |-' || |-' || gdelta |-' + || | || | || =m1 | + |'--------' |'--------' |'--------' + '----|---' '--------' '----|---' + | .---------------------' + v v + .--------. + | file D | + | | + | | + '--------' +``` + +We can also move directories the same way we move files. There is the threaded +linked-list to consider, but leaving the threaded linked-list unchanged works +fine as the order doesn't really matter. + +``` + .--------. gstate = no move (m1^~m1) + .| root |-. + || | | +.-------------|| |-' +| |'--------' +| '--|-|-|-' +| .------' | '-------. +| v v v +| .--------. .--------. .--------. +'->| dir A |->| dir B |->| dir C | + || gdelta | || | || gdelta | + || =~m1 | || | || =m1 | + |'--------' |'--------' |'--------' + '--------' '--------' '----|---' + v + .--------. + | file D | + | | + | | + '--------' + +begin move, add reference in dir C, change gstate to have move +=> + .--------. gstate = moving dir B in root (m1^~m1^m2) + .| root |-. + || | | +.--------------|| |-' +| |'--------' +| '--|-|-|-' +| .-------' | '----------. +| v | v +| .--------. | .--------. +'->| dir A |-. | .->| dir C | + || gdelta | | | | || gdelta | + || =~m1 | | | | || =m1^m2 | + |'--------' | | | |'--------' + '--------' | | | '---|--|-' + | | .-------' | + | v v | v + | .--------. | .--------. + '->| dir B |-' | file D | + || | | | + || | | | + |'--------' '--------' + '--------' + +complete move, remove reference in root, change gstate to no move +=> + .--------. gstate = no move (m1^~m1^m2^~m2) + .| root |-. + || gdelta | | +.-----------|| =~m2 |-' +| |'--------' +| '---|--|-' +| .-----' '-----. +| v v +| .--------. .--------. +'->| dir A |-. .->| dir C | + || gdelta | | | || gdelta | + || =~m1 | | '-|| =m1^m2 |-------. + |'--------' | |'--------' | + '--------' | '---|--|-' | + | .-' '-. | + | v v | + | .--------. .--------. | + '->| dir B |--| file D |-' + || | | | + || | | | + |'--------' '--------' + '--------' +``` + +Global state gives us a powerful tool we can use to solve the move problem. +And the result is surprisingly performant, only needing the minimum number +of states and using the same number of commits as a naive move. Additionally, +global state gives us a bit of persistent state we can use for some other +small improvements. + +## Conclusion + +And that's littlefs, thanks for reading! 
+ + +[wikipedia-flash]: https://en.wikipedia.org/wiki/Flash_memory +[wikipedia-sna]: https://en.wikipedia.org/wiki/Serial_number_arithmetic +[wikipedia-crc]: https://en.wikipedia.org/wiki/Cyclic_redundancy_check +[wikipedia-cow]: https://en.wikipedia.org/wiki/Copy-on-write +[wikipedia-B-tree]: https://en.wikipedia.org/wiki/B-tree +[wikipedia-B+-tree]: https://en.wikipedia.org/wiki/B%2B_tree +[wikipedia-skip-list]: https://en.wikipedia.org/wiki/Skip_list +[wikipedia-ctz]: https://en.wikipedia.org/wiki/Count_trailing_zeros +[wikipedia-ecc]: https://en.wikipedia.org/wiki/Error_correction_code +[wikipedia-hamming-bound]: https://en.wikipedia.org/wiki/Hamming_bound +[wikipedia-dynamic-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Dynamic_wear_leveling +[wikipedia-static-wear-leveling]: https://en.wikipedia.org/wiki/Wear_leveling#Static_wear_leveling +[wikipedia-ftl]: https://en.wikipedia.org/wiki/Flash_translation_layer + +[oeis]: https://oeis.org +[A001511]: https://oeis.org/A001511 +[A005187]: https://oeis.org/A005187 + +[fat]: https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system +[ext2]: http://e2fsprogs.sourceforge.net/ext2intro.html +[jffs]: https://www.sourceware.org/jffs2/jffs2-html +[yaffs]: https://yaffs.net/documents/how-yaffs-works +[spiffs]: https://github.com/pellepl/spiffs/blob/master/docs/TECH_SPEC +[ext4]: https://ext4.wiki.kernel.org/index.php/Ext4_Design +[ntfs]: https://en.wikipedia.org/wiki/NTFS +[btrfs]: https://btrfs.wiki.kernel.org/index.php/Btrfs_design +[zfs]: https://en.wikipedia.org/wiki/ZFS + +[cow]: https://upload.wikimedia.org/wikipedia/commons/0/0c/Cow_female_black_white.jpg +[elephant]: https://upload.wikimedia.org/wikipedia/commons/3/37/African_Bush_Elephant.jpg +[ram]: https://upload.wikimedia.org/wikipedia/commons/9/97/New_Mexico_Bighorn_Sheep.JPG + +[metadata-formula1]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Bs%7D%7Bd+1%7D +[metadata-formula2]: https://latex.codecogs.com/svg.latex?s%20%3D%20r%20%5Cfrac%7Bsize%7D%7Bn%7D +[metadata-formula3]: https://latex.codecogs.com/svg.latex?d%20%3D%20%281-r%29%20%5Cfrac%7Bsize%7D%7Bn%7D +[metadata-formula4]: https://latex.codecogs.com/svg.latex?cost%20%3D%20n%20+%20n%20%5Cfrac%7Br%5Cfrac%7Bsize%7D%7Bn%7D%7D%7B%281-r%29%5Cfrac%7Bsize%7D%7Bn%7D+1%7D + +[ctz-formula1]: https://latex.codecogs.com/svg.latex?%5Clim_%7Bn%5Cto%5Cinfty%7D%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7Bi%3D0%7D%5E%7Bn%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%20%5Csum_%7Bi%3D0%7D%5Cfrac%7B1%7D%7B2%5Ei%7D%20%3D%202 +[ctz-formula2]: https://latex.codecogs.com/svg.latex?B%20%3D%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%5Clceil%5Clog_2%5Cleft%28%5Cfrac%7B2%5Ew%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%29%5Cright%5Crceil +[ctz-formula3]: https://latex.codecogs.com/svg.latex?N%20%3D%20%5Csum_i%5En%5Cleft%5BB-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%5Cright%5D +[ctz-formula4]: https://latex.codecogs.com/svg.latex?%5Csum_i%5En%5Cleft%28%5Ctext%7Bctz%7D%28i%29+1%5Cright%29%20%3D%202n-%5Ctext%7Bpopcount%7D%28n%29 +[ctz-formula5]: https://latex.codecogs.com/svg.latex?N%20%3D%20Bn%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Cleft%282n-%5Ctext%7Bpopcount%7D%28n%29%5Cright%29 +[ctz-formula6]: https://latex.codecogs.com/svg.latex?n%20%3D%20%5Cleft%5Clfloor%5Cfrac%7BN-%5Cfrac%7Bw%7D%7B8%7D%5Cleft%28%5Ctext%7Bpopcount%7D%5Cleft%28%5Cfrac%7BN%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D-1%5Cright%29+2%5Cright%29%7D%7BB-2%5Cfrac%7Bw%7D%7B8%7D%7D%5Cright%5Crfloor +[ctz-formula7]: 
https://latex.codecogs.com/svg.latex?%5Cmathit%7Boff%7D%20%3D%20N%20-%20%5Cleft%28B-2%5Cfrac%7Bw%7D%7B8%7D%5Cright%29n%20-%20%5Cfrac%7Bw%7D%7B8%7D%5Ctext%7Bpopcount%7D%28n%29 + +[bigB]: https://latex.codecogs.com/svg.latex?B +[d]: https://latex.codecogs.com/svg.latex?d +[m]: https://latex.codecogs.com/svg.latex?m +[bigN]: https://latex.codecogs.com/svg.latex?N +[n]: https://latex.codecogs.com/svg.latex?n +[n']: https://latex.codecogs.com/svg.latex?n%27 +[r]: https://latex.codecogs.com/svg.latex?r +[s]: https://latex.codecogs.com/svg.latex?s +[w]: https://latex.codecogs.com/svg.latex?w +[x]: https://latex.codecogs.com/svg.latex?x + +[metadata-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/metadata-cost.svg?sanitize=true +[wear-distribution-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/wear-distribution.svg?sanitize=true +[file-cost-graph]: https://raw.githubusercontent.com/geky/littlefs/gh-images/file-cost.svg?sanitize=true diff --git a/components/joltwallet__littlefs/src/littlefs/LICENSE.md b/components/joltwallet__littlefs/src/littlefs/LICENSE.md new file mode 100644 index 0000000..e6c3a7b --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/LICENSE.md @@ -0,0 +1,25 @@ +Copyright (c) 2022, The littlefs authors. +Copyright (c) 2017, Arm Limited. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +- Neither the name of ARM nor the names of its contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/components/joltwallet__littlefs/src/littlefs/Makefile b/components/joltwallet__littlefs/src/littlefs/Makefile new file mode 100644 index 0000000..909223a --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/Makefile @@ -0,0 +1,595 @@ +# overrideable build dir, default is in-place +BUILDDIR ?= . 
+# overridable target/src/tools/flags/etc +ifneq ($(wildcard test.c main.c),) +TARGET ?= $(BUILDDIR)/lfs +else +TARGET ?= $(BUILDDIR)/liblfs.a +endif + + +CC ?= gcc +AR ?= ar +SIZE ?= size +CTAGS ?= ctags +NM ?= nm +OBJDUMP ?= objdump +VALGRIND ?= valgrind +GDB ?= gdb +PERF ?= perf + +# guess clang or gcc (clang sometimes masquerades as gcc because of +# course it does) +ifneq ($(shell $(CC) --version | grep clang),) +NO_GCC = 1 +endif + +SRC ?= $(filter-out $(wildcard *.t.* *.b.*),$(wildcard *.c)) +OBJ := $(SRC:%.c=$(BUILDDIR)/%.o) +DEP := $(SRC:%.c=$(BUILDDIR)/%.d) +ASM := $(SRC:%.c=$(BUILDDIR)/%.s) +CI := $(SRC:%.c=$(BUILDDIR)/%.ci) +GCDA := $(SRC:%.c=$(BUILDDIR)/%.t.gcda) + +TESTS ?= $(wildcard tests/*.toml) +TEST_SRC ?= $(SRC) \ + $(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \ + runners/test_runner.c +TEST_RUNNER ?= $(BUILDDIR)/runners/test_runner +TEST_A := $(TESTS:%.toml=$(BUILDDIR)/%.t.a.c) \ + $(TEST_SRC:%.c=$(BUILDDIR)/%.t.a.c) +TEST_C := $(TEST_A:%.t.a.c=%.t.c) +TEST_OBJ := $(TEST_C:%.t.c=%.t.o) +TEST_DEP := $(TEST_C:%.t.c=%.t.d) +TEST_CI := $(TEST_C:%.t.c=%.t.ci) +TEST_GCNO := $(TEST_C:%.t.c=%.t.gcno) +TEST_GCDA := $(TEST_C:%.t.c=%.t.gcda) +TEST_PERF := $(TEST_RUNNER:%=%.perf) +TEST_TRACE := $(TEST_RUNNER:%=%.trace) +TEST_CSV := $(TEST_RUNNER:%=%.csv) + +BENCHES ?= $(wildcard benches/*.toml) +BENCH_SRC ?= $(SRC) \ + $(filter-out $(wildcard bd/*.t.* bd/*.b.*),$(wildcard bd/*.c)) \ + runners/bench_runner.c +BENCH_RUNNER ?= $(BUILDDIR)/runners/bench_runner +BENCH_A := $(BENCHES:%.toml=$(BUILDDIR)/%.b.a.c) \ + $(BENCH_SRC:%.c=$(BUILDDIR)/%.b.a.c) +BENCH_C := $(BENCH_A:%.b.a.c=%.b.c) +BENCH_OBJ := $(BENCH_C:%.b.c=%.b.o) +BENCH_DEP := $(BENCH_C:%.b.c=%.b.d) +BENCH_CI := $(BENCH_C:%.b.c=%.b.ci) +BENCH_GCNO := $(BENCH_C:%.b.c=%.b.gcno) +BENCH_GCDA := $(BENCH_C:%.b.c=%.b.gcda) +BENCH_PERF := $(BENCH_RUNNER:%=%.perf) +BENCH_TRACE := $(BENCH_RUNNER:%=%.trace) +BENCH_CSV := $(BENCH_RUNNER:%=%.csv) + +CFLAGS += -g3 +CFLAGS += -I. 
+CFLAGS += -std=c99 -Wall -Wextra -pedantic +CFLAGS += -Wmissing-prototypes +ifndef NO_GCC +CFLAGS += -fcallgraph-info=su +CFLAGS += -ftrack-macro-expansion=0 +endif + +ifdef DEBUG +CFLAGS += -O0 +else +CFLAGS += -Os +endif +ifdef TRACE +CFLAGS += -DLFS_YES_TRACE +endif +ifdef YES_COV +CFLAGS += --coverage +endif +ifdef YES_PERF +CFLAGS += -fno-omit-frame-pointer +endif +ifdef YES_PERFBD +CFLAGS += -fno-omit-frame-pointer +endif + +ifdef VERBOSE +CODEFLAGS += -v +DATAFLAGS += -v +STACKFLAGS += -v +STRUCTSFLAGS += -v +COVFLAGS += -v +PERFFLAGS += -v +PERFBDFLAGS += -v +endif +# forward -j flag +PERFFLAGS += $(filter -j%,$(MAKEFLAGS)) +PERFBDFLAGS += $(filter -j%,$(MAKEFLAGS)) +ifneq ($(NM),nm) +CODEFLAGS += --nm-path="$(NM)" +DATAFLAGS += --nm-path="$(NM)" +endif +ifneq ($(OBJDUMP),objdump) +CODEFLAGS += --objdump-path="$(OBJDUMP)" +DATAFLAGS += --objdump-path="$(OBJDUMP)" +STRUCTSFLAGS += --objdump-path="$(OBJDUMP)" +PERFFLAGS += --objdump-path="$(OBJDUMP)" +PERFBDFLAGS += --objdump-path="$(OBJDUMP)" +endif +ifneq ($(PERF),perf) +PERFFLAGS += --perf-path="$(PERF)" +endif + +TESTFLAGS += -b +BENCHFLAGS += -b +# forward -j flag +TESTFLAGS += $(filter -j%,$(MAKEFLAGS)) +BENCHFLAGS += $(filter -j%,$(MAKEFLAGS)) +ifdef YES_PERF +TESTFLAGS += -p $(TEST_PERF) +BENCHFLAGS += -p $(BENCH_PERF) +endif +ifdef YES_PERFBD +TESTFLAGS += -t $(TEST_TRACE) --trace-backtrace --trace-freq=100 +endif +ifndef NO_PERFBD +BENCHFLAGS += -t $(BENCH_TRACE) --trace-backtrace --trace-freq=100 +endif +ifdef YES_TESTMARKS +TESTFLAGS += -o $(TEST_CSV) +endif +ifndef NO_BENCHMARKS +BENCHFLAGS += -o $(BENCH_CSV) +endif +ifdef VERBOSE +TESTFLAGS += -v +TESTCFLAGS += -v +BENCHFLAGS += -v +BENCHCFLAGS += -v +endif +ifdef EXEC +TESTFLAGS += --exec="$(EXEC)" +BENCHFLAGS += --exec="$(EXEC)" +endif +ifneq ($(GDB),gdb) +TESTFLAGS += --gdb-path="$(GDB)" +BENCHFLAGS += --gdb-path="$(GDB)" +endif +ifneq ($(VALGRIND),valgrind) +TESTFLAGS += --valgrind-path="$(VALGRIND)" +BENCHFLAGS += --valgrind-path="$(VALGRIND)" +endif +ifneq ($(PERF),perf) +TESTFLAGS += --perf-path="$(PERF)" +BENCHFLAGS += --perf-path="$(PERF)" +endif + +# this is a bit of a hack, but we want to make sure the BUILDDIR +# directory structure is correct before we run any commands +ifneq ($(BUILDDIR),.) 
+$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \ + $(addprefix $(BUILDDIR)/,$(dir \ + $(SRC) \ + $(TESTS) \ + $(TEST_SRC) \ + $(BENCHES) \ + $(BENCH_SRC))))) +endif + + +# commands + +## Build littlefs +.PHONY: all build +all build: $(TARGET) + +## Build assembly files +.PHONY: asm +asm: $(ASM) + +## Find the total size +.PHONY: size +size: $(OBJ) + $(SIZE) -t $^ + +## Generate a ctags file +.PHONY: tags +tags: + $(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC) + +## Show this help text +.PHONY: help +help: + @$(strip awk '/^## / { \ + sub(/^## /,""); \ + getline rule; \ + while (rule ~ /^(#|\.PHONY|ifdef|ifndef)/) getline rule; \ + gsub(/:.*/, "", rule); \ + printf " "" %-25s %s\n", rule, $$0 \ + }' $(MAKEFILE_LIST)) + +## Find the per-function code size +.PHONY: code +code: CODEFLAGS+=-S +code: $(OBJ) $(BUILDDIR)/lfs.code.csv + ./scripts/code.py $(OBJ) $(CODEFLAGS) + +## Compare per-function code size +.PHONY: code-diff +code-diff: $(OBJ) + ./scripts/code.py $^ $(CODEFLAGS) -d $(BUILDDIR)/lfs.code.csv + +## Find the per-function data size +.PHONY: data +data: DATAFLAGS+=-S +data: $(OBJ) $(BUILDDIR)/lfs.data.csv + ./scripts/data.py $(OBJ) $(DATAFLAGS) + +## Compare per-function data size +.PHONY: data-diff +data-diff: $(OBJ) + ./scripts/data.py $^ $(DATAFLAGS) -d $(BUILDDIR)/lfs.data.csv + +## Find the per-function stack usage +.PHONY: stack +stack: STACKFLAGS+=-S +stack: $(CI) $(BUILDDIR)/lfs.stack.csv + ./scripts/stack.py $(CI) $(STACKFLAGS) + +## Compare per-function stack usage +.PHONY: stack-diff +stack-diff: $(CI) + ./scripts/stack.py $^ $(STACKFLAGS) -d $(BUILDDIR)/lfs.stack.csv + +## Find function sizes +.PHONY: funcs +funcs: SUMMARYFLAGS+=-S +funcs: \ + $(BUILDDIR)/lfs.code.csv \ + $(BUILDDIR)/lfs.data.csv \ + $(BUILDDIR)/lfs.stack.csv + $(strip ./scripts/summary.py $^ \ + -bfunction \ + -fcode=code_size \ + -fdata=data_size \ + -fstack=stack_limit --max=stack \ + $(SUMMARYFLAGS)) + +## Compare function sizes +.PHONY: funcs-diff +funcs-diff: SHELL=/bin/bash +funcs-diff: $(OBJ) $(CI) + $(strip ./scripts/summary.py \ + <(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \ + <(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \ + <(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \ + -bfunction \ + -fcode=code_size \ + -fdata=data_size \ + -fstack=stack_limit --max=stack \ + $(SUMMARYFLAGS) -d <(./scripts/summary.py \ + $(BUILDDIR)/lfs.code.csv \ + $(BUILDDIR)/lfs.data.csv \ + $(BUILDDIR)/lfs.stack.csv \ + -q $(SUMMARYFLAGS) -o-)) + +## Find struct sizes +.PHONY: structs +structs: STRUCTSFLAGS+=-S +structs: $(OBJ) $(BUILDDIR)/lfs.structs.csv + ./scripts/structs.py $(OBJ) $(STRUCTSFLAGS) + +## Compare struct sizes +.PHONY: structs-diff +structs-diff: $(OBJ) + ./scripts/structs.py $^ $(STRUCTSFLAGS) -d $(BUILDDIR)/lfs.structs.csv + +## Find the line/branch coverage after a test run +.PHONY: cov +cov: COVFLAGS+=-s +cov: $(GCDA) $(BUILDDIR)/lfs.cov.csv + $(strip ./scripts/cov.py $(GCDA) \ + $(patsubst %,-F%,$(SRC)) \ + $(COVFLAGS)) + +## Compare line/branch coverage +.PHONY: cov-diff +cov-diff: $(GCDA) + $(strip ./scripts/cov.py $^ \ + $(patsubst %,-F%,$(SRC)) \ + $(COVFLAGS) -d $(BUILDDIR)/lfs.cov.csv) + +## Find the perf results after bench run with YES_PERF +.PHONY: perf +perf: PERFFLAGS+=-S +perf: $(BENCH_PERF) $(BUILDDIR)/lfs.perf.csv + $(strip ./scripts/perf.py $(BENCH_PERF) \ + $(patsubst %,-F%,$(SRC)) \ + $(PERFFLAGS)) + +## Compare perf results +.PHONY: perf-diff +perf-diff: $(BENCH_PERF) + $(strip ./scripts/perf.py $^ \ + $(patsubst %,-F%,$(SRC)) \ + 
$(PERFFLAGS) -d $(BUILDDIR)/lfs.perf.csv)
+
+## Find the perfbd results after a bench run
+.PHONY: perfbd
+perfbd: PERFBDFLAGS+=-S
+perfbd: $(BENCH_TRACE) $(BUILDDIR)/lfs.perfbd.csv
+	$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $(BENCH_TRACE) \
+		$(patsubst %,-F%,$(SRC)) \
+		$(PERFBDFLAGS))
+
+## Compare perfbd results
+.PHONY: perfbd-diff
+perfbd-diff: $(BENCH_TRACE)
+	$(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \
+		$(patsubst %,-F%,$(SRC)) \
+		$(PERFBDFLAGS) -d $(BUILDDIR)/lfs.perfbd.csv)
+
+## Find a summary of compile-time sizes
+.PHONY: summary sizes
+summary sizes: \
+		$(BUILDDIR)/lfs.code.csv \
+		$(BUILDDIR)/lfs.data.csv \
+		$(BUILDDIR)/lfs.stack.csv \
+		$(BUILDDIR)/lfs.structs.csv
+	$(strip ./scripts/summary.py $^ \
+		-fcode=code_size \
+		-fdata=data_size \
+		-fstack=stack_limit --max=stack \
+		-fstructs=struct_size \
+		-Y $(SUMMARYFLAGS))
+
+## Compare compile-time sizes
+.PHONY: summary-diff sizes-diff
+summary-diff sizes-diff: SHELL=/bin/bash
+summary-diff sizes-diff: $(OBJ) $(CI)
+	$(strip ./scripts/summary.py \
+		<(./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o-) \
+		<(./scripts/data.py $(OBJ) -q $(DATAFLAGS) -o-) \
+		<(./scripts/stack.py $(CI) -q $(STACKFLAGS) -o-) \
+		<(./scripts/structs.py $(OBJ) -q $(STRUCTSFLAGS) -o-) \
+		-fcode=code_size \
+		-fdata=data_size \
+		-fstack=stack_limit --max=stack \
+		-fstructs=struct_size \
+		-Y $(SUMMARYFLAGS) -d <(./scripts/summary.py \
+			$(BUILDDIR)/lfs.code.csv \
+			$(BUILDDIR)/lfs.data.csv \
+			$(BUILDDIR)/lfs.stack.csv \
+			$(BUILDDIR)/lfs.structs.csv \
+			-q $(SUMMARYFLAGS) -o-))
+
+## Build the test-runner
+.PHONY: test-runner build-test
+test-runner build-test: CFLAGS+=-Wno-missing-prototypes
+ifndef NO_COV
+test-runner build-test: CFLAGS+=--coverage
+endif
+ifdef YES_PERF
+test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
+endif
+ifdef YES_PERFBD
+test-runner build-test: CFLAGS+=-fno-omit-frame-pointer
+endif
+# note we remove some binary-dependent files during compilation,
+# otherwise it's way too easy to end up with outdated results
+test-runner build-test: $(TEST_RUNNER)
+ifndef NO_COV
+	rm -f $(TEST_GCDA)
+endif
+ifdef YES_PERF
+	rm -f $(TEST_PERF)
+endif
+ifdef YES_PERFBD
+	rm -f $(TEST_TRACE)
+endif
+
+## Run the tests, -j enables parallel tests
+.PHONY: test
+test: test-runner
+	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS)
+
+## List the tests
+.PHONY: test-list
+test-list: test-runner
+	./scripts/test.py $(TEST_RUNNER) $(TESTFLAGS) -l
+
+## Summarize the testmarks
+.PHONY: testmarks
+testmarks: SUMMARYFLAGS+=-spassed
+testmarks: $(TEST_CSV) $(BUILDDIR)/lfs.test.csv
+	$(strip ./scripts/summary.py $(TEST_CSV) \
+		-bsuite \
+		-fpassed=test_passed \
+		$(SUMMARYFLAGS))
+
+## Compare testmarks against a previous run
+.PHONY: testmarks-diff
+testmarks-diff: $(TEST_CSV)
+	$(strip ./scripts/summary.py $^ \
+		-bsuite \
+		-fpassed=test_passed \
+		$(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.test.csv)
+
+## Build the bench-runner
+.PHONY: bench-runner build-bench
+bench-runner build-bench: CFLAGS+=-Wno-missing-prototypes
+ifdef YES_COV
+bench-runner build-bench: CFLAGS+=--coverage
+endif
+ifdef YES_PERF
+bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
+endif
+ifndef NO_PERFBD
+bench-runner build-bench: CFLAGS+=-fno-omit-frame-pointer
+endif
+# note we remove some binary-dependent files during compilation,
+# otherwise it's way too easy to end up with outdated results
+bench-runner build-bench: $(BENCH_RUNNER)
+ifdef YES_COV
+	rm -f $(BENCH_GCDA)
+endif
+ifdef YES_PERF
+	rm -f $(BENCH_PERF)
+endif
+ifndef NO_PERFBD
+	rm -f
$(BENCH_TRACE) +endif + +## Run the benchmarks, -j enables parallel benchmarks +.PHONY: bench +bench: bench-runner + ./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) + +## List the benchmarks +.PHONY: bench-list +bench-list: bench-runner + ./scripts/bench.py $(BENCH_RUNNER) $(BENCHFLAGS) -l + +## Summarize the benchmarks +.PHONY: benchmarks +benchmarks: SUMMARYFLAGS+=-Serased -Sproged -Sreaded +benchmarks: $(BENCH_CSV) $(BUILDDIR)/lfs.bench.csv + $(strip ./scripts/summary.py $(BENCH_CSV) \ + -bsuite \ + -freaded=bench_readed \ + -fproged=bench_proged \ + -ferased=bench_erased \ + $(SUMMARYFLAGS)) + +## Compare benchmarks against a previous run +.PHONY: benchmarks-diff +benchmarks-diff: $(BENCH_CSV) + $(strip ./scripts/summary.py $^ \ + -bsuite \ + -freaded=bench_readed \ + -fproged=bench_proged \ + -ferased=bench_erased \ + $(SUMMARYFLAGS) -d $(BUILDDIR)/lfs.bench.csv) + + + +# rules +-include $(DEP) +-include $(TEST_DEP) +-include $(BENCH_DEP) +.SUFFIXES: +.SECONDARY: + +$(BUILDDIR)/lfs: $(OBJ) + $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ + +$(BUILDDIR)/liblfs.a: $(OBJ) + $(AR) rcs $@ $^ + +$(BUILDDIR)/lfs.code.csv: $(OBJ) + ./scripts/code.py $^ -q $(CODEFLAGS) -o $@ + +$(BUILDDIR)/lfs.data.csv: $(OBJ) + ./scripts/data.py $^ -q $(DATAFLAGS) -o $@ + +$(BUILDDIR)/lfs.stack.csv: $(CI) + ./scripts/stack.py $^ -q $(STACKFLAGS) -o $@ + +$(BUILDDIR)/lfs.structs.csv: $(OBJ) + ./scripts/structs.py $^ -q $(STRUCTSFLAGS) -o $@ + +$(BUILDDIR)/lfs.cov.csv: $(GCDA) + $(strip ./scripts/cov.py $^ \ + $(patsubst %,-F%,$(SRC)) \ + -q $(COVFLAGS) -o $@) + +$(BUILDDIR)/lfs.perf.csv: $(BENCH_PERF) + $(strip ./scripts/perf.py $^ \ + $(patsubst %,-F%,$(SRC)) \ + -q $(PERFFLAGS) -o $@) + +$(BUILDDIR)/lfs.perfbd.csv: $(BENCH_TRACE) + $(strip ./scripts/perfbd.py $(BENCH_RUNNER) $^ \ + $(patsubst %,-F%,$(SRC)) \ + -q $(PERFBDFLAGS) -o $@) + +$(BUILDDIR)/lfs.test.csv: $(TEST_CSV) + cp $^ $@ + +$(BUILDDIR)/lfs.bench.csv: $(BENCH_CSV) + cp $^ $@ + +$(BUILDDIR)/runners/test_runner: $(TEST_OBJ) + $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ + +$(BUILDDIR)/runners/bench_runner: $(BENCH_OBJ) + $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ + +# our main build rule generates .o, .d, and .ci files, the latter +# used for stack analysis +$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: %.c + $(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o + +$(BUILDDIR)/%.o $(BUILDDIR)/%.ci: $(BUILDDIR)/%.c + $(CC) -c -MMD $(CFLAGS) $< -o $(BUILDDIR)/$*.o + +$(BUILDDIR)/%.s: %.c + $(CC) -S $(CFLAGS) $< -o $@ + +$(BUILDDIR)/%.c: %.a.c + ./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@ + +$(BUILDDIR)/%.c: $(BUILDDIR)/%.a.c + ./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@ + +$(BUILDDIR)/%.t.a.c: %.toml + ./scripts/test.py -c $< $(TESTCFLAGS) -o $@ + +$(BUILDDIR)/%.t.a.c: %.c $(TESTS) + ./scripts/test.py -c $(TESTS) -s $< $(TESTCFLAGS) -o $@ + +$(BUILDDIR)/%.b.a.c: %.toml + ./scripts/bench.py -c $< $(BENCHCFLAGS) -o $@ + +$(BUILDDIR)/%.b.a.c: %.c $(BENCHES) + ./scripts/bench.py -c $(BENCHES) -s $< $(BENCHCFLAGS) -o $@ + +## Clean everything +.PHONY: clean +clean: + rm -f $(BUILDDIR)/lfs + rm -f $(BUILDDIR)/liblfs.a + rm -f $(BUILDDIR)/lfs.code.csv + rm -f $(BUILDDIR)/lfs.data.csv + rm -f $(BUILDDIR)/lfs.stack.csv + rm -f $(BUILDDIR)/lfs.structs.csv + rm -f $(BUILDDIR)/lfs.cov.csv + rm -f $(BUILDDIR)/lfs.perf.csv + rm -f $(BUILDDIR)/lfs.perfbd.csv + rm -f $(BUILDDIR)/lfs.test.csv + rm -f $(BUILDDIR)/lfs.bench.csv + rm -f $(OBJ) + rm -f $(DEP) + rm -f $(ASM) + rm -f $(CI) + rm -f $(TEST_RUNNER) + rm -f $(TEST_A) + rm -f $(TEST_C) + rm -f $(TEST_OBJ) + rm -f $(TEST_DEP) + rm -f $(TEST_CI) 
+	rm -f $(TEST_GCNO)
+	rm -f $(TEST_GCDA)
+	rm -f $(TEST_PERF)
+	rm -f $(TEST_TRACE)
+	rm -f $(TEST_CSV)
+	rm -f $(BENCH_RUNNER)
+	rm -f $(BENCH_A)
+	rm -f $(BENCH_C)
+	rm -f $(BENCH_OBJ)
+	rm -f $(BENCH_DEP)
+	rm -f $(BENCH_CI)
+	rm -f $(BENCH_GCNO)
+	rm -f $(BENCH_GCDA)
+	rm -f $(BENCH_PERF)
+	rm -f $(BENCH_TRACE)
+	rm -f $(BENCH_CSV)
diff --git a/components/joltwallet__littlefs/src/littlefs/README.md b/components/joltwallet__littlefs/src/littlefs/README.md
new file mode 100644
index 0000000..95db2a0
--- /dev/null
+++ b/components/joltwallet__littlefs/src/littlefs/README.md
@@ -0,0 +1,342 @@
+## littlefs
+
+A little fail-safe filesystem designed for microcontrollers.
+
+```
+   | | |     .---._____
+  .-----.   |          |
+--|o    |---| littlefs |
+--|     |---|          |
+  '-----'   '----------'
+   | | |
+```
+
+**Power-loss resilience** - littlefs is designed to handle random power
+failures. All file operations have strong copy-on-write guarantees and if
+power is lost the filesystem will fall back to the last known good state.
+
+**Dynamic wear leveling** - littlefs is designed with flash in mind, and
+provides wear leveling over dynamic blocks. Additionally, littlefs can
+detect bad blocks and work around them.
+
+**Bounded RAM/ROM** - littlefs is designed to work with a small amount of
+memory. RAM usage is strictly bounded, which means RAM consumption does not
+change as the filesystem grows. The filesystem contains no unbounded
+recursion and dynamic memory is limited to configurable buffers that can be
+provided statically.
+
+## Example
+
+Here's a simple example that updates a file named `boot_count` every time
+main runs. The program can be interrupted at any time without losing track
+of how many times it has been booted and without corrupting the filesystem:
+
+``` c
+#include "lfs.h"
+
+// needed for printf and PRIu32
+#include <inttypes.h>
+#include <stdio.h>
+
+// variables used by the filesystem
+lfs_t lfs;
+lfs_file_t file;
+
+// configuration of the filesystem is provided by this struct
+const struct lfs_config cfg = {
+    // block device operations
+    .read  = user_provided_block_device_read,
+    .prog  = user_provided_block_device_prog,
+    .erase = user_provided_block_device_erase,
+    .sync  = user_provided_block_device_sync,
+
+    // block device configuration
+    .read_size = 16,
+    .prog_size = 16,
+    .block_size = 4096,
+    .block_count = 128,
+    .cache_size = 16,
+    .lookahead_size = 16,
+    .block_cycles = 500,
+};
+
+// entry point
+int main(void) {
+    // mount the filesystem
+    int err = lfs_mount(&lfs, &cfg);
+
+    // reformat if we can't mount the filesystem
+    // this should only happen on the first boot
+    if (err) {
+        lfs_format(&lfs, &cfg);
+        lfs_mount(&lfs, &cfg);
+    }
+
+    // read current count
+    uint32_t boot_count = 0;
+    lfs_file_open(&lfs, &file, "boot_count", LFS_O_RDWR | LFS_O_CREAT);
+    lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));
+
+    // update boot count
+    boot_count += 1;
+    lfs_file_rewind(&lfs, &file);
+    lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
+
+    // remember the storage is not updated until the file is closed successfully
+    lfs_file_close(&lfs, &file);
+
+    // release any resources we were using
+    lfs_unmount(&lfs);
+
+    // print the boot count
+    printf("boot_count: %"PRIu32"\n", boot_count);
+}
+```
+
+## Usage
+
+Detailed documentation (or at least as much detail as is currently available)
+can be found in the comments in [lfs.h](lfs.h).
+
+littlefs takes in a configuration structure that defines how the filesystem
+operates.
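+
+For reference, the `user_provided_block_device_*` functions in the example
+have the signatures below. This is a minimal RAM-backed sketch, not one of
+the drivers that ship with littlefs (real drivers live in the `bd/`
+directory); only the geometry from the example above is assumed:
+
+``` c
+#include <string.h>
+#include "lfs.h"
+
+// backing store sized to the example's 128 blocks of 4096 bytes
+static uint8_t ram[128 * 4096];
+
+int user_provided_block_device_read(const struct lfs_config *c,
+        lfs_block_t block, lfs_off_t off, void *buffer, lfs_size_t size) {
+    memcpy(buffer, &ram[block*c->block_size + off], size);
+    return 0; // or a negative lfs_error code on failure
+}
+
+int user_provided_block_device_prog(const struct lfs_config *c,
+        lfs_block_t block, lfs_off_t off, const void *buffer, lfs_size_t size) {
+    memcpy(&ram[block*c->block_size + off], buffer, size);
+    return 0;
+}
+
+int user_provided_block_device_erase(const struct lfs_config *c,
+        lfs_block_t block) {
+    // RAM doesn't need erasing, but a flash driver erases the block here
+    (void)c;
+    (void)block;
+    return 0;
+}
+
+int user_provided_block_device_sync(const struct lfs_config *c) {
+    // RAM is always in sync
+    (void)c;
+    return 0;
+}
+```
+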
The configuration struct provides the filesystem with the block
+device operations and dimensions, tweakable parameters that tradeoff memory
+usage for performance, and optional static buffers if the user wants to avoid
+dynamic memory.
+
+The state of the littlefs is stored in the `lfs_t` type which is left up
+to the user to allocate, allowing multiple filesystems to be in use
+simultaneously. With the `lfs_t` and configuration struct, a user can
+format a block device or mount the filesystem.
+
+Once mounted, the littlefs provides a full set of POSIX-like file and
+directory functions, with the deviation that the allocation of filesystem
+structures must be provided by the user.
+
+All POSIX operations, such as remove and rename, are atomic, even in event
+of power-loss. Additionally, file updates are not actually committed to
+the filesystem until sync or close is called on the file.
+
+## Other notes
+
+Littlefs is written in C, and specifically should compile with any compiler
+that conforms to the `C99` standard.
+
+All littlefs calls have the potential to return a negative error code. The
+errors can be either one of those found in the `enum lfs_error` in
+[lfs.h](lfs.h), or an error returned by the user's block device operations.
+
+In the configuration struct, the `prog` and `erase` functions provided by the
+user may return a `LFS_ERR_CORRUPT` error if the implementation already can
+detect corrupt blocks. However, the wear leveling does not depend on the return
+code of these functions; instead, all data is read back and checked for
+integrity.
+
+If your storage caches writes, make sure that the provided `sync` function
+flushes all the data to memory and ensures that the next read fetches the data
+from memory, otherwise data integrity cannot be guaranteed. If the `prog`
+function does not perform caching, and therefore each `read` or `prog` call
+hits the memory, the `sync` function can simply return 0.
+
+## Design
+
+At a high level, littlefs is a block based filesystem that uses small logs to
+store metadata and larger copy-on-write (COW) structures to store file data.
+
+In littlefs, these ingredients form a sort of two-layered cake, with the small
+logs (called metadata pairs) providing fast updates to metadata anywhere on
+storage, while the COW structures store file data compactly and without any
+wear amplification cost.
+
+Both of these data structures are built out of blocks, which are fed by a
+common block allocator. By limiting the number of erases allowed on a block
+per allocation, the allocator provides dynamic wear leveling over the entire
+filesystem.
+
+```
+                    root
+                   .--------.--------.
+                   | A'| B'|         |
+                   |   |   |->       |
+                   |   |   |         |
+                   '--------'--------'
+                .----'   '--------------.
+       A       v                 B       v
+      .--------.--------.       .--------.--------.
+      | C'| D'|         |       | E'|new|         |
+      |   |   |->       |       |   | E'|->       |
+      |   |   |         |       |   |   |         |
+      '--------'--------'       '--------'--------'
+      .-'   '--.    |             '------------------.
+     v          v  .-'                                v
+.--------.  .--------.   v                       .--------.
+| C      |  | D      |  .--------.    write      | new E  |
+|        |  |        |  | E      |     ==>       |        |
+|        |  |        |  |        |               |        |
+'--------'  '--------'  |        |               '--------'
+                        '--------'              .-'    |
+                      .-'    '-.    .-------------|------'
+                     v          v  v              v
+                 .--------.  .--------.       .--------.
+                 | F      |  | G      |       | new F  |
+                 |        |  |        |       |        |
+                 |        |  |        |       |        |
+                 '--------'  '--------'       '--------'
+```
+
+More details on how littlefs works can be found in [DESIGN.md](DESIGN.md) and
+[SPEC.md](SPEC.md).
+
+- [DESIGN.md](DESIGN.md) - A fully detailed dive into how littlefs works.
+ I would suggest reading it as the tradeoffs at work are quite interesting. + +- [SPEC.md](SPEC.md) - The on-disk specification of littlefs with all the + nitty-gritty details. May be useful for tooling development. + +## Testing + +The littlefs comes with a test suite designed to run on a PC using the +[emulated block device](bd/lfs_testbd.h) found in the `bd` directory. +The tests assume a Linux environment and can be started with make: + +``` bash +make test +``` + +Tests are implemented in C in the .toml files found in the `tests` directory. +When developing a feature or fixing a bug, it is frequently useful to run a +single test case or suite of tests: + +``` bash +./scripts/test.py -l runners/test_runner # list available test suites +./scripts/test.py -L runners/test_runner test_dirs # list available test cases +./scripts/test.py runners/test_runner test_dirs # run a specific test suite +``` + +If an assert fails in a test, test.py will try to print information about the +failure: + +``` bash +tests/test_dirs.toml:1:failure: test_dirs_root:1g12gg2 (PROG_SIZE=16, ERASE_SIZE=512) failed +tests/test_dirs.toml:5:assert: assert failed with 0, expected eq 42 + lfs_mount(&lfs, cfg) => 42; +``` + +This includes the test id, which can be passed to test.py to run only that +specific test permutation: + +``` bash +./scripts/test.py runners/test_runner test_dirs_root:1g12gg2 # run a specific test permutation +./scripts/test.py runners/test_runner test_dirs_root:1g12gg2 --gdb # drop into gdb on failure +``` + +Some other flags that may be useful: + +```bash +./scripts/test.py runners/test_runner -b -j # run tests in parallel +./scripts/test.py runners/test_runner -v -O- # redirect stdout to stdout +./scripts/test.py runners/test_runner -ddisk # capture resulting disk image +``` + +See `-h/--help` for a full list of available flags: + +``` bash +./scripts/test.py --help +``` + +## License + +The littlefs is provided under the [BSD-3-Clause] license. See +[LICENSE.md](LICENSE.md) for more information. Contributions to this project +are accepted under the same license. + +Individual files contain the following tag instead of the full license text. + + SPDX-License-Identifier: BSD-3-Clause + +This enables machine processing of license information based on the SPDX +License Identifiers that are here available: http://spdx.org/licenses/ + +## Related projects + +- [littlefs-fuse] - A [FUSE] wrapper for littlefs. The project allows you to + mount littlefs directly on a Linux machine. Can be useful for debugging + littlefs if you have an SD card handy. + +- [littlefs-js] - A javascript wrapper for littlefs. I'm not sure why you would + want this, but it is handy for demos. You can see it in action + [here][littlefs-js-demo]. + +- [littlefs-python] - A Python wrapper for littlefs. The project allows you + to create images of the filesystem on your PC. Check if littlefs will fit + your needs, create images for a later download to the target memory or + inspect the content of a binary image of the target memory. + +- [littlefs-toy] - A command-line tool for creating and working with littlefs + images. Uses syntax similar to tar command for ease of use. Supports working + on littlefs images embedded inside another file (firmware image, etc). + +- [littlefs2-rust] - A Rust wrapper for littlefs. This project allows you + to use littlefs in a Rust-friendly API, reaping the benefits of Rust's memory + safety and other guarantees. + +- [nim-littlefs] - A Nim wrapper and API for littlefs. 
Includes a fuse + implementation based on [littlefs-fuse] + +- [chamelon] - A pure-OCaml implementation of (most of) littlefs, designed for + use with the MirageOS library operating system project. It is interoperable + with the reference implementation, with some caveats. + +- [littlefs-disk-img-viewer] - A memory-efficient web application for viewing + littlefs disk images in your web browser. + +- [mklfs] - A command line tool for creating littlefs images. Used in the Lua + RTOS ecosystem. + +- [mklittlefs] - A command line tool for creating littlefs images. Used in the + ESP8266 and RP2040 ecosystem. + +- [pico-littlefs-usb] - An interface for littlefs that emulates a FAT12 + filesystem over USB. Allows mounting littlefs on a host PC without additional + drivers. + +- [ramcrc32bd] - An example block device using littlefs's 32-bit CRC for + error-correction. + +- [ramrsbd] - An example block device using Reed-Solomon codes for + error-correction. + +- [Mbed OS] - The easiest way to get started with littlefs is to jump into Mbed + which already has block device drivers for most forms of embedded storage. + littlefs is available in Mbed OS as the [LittleFileSystem] class. + +- [SPIFFS] - Another excellent embedded filesystem for NOR flash. As a more + traditional logging filesystem with full static wear-leveling, SPIFFS will + likely outperform littlefs on small memories such as the internal flash on + microcontrollers. + +- [Dhara] - An interesting NAND flash translation layer designed for small + MCUs. It offers static wear-leveling and power-resilience with only a fixed + _O(|address|)_ pointer structure stored on each block and in RAM. + +- [ChaN's FatFs] - A lightweight reimplementation of the infamous FAT filesystem + for microcontroller-scale devices. Due to limitations of FAT it can't provide + power-loss resilience, but it does allow easy interop with PCs. + +[BSD-3-Clause]: https://spdx.org/licenses/BSD-3-Clause.html +[littlefs-fuse]: https://github.com/geky/littlefs-fuse +[FUSE]: https://github.com/libfuse/libfuse +[littlefs-js]: https://github.com/geky/littlefs-js +[littlefs-js-demo]:http://littlefs.geky.net/demo.html +[littlefs-python]: https://pypi.org/project/littlefs-python/ +[littlefs-toy]: https://github.com/tjko/littlefs-toy +[littlefs2-rust]: https://crates.io/crates/littlefs2 +[nim-littlefs]: https://github.com/Graveflo/nim-littlefs +[chamelon]: https://github.com/yomimono/chamelon +[littlefs-disk-img-viewer]: https://github.com/tniessen/littlefs-disk-img-viewer +[mklfs]: https://github.com/whitecatboard/Lua-RTOS-ESP32/tree/master/components/mklfs/src +[mklittlefs]: https://github.com/earlephilhower/mklittlefs +[pico-littlefs-usb]: https://github.com/oyama/pico-littlefs-usb +[ramcrc32bd]: https://github.com/geky/ramcrc32bd +[ramrsbd]: https://github.com/geky/ramrsbd +[Mbed OS]: https://github.com/armmbed/mbed-os +[LittleFileSystem]: https://os.mbed.com/docs/mbed-os/latest/apis/littlefilesystem.html +[SPIFFS]: https://github.com/pellepl/spiffs +[Dhara]: https://github.com/dlbeer/dhara +[ChaN's FatFs]: http://elm-chan.org/fsw/ff/00index_e.html diff --git a/components/joltwallet__littlefs/src/littlefs/SPEC.md b/components/joltwallet__littlefs/src/littlefs/SPEC.md new file mode 100644 index 0000000..6682c74 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/SPEC.md @@ -0,0 +1,867 @@ +## littlefs technical specification + +This is the technical specification of the little filesystem with on-disk +version lfs2.1. 
This document covers the technical details of how the littlefs
+is stored on disk for introspection and tooling. This document assumes you are
+familiar with the design of the littlefs; for more info on how littlefs works,
+check out [DESIGN.md](DESIGN.md).
+
+```
+   | | |     .---._____
+  .-----.   |          |
+--|o    |---| littlefs |
+--|     |---|          |
+  '-----'   '----------'
+   | | |
+```
+
+## Some quick notes
+
+- littlefs is a block-based filesystem. The disk is divided into an array of
+  evenly sized blocks that are used as the logical unit of storage.
+
+- Block pointers are stored in 32 bits, with the special value `0xffffffff`
+  representing a null block address.
+
+- In addition to the logical block size (which usually matches the erase
+  block size), littlefs also uses a program block size and read block size.
+  These determine the alignment of block device operations, but don't need
+  to be consistent for portability.
+
+- By default, all values in littlefs are stored in little-endian byte order.
+
+## Directories / Metadata pairs
+
+Metadata pairs form the backbone of littlefs and provide a system for
+distributed atomic updates. Even the superblock is stored in a metadata pair.
+
+As their name suggests, a metadata pair is stored in two blocks, with one block
+providing a backup during erase cycles in case power is lost. These two blocks
+are not necessarily sequential and may be anywhere on disk, so a "pointer" to a
+metadata pair is stored as two block pointers.
+
+On top of this, each metadata block behaves as an appendable log, containing a
+variable number of commits. Commits can be appended to the metadata log in
+order to update the metadata without requiring an erase cycle. Note that
+successive commits may supersede the metadata in previous commits. Only the
+most recent metadata should be considered valid.
+
+The high-level layout of a metadata block is fairly simple:
+
+```
+  .---------------------------------------.
+.-|  revision count   |      entries      |  \
+| |-------------------+                   |  |
+| |                                       |  |
+| |                                       |  +-- 1st commit
+| |                                       |  |
+| |                   +-------------------|  |
+| |                   |        CRC        |  /
+| |-------------------+-------------------|
+| |  entries                              |  \
+| |                                       |  |
+| |                                       |  +-- 2nd commit
+| |    +-------------------+--------------|  |
+| |    |        CRC        |   padding    |  /
+| |----+-------------------+--------------|
+| |  entries                              |  \
+| |                                       |  |
+| |                                       |  +-- 3rd commit
+| |         +-------------------+---------|  |
+| |         |        CRC        |         |  /
+| |---------+-------------------+         |
+| |  unwritten storage                    |  more commits
+| |                                       |       |
+| |                                       |       v
+| |                                       |
+| |                                       |
+| '---------------------------------------'
+'---------------------------------------'
+```
+
+Each metadata block contains a 32-bit revision count followed by a number of
+commits. Each commit contains a variable number of metadata entries followed
+by a 32-bit CRC.
+
+Note also that entries aren't necessarily word-aligned. This allows us to
+store metadata more compactly, however we can only write to addresses that are
+aligned to our program block size. This means each commit may have padding for
+alignment.
+
+Metadata block fields:
+
+1. **Revision count (32-bits)** - Incremented every erase cycle. If both blocks
+   contain valid commits, only the block with the most recent revision count
+   should be used. Sequence comparison must be used to avoid issues with
+   integer overflow.
+
+2. **CRC (32-bits)** - Detects corruption from power-loss or other write
+   issues. Uses a CRC-32 with a polynomial of `0x04c11db7` initialized
+   with `0xffffffff`.
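+
+As a concrete illustration of the sequence comparison mentioned in the
+revision count field above, the comparison has to be made on the wrapping
+difference rather than on the raw values (a sketch; littlefs's own helper
+may differ in name and details):
+
+``` c
+#include <stdbool.h>
+#include <stdint.h>
+
+// true if revision a is strictly newer than revision b, even if the
+// 32-bit revision counter has wrapped around in between
+static inline bool rev_isnewer(uint32_t a, uint32_t b) {
+    // unsigned subtraction wraps modulo 2^32, so the sign of the
+    // difference tells us which revision came first
+    return (int32_t)(a - b) > 0;
+}
+```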
+ +Entries themselves are stored as a 32-bit tag followed by a variable length +blob of data. But exactly how these tags are stored is a little bit tricky. + +Metadata blocks support both forward and backward iteration. In order to do +this without duplicating the space for each tag, neighboring entries have their +tags XORed together, starting with `0xffffffff`. + +``` + Forward iteration Backward iteration + +.-------------------. 0xffffffff .-------------------. +| revision count | | | revision count | +|-------------------| v |-------------------| +| tag ~A |---> xor -> tag A | tag ~A |---> xor -> 0xffffffff +|-------------------| | |-------------------| ^ +| data A | | | data A | | +| | | | | | +| | | | | | +|-------------------| v |-------------------| | +| tag AxB |---> xor -> tag B | tag AxB |---> xor -> tag A +|-------------------| | |-------------------| ^ +| data B | | | data B | | +| | | | | | +| | | | | | +|-------------------| v |-------------------| | +| tag BxC |---> xor -> tag C | tag BxC |---> xor -> tag B +|-------------------| |-------------------| ^ +| data C | | data C | | +| | | | tag C +| | | | +| | | | +'-------------------' '-------------------' +``` + +Here's a more complete example of metadata block containing 4 entries: + +``` + .---------------------------------------. +.-| revision count | tag ~A | \ +| |-------------------+-------------------| | +| | data A | | +| | | | +| |-------------------+-------------------| | +| | tag AxB | data B | <--. | +| |-------------------+ | | | +| | | | +-- 1st commit +| | +-------------------+---------| | | +| | | tag BxC | | <-.| | +| |---------+-------------------+ | || | +| | data C | || | +| | | || | +| |-------------------+-------------------| || | +| | tag CxCRC | CRC | || / +| |-------------------+-------------------| || +| | tag CRCxA' | data A' | || \ +| |-------------------+ | || | +| | | || | +| | +-------------------+----| || +-- 2nd commit +| | | tag CRCxA' | | || | +| |--------------+-------------------+----| || | +| | CRC | padding | || / +| |--------------+----+-------------------| || +| | tag CRCxA'' | data A'' | <---. \ +| |-------------------+ | ||| | +| | | ||| | +| | +-------------------+---------| ||| | +| | | tag A''xD | | < ||| | +| |---------+-------------------+ | |||| +-- 3rd commit +| | data D | |||| | +| | +---------| |||| | +| | | tag Dx| |||| | +| |---------+-------------------+---------| |||| | +| |CRC | CRC | | |||| / +| |---------+-------------------+ | |||| +| | unwritten storage | |||| more commits +| | | |||| | +| | | |||| v +| | | |||| +| | | |||| +| '---------------------------------------' |||| +'---------------------------------------' |||'- most recent A + ||'-- most recent B + |'--- most recent C + '---- most recent D +``` + +Two things to note before we get into the details around tag encoding: + +1. Each tag contains a valid bit used to indicate if the tag and containing + commit is valid. After XORing, this bit should always be zero. + + At the end of each commit, the valid bit of the previous tag is XORed + with the lowest bit in the type field of the CRC tag. This allows + the CRC tag to force the next commit to fail the valid bit test if it + has not yet been written to. + +2. The valid bit alone is not enough info to know if the next commit has been + erased. We don't know the order bits will be programmed in a program block, + so it's possible that the next commit had an attempted program that left the + valid bit unchanged. 
+
+   To ensure we only ever program erased bytes, each commit can contain an
+   optional forward-CRC (FCRC). An FCRC contains a checksum of some amount of
+   bytes in the next commit at the time it was erased.
+
+   ```
+   .-------------------.  \            \
+   |  revision count   |  |             |
+   |-------------------|  |             |
+   |     metadata      |  |             |
+   |                   |  +---.         +-- current commit
+   |                   |  |   |         |
+   |-------------------|  |   |         |
+   |       FCRC      ---|-.             |
+   |-------------------|  / |           |
+   |        CRC      -----|-'           /
+   |-------------------|  |
+   |      padding      |  |  padding (doesn't need CRC)
+   |                   |  |
+   |-------------------|  \  |          \
+   |      erased?      |  +-'            |
+   |         |         |  |              +-- next commit
+   |         v         |  /              |
+   |                   |                 /
+   |                   |
+   '-------------------'
+   ```
+
+   If the FCRC is missing or the checksum does not match, we must assume a
+   commit was attempted but failed due to power-loss.
+
+   Note that end-of-block commits do not need an FCRC.
+
+## Metadata tags
+
+So in littlefs, 32-bit tags describe every type of metadata. And this means
+_every_ type of metadata, including file entries, directory fields, and
+global state. Even the CRCs used to mark the end of commits get their own tag.
+
+Because of this, the tag format contains some densely packed information. Note
+that there are multiple levels of types which break down into more info:
+
+```
+[----            32             ----]
+[1|--  11   --|--  10  --|--  10  --]
+ ^.     ^     .     ^          ^- length
+ |.     |     .     '------------ id
+ |.     '-----.------------------ type (type3)
+ '.-----------.------------------ valid bit
+        [-3-|--  8  --]
+         ^       ^- chunk
+         '------- type (type1)
+```
+
+Before we go further, there's one important thing to note. These tags are
+**not** stored in little-endian. Tags stored in commits are actually stored
+in big-endian (and are the only thing in littlefs stored in big-endian). This
+little bit of craziness comes from the fact that the valid bit must be the
+first bit in a commit, and when converted to little-endian, the valid bit finds
+itself in byte 4. We could restructure the tag to store the valid bit lower,
+but, because none of the fields are byte-aligned, this would be more
+complicated than just storing the tag in big-endian.
+
+Another thing to note is that both the tags `0x00000000` and `0xffffffff` are
+invalid and can be used for null values.
+
+Metadata tag fields:
+
+1. **Valid bit (1-bit)** - Indicates if the tag is valid.
+
+2. **Type3 (11-bits)** - Type of the tag. This field is broken down further
+   into a 3-bit abstract type and an 8-bit chunk field. Note that the value
+   `0x000` is invalid and not assigned a type.
+
+   1. **Type1 (3-bits)** - Abstract type of the tag. Groups the tags into
+      8 categories that facilitate bitmasked lookups.
+
+   2. **Chunk (8-bits)** - Chunk field used for various purposes by the different
+      abstract types. type1+chunk+id form a unique identifier for each tag in the
+      metadata block.
+
+3. **Id (10-bits)** - File id associated with the tag. Each file in a metadata
+   block gets a unique id which is used to associate tags with that file. The
+   special value `0x3ff` is used for any tags that are not associated with a
+   file, such as directory and global metadata.
+
+4. **Length (10-bits)** - Length of the data in bytes. The special value
+   `0x3ff` indicates that this tag has been deleted.
+
+## Metadata types
+
+What follows is an exhaustive list of metadata in littlefs.
+
+---
+#### `0x401` LFS_TYPE_CREATE
+
+Creates a new file with this id. Note that files in a metadata block
+don't necessarily need a create tag. All a create does is move over any
+files using this id.
In this sense a create is similar to insertion into +an imaginary array of files. + +The create and delete tags allow littlefs to keep files in a directory +ordered alphabetically by filename. + +--- +#### `0x4ff` LFS_TYPE_DELETE + +Deletes the file with this id. An inverse to create, this tag moves over +any files neighboring this id similar to a deletion from an imaginary +array of files. + +--- +#### `0x0xx` LFS_TYPE_NAME + +Associates the id with a file name and file type. + +The data contains the file name stored as an ASCII string (may be expanded to +UTF8 in the future). + +The chunk field in this tag indicates an 8-bit file type which can be one of +the following. + +Currently, the name tag must precede any other tags associated with the id and +can not be reassigned without deleting the file. + +Layout of the name tag: + +``` + tag data +[-- 32 --][--- variable length ---] +[1| 3| 8 | 10 | 10 ][--- (size * 8) ---] + ^ ^ ^ ^ ^- size ^- file name + | | | '------ id + | | '----------- file type + | '-------------- type1 (0x0) + '----------------- valid bit +``` + +Name fields: + +1. **file type (8-bits)** - Type of the file. + +2. **file name** - File name stored as an ASCII string. + +--- +#### `0x001` LFS_TYPE_REG + +Initializes the id + name as a regular file. + +How each file is stored depends on its struct tag, which is described below. + +--- +#### `0x002` LFS_TYPE_DIR + +Initializes the id + name as a directory. + +Directories in littlefs are stored on disk as a linked-list of metadata pairs, +each pair containing any number of files in alphabetical order. A pointer to +the directory is stored in the struct tag, which is described below. + +--- +#### `0x0ff` LFS_TYPE_SUPERBLOCK + +Initializes the id as a superblock entry. + +The superblock entry is a special entry used to store format-time configuration +and identify the filesystem. + +The name is a bit of a misnomer. While the superblock entry serves the same +purpose as a superblock found in other filesystems, in littlefs the superblock +does not get a dedicated block. Instead, the superblock entry is duplicated +across a linked-list of metadata pairs rooted on the blocks 0 and 1. The last +metadata pair doubles as the root directory of the filesystem. + +``` + .--------. .--------. .--------. .--------. .--------. +.| super |->| super |->| super |->| super |->| file B | +|| block | || block | || block | || block | || file C | +|| | || | || | || file A | || file D | +|'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '--------' '--------' '--------' '--------' + +\----------------+----------------/ \----------+----------/ + superblock pairs root directory +``` + +The filesystem starts with only the root directory. The superblock metadata +pairs grow every time the root pair is compacted in order to prolong the +life of the device exponentially. + +The contents of the superblock entry are stored in a name tag with the +superblock type and an inline-struct tag. The name tag contains the magic +string "littlefs", while the inline-struct tag contains version and +configuration information. 
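+
+Before getting to the exact tag layout below, the inline-struct's data can be
+pictured as a small struct of little-endian words (a sketch for orientation
+only; the struct and field names here are not from the littlefs sources):
+
+``` c
+#include <stdint.h>
+
+// contents of the superblock inline-struct, 24 bytes on disk
+struct superblock_data {
+    uint32_t version;     // minor version in low 16 bits, major in high 16
+    uint32_t block_size;  // logical block size in bytes
+    uint32_t block_count; // number of blocks in the filesystem
+    uint32_t name_max;    // maximum file name size in bytes
+    uint32_t file_max;    // maximum file size in bytes
+    uint32_t attr_max;    // maximum file attribute size in bytes
+};
+```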
+ +Layout of the superblock name tag and inline-struct tag: + +``` + tag data +[-- 32 --][-- 32 --|-- 32 --] +[1|- 11 -| 10 | 10 ][--- 64 ---] + ^ ^ ^ ^- size (8) ^- magic string ("littlefs") + | | '------ id (0) + | '------------ type (0x0ff) + '----------------- valid bit + + tag data +[-- 32 --][-- 32 --|-- 32 --|-- 32 --] +[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --|-- 32 --] + ^ ^ ^ ^ ^- version ^- block size ^- block count + | | | | [-- 32 --|-- 32 --|-- 32 --] + | | | | [-- 32 --|-- 32 --|-- 32 --] + | | | | ^- name max ^- file max ^- attr max + | | | '- size (24) + | | '------ id (0) + | '------------ type (0x201) + '----------------- valid bit +``` + +Superblock fields: + +1. **Magic string (8-bytes)** - Magic string indicating the presence of + littlefs on the device. Must be the string "littlefs". + +2. **Version (32-bits)** - The version of littlefs at format time. The version + is encoded in a 32-bit value with the upper 16-bits containing the major + version, and the lower 16-bits containing the minor version. + + This specification describes version 2.0 (`0x00020000`). + +3. **Block size (32-bits)** - Size of the logical block size used by the + filesystem in bytes. + +4. **Block count (32-bits)** - Number of blocks in the filesystem. + +5. **Name max (32-bits)** - Maximum size of file names in bytes. + +6. **File max (32-bits)** - Maximum size of files in bytes. + +7. **Attr max (32-bits)** - Maximum size of file attributes in bytes. + +The superblock must always be the first entry (id 0) in the metadata pair, and +the name tag must always be the first tag in the metadata pair. This makes it +so that the magic string "littlefs" will always reside at offset=8 in a valid +littlefs superblock. + +--- +#### `0x2xx` LFS_TYPE_STRUCT + +Associates the id with an on-disk data structure. + +The exact layout of the data depends on the data structure type stored in the +chunk field and can be one of the following. + +Any type of struct supersedes all other structs associated with the id. For +example, appending a ctz-struct replaces an inline-struct on the same file. + +--- +#### `0x200` LFS_TYPE_DIRSTRUCT + +Gives the id a directory data structure. + +Directories in littlefs are stored on disk as a linked-list of metadata pairs, +each pair containing any number of files in alphabetical order. + +``` + | + v + .--------. .--------. .--------. .--------. .--------. .--------. +.| file A |->| file D |->| file G |->| file I |->| file J |->| file M | +|| file B | || file E | || file H | || | || file K | || file N | +|| file C | || file F | || | || | || file L | || | +|'--------' |'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '--------' '--------' '--------' '--------' '--------' +``` + +The dir-struct tag contains only the pointer to the first metadata-pair in the +directory. The directory size is not known without traversing the directory. + +The pointer to the next metadata-pair in the directory is stored in a tail tag, +which is described below. + +Layout of the dir-struct tag: + +``` + tag data +[-- 32 --][-- 32 --|-- 32 --] +[1|- 11 -| 10 | 10 ][--- 64 ---] + ^ ^ ^ ^- size (8) ^- metadata pair + | | '------ id + | '------------ type (0x200) + '----------------- valid bit +``` + +Dir-struct fields: + +1. **Metadata pair (8-bytes)** - Pointer to the first metadata-pair + in the directory. + +--- +#### `0x201` LFS_TYPE_INLINESTRUCT + +Gives the id an inline data structure. + +Inline structs store small files that can fit in the metadata pair. 
In this +case, the file data is stored directly in the tag's data area. + +Layout of the inline-struct tag: + +``` + tag data +[-- 32 --][--- variable length ---] +[1|- 11 -| 10 | 10 ][--- (size * 8) ---] + ^ ^ ^ ^- size ^- inline data + | | '------ id + | '------------ type (0x201) + '----------------- valid bit +``` + +Inline-struct fields: + +1. **Inline data** - File data stored directly in the metadata-pair. + +--- +#### `0x202` LFS_TYPE_CTZSTRUCT + +Gives the id a CTZ skip-list data structure. + +CTZ skip-lists store files that can not fit in the metadata pair. These files +are stored in a skip-list in reverse, with a pointer to the head of the +skip-list. Note that the head of the skip-list and the file size is enough +information to read the file. + +How exactly CTZ skip-lists work is a bit complicated. A full explanation can be +found in the [DESIGN.md](DESIGN.md#ctz-skip-lists). + +A quick summary: For every _n_‍th block where _n_ is divisible by +2‍_ˣ_, that block contains a pointer to block _n_-2‍_ˣ_. +These pointers are stored in increasing order of _x_ in each block of the file +before the actual data. + +``` + | + v +.--------. .--------. .--------. .--------. .--------. .--------. +| A |<-| D |<-| G |<-| J |<-| M |<-| P | +| B |<-| E |--| H |<-| K |--| N | | Q | +| C |<-| F |--| I |--| L |--| O | | | +'--------' '--------' '--------' '--------' '--------' '--------' + block 0 block 1 block 2 block 3 block 4 block 5 + 1 skip 2 skips 1 skip 3 skips 1 skip +``` + +Note that the maximum number of pointers in a block is bounded by the maximum +file size divided by the block size. With 32 bits for file size, this results +in a minimum block size of 104 bytes. + +Layout of the CTZ-struct tag: + +``` + tag data +[-- 32 --][-- 32 --|-- 32 --] +[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --] + ^ ^ ^ ^ ^ ^- file size + | | | | '-------------------- file head + | | | '- size (8) + | | '------ id + | '------------ type (0x202) + '----------------- valid bit +``` + +CTZ-struct fields: + +1. **File head (32-bits)** - Pointer to the block that is the head of the + file's CTZ skip-list. + +2. **File size (32-bits)** - Size of the file in bytes. + +--- +#### `0x3xx` LFS_TYPE_USERATTR + +Attaches a user attribute to an id. + +littlefs has a concept of "user attributes". These are small user-provided +attributes that can be used to store things like timestamps, hashes, +permissions, etc. + +Each user attribute is uniquely identified by an 8-bit type which is stored in +the chunk field, and the user attribute itself can be found in the tag's data. + +There are currently no standard user attributes and a portable littlefs +implementation should work with any user attributes missing. + +Layout of the user-attr tag: + +``` + tag data +[-- 32 --][--- variable length ---] +[1| 3| 8 | 10 | 10 ][--- (size * 8) ---] + ^ ^ ^ ^ ^- size ^- attr data + | | | '------ id + | | '----------- attr type + | '-------------- type1 (0x3) + '----------------- valid bit +``` + +User-attr fields: + +1. **Attr type (8-bits)** - Type of the user attributes. + +2. **Attr data** - The data associated with the user attribute. + +--- +#### `0x6xx` LFS_TYPE_TAIL + +Provides the tail pointer for the metadata pair itself. + +The metadata pair's tail pointer is used in littlefs for a linked-list +containing all metadata pairs. The chunk field contains the type of the tail, +which indicates if the following metadata pair is a part of the directory +(hard-tail) or only used to traverse the filesystem (soft-tail). + +``` + .--------. 
+ .| dir A |-. + ||softtail| | +.--------| |-' +| |'--------' +| '---|--|-' +| .-' '-------------. +| v v +| .--------. .--------. .--------. +'->| dir B |->| dir B |->| dir C | + ||hardtail| ||softtail| || | + || | || | || | + |'--------' |'--------' |'--------' + '--------' '--------' '--------' +``` + +Currently any type supersedes any other preceding tails in the metadata pair, +but this may change if additional metadata pair state is added. + +A note about the metadata pair linked-list: Normally, this linked-list contains +every metadata pair in the filesystem. However, there are some operations that +can cause this linked-list to become out of sync if a power-loss were to occur. +When this happens, littlefs sets the "sync" flag in the global state. How +exactly this flag is stored is described below. + +When the sync flag is set: + +1. The linked-list may contain an orphaned directory that has been removed in + the filesystem. +2. The linked-list may contain a metadata pair with a bad block that has been + replaced in the filesystem. + +If the sync flag is set, the threaded linked-list must be checked for these +errors before it can be used reliably. Note that the threaded linked-list can +be ignored if littlefs is mounted read-only. + +Layout of the tail tag: + +``` + tag data +[-- 32 --][-- 32 --|-- 32 --] +[1| 3| 8 | 10 | 10 ][--- 64 ---] + ^ ^ ^ ^ ^- size (8) ^- metadata pair + | | | '------ id + | | '---------- tail type + | '------------- type1 (0x6) + '---------------- valid bit +``` + +Tail fields: + +1. **Tail type (8-bits)** - Type of the tail pointer. + +2. **Metadata pair (8-bytes)** - Pointer to the next metadata-pair. + +--- +#### `0x600` LFS_TYPE_SOFTTAIL + +Provides a tail pointer that points to the next metadata pair in the +filesystem. + +In this case, the next metadata pair is not a part of our current directory +and should only be followed when traversing the entire filesystem. + +--- +#### `0x601` LFS_TYPE_HARDTAIL + +Provides a tail pointer that points to the next metadata pair in the +directory. + +In this case, the next metadata pair belongs to the current directory. Note +that because directories in littlefs are sorted alphabetically, the next +metadata pair should only contain filenames greater than any filename in the +current pair. + +--- +#### `0x7xx` LFS_TYPE_GSTATE + +Provides delta bits for global state entries. + +littlefs has a concept of "global state". This is a small set of state that +can be updated by a commit to _any_ metadata pair in the filesystem. + +The way this works is that the global state is stored as a set of deltas +distributed across the filesystem such that the global state can be found by +the xor-sum of these deltas. + +``` + .--------. .--------. .--------. .--------. .--------. +.| |->| gdelta |->| |->| gdelta |->| gdelta | +|| | || 0x23 | || | || 0xff | || 0xce | +|| | || | || | || | || | +|'--------' |'--------' |'--------' |'--------' |'--------' +'--------' '----|---' '--------' '----|---' '----|---' + v v v + 0x00 --> xor ------------------> xor ------> xor --> gstate = 0x12 +``` + +Note that storing globals this way is very expensive in terms of storage usage, +so any global state should be kept very small. + +The size and format of each piece of global state depends on the type, which +is stored in the chunk field. Currently, the only global state is move state, +which is outlined below. + +--- +#### `0x7ff` LFS_TYPE_MOVESTATE + +Provides delta bits for the global move state. 
+
+The move state in littlefs is used to store info about operations that could
+cause the filesystem to go out of sync if the power is lost. The operations
+where this could occur are moves of files between metadata pairs and any
+operation that changes metadata pairs on the threaded linked-list.
+
+In the case of moves, the move state contains a tag + metadata pair describing
+the source of the ongoing move. If this tag is non-zero, that means that power
+was lost during a move, and the file exists in two different locations. If this
+happens, the source of the move should be considered deleted, and the move
+should be completed (the source should be deleted) before any other write
+operations to the filesystem.
+
+In the case of operations to the threaded linked-list, a single "sync" bit is
+used to indicate that a modification is ongoing. If this sync flag is set, the
+threaded linked-list will need to be checked for errors before it can be used
+reliably. The exact cases to check for are described above in the tail tag.
+
+Layout of the move state:
+
+```
+         tag                         data
+[--      32      --][--  32  --|--  32  --|--    32    --]
+[1|- 11 -| 10 | 10 ][1|- 11 -| 10 | 10 |---     64    ---]
+ ^    ^     ^    ^   ^    ^     ^    ^- padding (0)   ^- metadata pair
+ |    |     |    |   |    |     '------ move id
+ |    |     |    |   |    '------------ move type
+ |    |     |    |   '----------------- sync bit
+ |    |     |    |
+ |    |     |    '- size (12)
+ |    |     '------ id (0x3ff)
+ |    '------------ type (0x7ff)
+ '----------------- valid bit
+```
+
+Move state fields:
+
+1. **Sync bit (1-bit)** - Indicates if the metadata pair threaded linked-list
+   is in-sync. If set, the threaded linked-list should be checked for errors.
+
+2. **Move type (11-bits)** - Type of move being performed. Must be either
+   `0x000`, indicating no move, or `0x4ff` indicating the source file should
+   be deleted.
+
+3. **Move id (10-bits)** - The file id being moved.
+
+4. **Metadata pair (8-bytes)** - Pointer to the metadata-pair containing
+   the move.
+
+---
+#### `0x5xx` LFS_TYPE_CRC
+
+Last but not least, the CRC tag marks the end of a commit and provides a
+checksum for any commits to the metadata block.
+
+The first 32-bits of the data contain a CRC-32 with a polynomial of
+`0x04c11db7` initialized with `0xffffffff`. This CRC provides a checksum for
+all metadata since the previous CRC tag, including the CRC tag itself. For
+the first commit, this includes the revision count for the metadata block.
+
+However, the size of the data is not limited to 32-bits. The data field may
+be larger to pad the commit to the next program-aligned boundary.
+
+In addition, the CRC tag's chunk field contains a set of flags which can
+change the behaviour of commits. Currently the only flag in use is the lowest
+bit, which determines the expected state of the valid bit for any following
+tags. This is used to guarantee that unwritten storage in a metadata block
+will be detected as invalid.
+
+Layout of the CRC tag:
+
+```
+         tag                       data
+[--      32      --][--  32  --|---  variable length  ---]
+[1| 3| 8 | 10 | 10 ][--  32  --|---  (size * 8 - 32)  ---]
+ ^  ^  ^    ^    ^       ^- crc          ^- padding
+ |  |  |    |    '- size
+ |  |  |    '------ id (0x3ff)
+ |  |  '----------- valid state
+ |  '-------------- type1 (0x5)
+ '----------------- valid bit
+```
+
+CRC fields:
+
+1. **Valid state (1-bit)** - Indicates the expected value of the valid bit for
+   any tags in the next commit.
+
+2. **CRC (32-bits)** - CRC-32 with a polynomial of `0x04c11db7` initialized
+   with `0xffffffff`.
+
+3. **Padding** - Padding to the next program-aligned boundary. No guarantees
+   are made about the contents.
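+
+The checksum itself can be computed bit-by-bit as an ordinary reflected
+CRC-32 (a sketch; `0xedb88320` is the reflected form of the `0x04c11db7`
+polynomial, and the reference implementation uses an equivalent table-driven
+routine in lfs_util.c):
+
+``` c
+#include <stddef.h>
+#include <stdint.h>
+
+// CRC-32 over a commit's bytes, seeded with 0xffffffff for the first
+// byte after the previous CRC tag, with no final inversion
+uint32_t commit_crc32(uint32_t crc, const void *buffer, size_t size) {
+    const uint8_t *data = buffer;
+    for (size_t i = 0; i < size; i++) {
+        crc ^= data[i];
+        for (int j = 0; j < 8; j++) {
+            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+        }
+    }
+    return crc;
+}
+```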
+ +--- +#### `0x5ff` LFS_TYPE_FCRC + +Added in lfs2.1, the optional FCRC tag contains a checksum of some amount of +bytes in the next commit at the time it was erased. This allows us to ensure +that we only ever program erased bytes, even if a previous commit failed due +to power-loss. + +When programming a commit, the FCRC size must be at least as large as the +program block size. However, the program block is not saved on disk, and can +change between mounts, so the FCRC size on disk may be different than the +current program block size. + +If the FCRC is missing or the checksum does not match, we must assume a +commit was attempted but failed due to power-loss. + +Layout of the FCRC tag: + +``` + tag data +[-- 32 --][-- 32 --|-- 32 --] +[1|- 11 -| 10 | 10 ][-- 32 --|-- 32 --] + ^ ^ ^ ^ ^- fcrc size ^- fcrc + | | | '- size (8) + | | '------ id (0x3ff) + | '------------ type (0x5ff) + '----------------- valid bit +``` + +FCRC fields: + +1. **FCRC size (32-bits)** - Number of bytes after this commit's CRC tag's + padding to include in the FCRC. + +2. **FCRC (32-bits)** - CRC of the bytes after this commit's CRC tag's padding + when erased. Like the CRC tag, this uses a CRC-32 with a polynomial of + `0x04c11db7` initialized with `0xffffffff`. + +--- diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.c b/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.c new file mode 100644 index 0000000..a734bc2 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.c @@ -0,0 +1,739 @@ +/* + * Emulating block device, wraps filebd and rambd while providing a bunch + * of hooks for testing littlefs in various conditions. + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 199309L +#endif + +#include "bd/lfs_emubd.h" + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + + +// access to lazily-allocated/copy-on-write blocks +// +// Note we can only modify a block if we have exclusive access to it (rc == 1) +// + +static lfs_emubd_block_t *lfs_emubd_incblock(lfs_emubd_block_t *block) { + if (block) { + block->rc += 1; + } + return block; +} + +static void lfs_emubd_decblock(lfs_emubd_block_t *block) { + if (block) { + block->rc -= 1; + if (block->rc == 0) { + free(block); + } + } +} + +static lfs_emubd_block_t *lfs_emubd_mutblock( + const struct lfs_config *cfg, + lfs_emubd_block_t **block) { + lfs_emubd_t *bd = cfg->context; + lfs_emubd_block_t *block_ = *block; + if (block_ && block_->rc == 1) { + // rc == 1? can modify + return block_; + + } else if (block_) { + // rc > 1? need to create a copy + lfs_emubd_block_t *nblock = malloc( + sizeof(lfs_emubd_block_t) + bd->cfg->erase_size); + if (!nblock) { + return NULL; + } + + memcpy(nblock, block_, + sizeof(lfs_emubd_block_t) + bd->cfg->erase_size); + nblock->rc = 1; + + lfs_emubd_decblock(block_); + *block = nblock; + return nblock; + + } else { + // no block? need to allocate + lfs_emubd_block_t *nblock = malloc( + sizeof(lfs_emubd_block_t) + bd->cfg->erase_size); + if (!nblock) { + return NULL; + } + + nblock->rc = 1; + nblock->wear = 0; + + // zero for consistency + memset(nblock->data, + (bd->cfg->erase_value != -1) ? 
bd->cfg->erase_value : 0, + bd->cfg->erase_size); + + *block = nblock; + return nblock; + } +} + + +// emubd create/destroy + +int lfs_emubd_create(const struct lfs_config *cfg, + const struct lfs_emubd_config *bdcfg) { + LFS_EMUBD_TRACE("lfs_emubd_create(%p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p}, " + "%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".erase_size=%"PRIu32", .erase_count=%"PRIu32", " + ".erase_value=%"PRId32", .erase_cycles=%"PRIu32", " + ".badblock_behavior=%"PRIu8", .power_cycles=%"PRIu32", " + ".powerloss_behavior=%"PRIu8", .powerloss_cb=%p, " + ".powerloss_data=%p, .track_branches=%d})", + (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + (void*)bdcfg, + bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size, + bdcfg->erase_count, bdcfg->erase_value, bdcfg->erase_cycles, + bdcfg->badblock_behavior, bdcfg->power_cycles, + bdcfg->powerloss_behavior, (void*)(uintptr_t)bdcfg->powerloss_cb, + bdcfg->powerloss_data, bdcfg->track_branches); + lfs_emubd_t *bd = cfg->context; + bd->cfg = bdcfg; + + // allocate our block array, all blocks start as uninitialized + bd->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*)); + if (!bd->blocks) { + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + memset(bd->blocks, 0, bd->cfg->erase_count * sizeof(lfs_emubd_block_t*)); + + // setup testing things + bd->readed = 0; + bd->proged = 0; + bd->erased = 0; + bd->power_cycles = bd->cfg->power_cycles; + bd->ooo_block = -1; + bd->ooo_data = NULL; + bd->disk = NULL; + + if (bd->cfg->disk_path) { + bd->disk = malloc(sizeof(lfs_emubd_disk_t)); + if (!bd->disk) { + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + bd->disk->rc = 1; + bd->disk->scratch = NULL; + + #ifdef _WIN32 + bd->disk->fd = open(bd->cfg->disk_path, + O_RDWR | O_CREAT | O_BINARY, 0666); + #else + bd->disk->fd = open(bd->cfg->disk_path, + O_RDWR | O_CREAT, 0666); + #endif + if (bd->disk->fd < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err); + return err; + } + + // if we're emulating erase values, we can keep a block around in + // memory of just the erase state to speed up emulated erases + if (bd->cfg->erase_value != -1) { + bd->disk->scratch = malloc(bd->cfg->erase_size); + if (!bd->disk->scratch) { + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + memset(bd->disk->scratch, + bd->cfg->erase_value, + bd->cfg->erase_size); + + // go ahead and erase all of the disk, otherwise the file will not + // match our internal representation + for (size_t i = 0; i < bd->cfg->erase_count; i++) { + ssize_t res = write(bd->disk->fd, + bd->disk->scratch, + bd->cfg->erase_size); + if (res < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", err); + return err; + } + } + } + } + + LFS_EMUBD_TRACE("lfs_emubd_create -> %d", 0); + return 0; +} + +int lfs_emubd_destroy(const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_destroy(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + + // decrement reference counts + for (lfs_block_t i = 0; i < bd->cfg->erase_count; i++) { + lfs_emubd_decblock(bd->blocks[i]); + } + free(bd->blocks); + + // clean up other resources + lfs_emubd_decblock(bd->ooo_data); + if (bd->disk) { + bd->disk->rc -= 1; + if (bd->disk->rc == 0) { + close(bd->disk->fd); + free(bd->disk->scratch); + free(bd->disk); + } + } + + 
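+    // note: blocks and the disk mirror are reference-counted, so the
+    // teardown above only frees memory once the last reference is dropped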
LFS_EMUBD_TRACE("lfs_emubd_destroy -> %d", 0); + return 0; +} + + +// powerloss hook +static int lfs_emubd_powerloss(const struct lfs_config *cfg) { + lfs_emubd_t *bd = cfg->context; + + // emulate out-of-order writes? + lfs_emubd_block_t *ooo_data = NULL; + if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO + && bd->ooo_block != -1) { + // since writes between syncs are allowed to be out-of-order, it + // shouldn't hurt to restore the first write on powerloss, right? + ooo_data = bd->blocks[bd->ooo_block]; + bd->blocks[bd->ooo_block] = lfs_emubd_incblock(bd->ooo_data); + + // mirror to disk file? + if (bd->disk + && (bd->blocks[bd->ooo_block] + || bd->cfg->erase_value != -1)) { + off_t res1 = lseek(bd->disk->fd, + (off_t)bd->ooo_block*bd->cfg->erase_size, + SEEK_SET); + if (res1 < 0) { + return -errno; + } + + ssize_t res2 = write(bd->disk->fd, + (bd->blocks[bd->ooo_block]) + ? bd->blocks[bd->ooo_block]->data + : bd->disk->scratch, + bd->cfg->erase_size); + if (res2 < 0) { + return -errno; + } + } + } + + // simulate power loss + bd->cfg->powerloss_cb(bd->cfg->powerloss_data); + + // if we continue, undo out-of-order write emulation + if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO + && bd->ooo_block != -1) { + lfs_emubd_decblock(bd->blocks[bd->ooo_block]); + bd->blocks[bd->ooo_block] = ooo_data; + + // mirror to disk file? + if (bd->disk + && (bd->blocks[bd->ooo_block] + || bd->cfg->erase_value != -1)) { + off_t res1 = lseek(bd->disk->fd, + (off_t)bd->ooo_block*bd->cfg->erase_size, + SEEK_SET); + if (res1 < 0) { + return -errno; + } + + ssize_t res2 = write(bd->disk->fd, + (bd->blocks[bd->ooo_block]) + ? bd->blocks[bd->ooo_block]->data + : bd->disk->scratch, + bd->cfg->erase_size); + if (res2 < 0) { + return -errno; + } + } + } + + return 0; +} + + +// block device API + +int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size) { + LFS_EMUBD_TRACE("lfs_emubd_read(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_emubd_t *bd = cfg->context; + + // check if read is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->read_size == 0); + LFS_ASSERT(size % bd->cfg->read_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // get the block + const lfs_emubd_block_t *b = bd->blocks[block]; + if (b) { + // block bad? + if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles && + bd->cfg->badblock_behavior == LFS_EMUBD_BADBLOCK_READERROR) { + LFS_EMUBD_TRACE("lfs_emubd_read -> %d", LFS_ERR_CORRUPT); + return LFS_ERR_CORRUPT; + } + + // read data + memcpy(buffer, &b->data[off], size); + } else { + // zero for consistency + memset(buffer, + (bd->cfg->erase_value != -1) ? 
bd->cfg->erase_value : 0, + size); + } + + // track reads + bd->readed += size; + if (bd->cfg->read_sleep) { + int err = nanosleep(&(struct timespec){ + .tv_sec=bd->cfg->read_sleep/1000000000, + .tv_nsec=bd->cfg->read_sleep%1000000000}, + NULL); + if (err) { + err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_read -> %d", err); + return err; + } + } + + LFS_EMUBD_TRACE("lfs_emubd_read -> %d", 0); + return 0; +} + +int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size) { + LFS_EMUBD_TRACE("lfs_emubd_prog(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_emubd_t *bd = cfg->context; + + // check if write is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->prog_size == 0); + LFS_ASSERT(size % bd->cfg->prog_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // get the block + lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]); + if (!b) { + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + + // block bad? + if (bd->cfg->erase_cycles && b->wear >= bd->cfg->erase_cycles) { + if (bd->cfg->badblock_behavior == + LFS_EMUBD_BADBLOCK_PROGERROR) { + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", LFS_ERR_CORRUPT); + return LFS_ERR_CORRUPT; + } else if (bd->cfg->badblock_behavior == + LFS_EMUBD_BADBLOCK_PROGNOOP || + bd->cfg->badblock_behavior == + LFS_EMUBD_BADBLOCK_ERASENOOP) { + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0); + return 0; + } + } + + // were we erased properly? + if (bd->cfg->erase_value != -1) { + for (lfs_off_t i = 0; i < size; i++) { + LFS_ASSERT(b->data[off+i] == bd->cfg->erase_value); + } + } + + // prog data + memcpy(&b->data[off], buffer, size); + + // mirror to disk file? + if (bd->disk) { + off_t res1 = lseek(bd->disk->fd, + (off_t)block*bd->cfg->erase_size + (off_t)off, + SEEK_SET); + if (res1 < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err); + return err; + } + + ssize_t res2 = write(bd->disk->fd, buffer, size); + if (res2 < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err); + return err; + } + } + + // track progs + bd->proged += size; + if (bd->cfg->prog_sleep) { + int err = nanosleep(&(struct timespec){ + .tv_sec=bd->cfg->prog_sleep/1000000000, + .tv_nsec=bd->cfg->prog_sleep%1000000000}, + NULL); + if (err) { + err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err); + return err; + } + } + + // lose power? + if (bd->power_cycles > 0) { + bd->power_cycles -= 1; + if (bd->power_cycles == 0) { + int err = lfs_emubd_powerloss(cfg); + if (err) { + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", err); + return err; + } + } + } + + LFS_EMUBD_TRACE("lfs_emubd_prog -> %d", 0); + return 0; +} + +int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block) { + LFS_EMUBD_TRACE("lfs_emubd_erase(%p, 0x%"PRIx32" (%"PRIu32"))", + (void*)cfg, block, ((lfs_emubd_t*)cfg->context)->cfg->erase_size); + lfs_emubd_t *bd = cfg->context; + + // check if erase is valid + LFS_ASSERT(block < bd->cfg->erase_count); + + // emulate out-of-order writes? save first write + if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO + && bd->ooo_block == -1) { + bd->ooo_block = block; + bd->ooo_data = lfs_emubd_incblock(bd->blocks[block]); + } + + // get the block + lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]); + if (!b) { + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + + // block bad? 
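+    // (a block goes "bad" once its wear reaches erase_cycles; what happens
+    // then is selected by badblock_behavior)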
+ if (bd->cfg->erase_cycles) { + if (b->wear >= bd->cfg->erase_cycles) { + if (bd->cfg->badblock_behavior == + LFS_EMUBD_BADBLOCK_ERASEERROR) { + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", LFS_ERR_CORRUPT); + return LFS_ERR_CORRUPT; + } else if (bd->cfg->badblock_behavior == + LFS_EMUBD_BADBLOCK_ERASENOOP) { + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0); + return 0; + } + } else { + // mark wear + b->wear += 1; + } + } + + // emulate an erase value? + if (bd->cfg->erase_value != -1) { + memset(b->data, bd->cfg->erase_value, bd->cfg->erase_size); + + // mirror to disk file? + if (bd->disk) { + off_t res1 = lseek(bd->disk->fd, + (off_t)block*bd->cfg->erase_size, + SEEK_SET); + if (res1 < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err); + return err; + } + + ssize_t res2 = write(bd->disk->fd, + bd->disk->scratch, + bd->cfg->erase_size); + if (res2 < 0) { + int err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err); + return err; + } + } + } + + // track erases + bd->erased += bd->cfg->erase_size; + if (bd->cfg->erase_sleep) { + int err = nanosleep(&(struct timespec){ + .tv_sec=bd->cfg->erase_sleep/1000000000, + .tv_nsec=bd->cfg->erase_sleep%1000000000}, + NULL); + if (err) { + err = -errno; + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err); + return err; + } + } + + // lose power? + if (bd->power_cycles > 0) { + bd->power_cycles -= 1; + if (bd->power_cycles == 0) { + int err = lfs_emubd_powerloss(cfg); + if (err) { + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", err); + return err; + } + } + } + + LFS_EMUBD_TRACE("lfs_emubd_erase -> %d", 0); + return 0; +} + +int lfs_emubd_sync(const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_sync(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + + // emulate out-of-order writes? reset first write, writes + // cannot be out-of-order across sync + if (bd->cfg->powerloss_behavior == LFS_EMUBD_POWERLOSS_OOO) { + lfs_emubd_decblock(bd->ooo_data); + bd->ooo_block = -1; + bd->ooo_data = NULL; + } + + LFS_EMUBD_TRACE("lfs_emubd_sync -> %d", 0); + return 0; +} + + +/// Additional extended API for driving test features /// + +static int lfs_emubd_crc_(const struct lfs_config *cfg, + lfs_block_t block, uint32_t *crc) { + lfs_emubd_t *bd = cfg->context; + + // check if crc is valid + LFS_ASSERT(block < cfg->block_count); + + // crc the block + uint32_t crc_ = 0xffffffff; + const lfs_emubd_block_t *b = bd->blocks[block]; + if (b) { + crc_ = lfs_crc(crc_, b->data, cfg->block_size); + } else { + uint8_t erase_value = (bd->cfg->erase_value != -1) + ? 
bd->cfg->erase_value + : 0; + for (lfs_size_t i = 0; i < cfg->block_size; i++) { + crc_ = lfs_crc(crc_, &erase_value, 1); + } + } + *crc = 0xffffffff ^ crc_; + + return 0; +} + +int lfs_emubd_crc(const struct lfs_config *cfg, + lfs_block_t block, uint32_t *crc) { + LFS_EMUBD_TRACE("lfs_emubd_crc(%p, %"PRIu32", %p)", + (void*)cfg, block, crc); + int err = lfs_emubd_crc_(cfg, block, crc); + LFS_EMUBD_TRACE("lfs_emubd_crc -> %d", err); + return err; +} + +int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc) { + LFS_EMUBD_TRACE("lfs_emubd_bdcrc(%p, %p)", (void*)cfg, crc); + + uint32_t crc_ = 0xffffffff; + for (lfs_block_t i = 0; i < cfg->block_count; i++) { + uint32_t i_crc; + int err = lfs_emubd_crc_(cfg, i, &i_crc); + if (err) { + LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", err); + return err; + } + + crc_ = lfs_crc(crc_, &i_crc, sizeof(uint32_t)); + } + *crc = 0xffffffff ^ crc_; + + LFS_EMUBD_TRACE("lfs_emubd_bdcrc -> %d", 0); + return 0; +} + +lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_readed(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + LFS_EMUBD_TRACE("lfs_emubd_readed -> %"PRIu64, bd->readed); + return bd->readed; +} + +lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_proged(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + LFS_EMUBD_TRACE("lfs_emubd_proged -> %"PRIu64, bd->proged); + return bd->proged; +} + +lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_erased(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + LFS_EMUBD_TRACE("lfs_emubd_erased -> %"PRIu64, bd->erased); + return bd->erased; +} + +int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed) { + LFS_EMUBD_TRACE("lfs_emubd_setreaded(%p, %"PRIu64")", (void*)cfg, readed); + lfs_emubd_t *bd = cfg->context; + bd->readed = readed; + LFS_EMUBD_TRACE("lfs_emubd_setreaded -> %d", 0); + return 0; +} + +int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged) { + LFS_EMUBD_TRACE("lfs_emubd_setproged(%p, %"PRIu64")", (void*)cfg, proged); + lfs_emubd_t *bd = cfg->context; + bd->proged = proged; + LFS_EMUBD_TRACE("lfs_emubd_setproged -> %d", 0); + return 0; +} + +int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased) { + LFS_EMUBD_TRACE("lfs_emubd_seterased(%p, %"PRIu64")", (void*)cfg, erased); + lfs_emubd_t *bd = cfg->context; + bd->erased = erased; + LFS_EMUBD_TRACE("lfs_emubd_seterased -> %d", 0); + return 0; +} + +lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg, + lfs_block_t block) { + LFS_EMUBD_TRACE("lfs_emubd_wear(%p, %"PRIu32")", (void*)cfg, block); + lfs_emubd_t *bd = cfg->context; + + // check if block is valid + LFS_ASSERT(block < bd->cfg->erase_count); + + // get the wear + lfs_emubd_wear_t wear; + const lfs_emubd_block_t *b = bd->blocks[block]; + if (b) { + wear = b->wear; + } else { + wear = 0; + } + + LFS_EMUBD_TRACE("lfs_emubd_wear -> %"PRIi32, wear); + return wear; +} + +int lfs_emubd_setwear(const struct lfs_config *cfg, + lfs_block_t block, lfs_emubd_wear_t wear) { + LFS_EMUBD_TRACE("lfs_emubd_setwear(%p, %"PRIu32", %"PRIi32")", + (void*)cfg, block, wear); + lfs_emubd_t *bd = cfg->context; + + // check if block is valid + LFS_ASSERT(block < bd->cfg->erase_count); + + // set the wear + lfs_emubd_block_t *b = lfs_emubd_mutblock(cfg, &bd->blocks[block]); + if (!b) { + LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + b->wear = wear; + + 
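+    // note lfs_emubd_mutblock copies-on-write above, so snapshots taken
+    // with lfs_emubd_copy keep their original wear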
LFS_EMUBD_TRACE("lfs_emubd_setwear -> %d", 0); + return 0; +} + +lfs_emubd_spowercycles_t lfs_emubd_powercycles( + const struct lfs_config *cfg) { + LFS_EMUBD_TRACE("lfs_emubd_powercycles(%p)", (void*)cfg); + lfs_emubd_t *bd = cfg->context; + + LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %"PRIi32, bd->power_cycles); + return bd->power_cycles; +} + +int lfs_emubd_setpowercycles(const struct lfs_config *cfg, + lfs_emubd_powercycles_t power_cycles) { + LFS_EMUBD_TRACE("lfs_emubd_setpowercycles(%p, %"PRIi32")", + (void*)cfg, power_cycles); + lfs_emubd_t *bd = cfg->context; + + bd->power_cycles = power_cycles; + + LFS_EMUBD_TRACE("lfs_emubd_powercycles -> %d", 0); + return 0; +} + +int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy) { + LFS_EMUBD_TRACE("lfs_emubd_copy(%p, %p)", (void*)cfg, (void*)copy); + lfs_emubd_t *bd = cfg->context; + + // lazily copy over our block array + copy->blocks = malloc(bd->cfg->erase_count * sizeof(lfs_emubd_block_t*)); + if (!copy->blocks) { + LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + + for (size_t i = 0; i < bd->cfg->erase_count; i++) { + copy->blocks[i] = lfs_emubd_incblock(bd->blocks[i]); + } + + // other state + copy->readed = bd->readed; + copy->proged = bd->proged; + copy->erased = bd->erased; + copy->power_cycles = bd->power_cycles; + copy->ooo_block = bd->ooo_block; + copy->ooo_data = lfs_emubd_incblock(bd->ooo_data); + copy->disk = bd->disk; + if (copy->disk) { + copy->disk->rc += 1; + } + copy->cfg = bd->cfg; + + LFS_EMUBD_TRACE("lfs_emubd_copy -> %d", 0); + return 0; +} + diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.h b/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.h new file mode 100644 index 0000000..9060008 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/bd/lfs_emubd.h @@ -0,0 +1,244 @@ +/* + * Emulating block device, wraps filebd and rambd while providing a bunch + * of hooks for testing littlefs in various conditions. + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_EMUBD_H +#define LFS_EMUBD_H + +#include "lfs.h" +#include "lfs_util.h" +#include "bd/lfs_rambd.h" +#include "bd/lfs_filebd.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// Block device specific tracing +#ifndef LFS_EMUBD_TRACE +#ifdef LFS_EMUBD_YES_TRACE +#define LFS_EMUBD_TRACE(...) LFS_TRACE(__VA_ARGS__) +#else +#define LFS_EMUBD_TRACE(...) +#endif +#endif + +// Mode determining how "bad-blocks" behave during testing. This simulates +// some real-world circumstances such as progs not sticking (prog-noop), +// a readonly disk (erase-noop), and ECC failures (read-error). +// +// Not that read-noop is not allowed. Read _must_ return a consistent (but +// may be arbitrary) value on every read. +typedef enum lfs_emubd_badblock_behavior { + LFS_EMUBD_BADBLOCK_PROGERROR = 0, // Error on prog + LFS_EMUBD_BADBLOCK_ERASEERROR = 1, // Error on erase + LFS_EMUBD_BADBLOCK_READERROR = 2, // Error on read + LFS_EMUBD_BADBLOCK_PROGNOOP = 3, // Prog does nothing silently + LFS_EMUBD_BADBLOCK_ERASENOOP = 4, // Erase does nothing silently +} lfs_emubd_badblock_behavior_t; + +// Mode determining how power-loss behaves during testing. For now this +// only supports a noop behavior, leaving the data on-disk untouched. 
+typedef enum lfs_emubd_powerloss_behavior { + LFS_EMUBD_POWERLOSS_NOOP = 0, // Progs are atomic + LFS_EMUBD_POWERLOSS_OOO = 1, // Blocks are written out-of-order +} lfs_emubd_powerloss_behavior_t; + +// Type for measuring read/program/erase operations +typedef uint64_t lfs_emubd_io_t; +typedef int64_t lfs_emubd_sio_t; + +// Type for measuring wear +typedef uint32_t lfs_emubd_wear_t; +typedef int32_t lfs_emubd_swear_t; + +// Type for tracking power-cycles +typedef uint32_t lfs_emubd_powercycles_t; +typedef int32_t lfs_emubd_spowercycles_t; + +// Type for delays in nanoseconds +typedef uint64_t lfs_emubd_sleep_t; +typedef int64_t lfs_emubd_ssleep_t; + +// emubd config, this is required for testing +struct lfs_emubd_config { + // Minimum size of a read operation in bytes. + lfs_size_t read_size; + + // Minimum size of a program operation in bytes. + lfs_size_t prog_size; + + // Size of an erase operation in bytes. + lfs_size_t erase_size; + + // Number of erase blocks on the device. + lfs_size_t erase_count; + + // 8-bit erase value to use for simulating erases. -1 does not simulate + // erases, which can speed up testing by avoiding the extra block-device + // operations to store the erase value. + int32_t erase_value; + + // Number of erase cycles before a block becomes "bad". The exact behavior + // of bad blocks is controlled by badblock_behavior. + uint32_t erase_cycles; + + // The mode determining how bad-blocks fail + lfs_emubd_badblock_behavior_t badblock_behavior; + + // Number of write operations (erase/prog) before triggering a power-loss. + // power_cycles=0 disables this. The exact behavior of power-loss is + // controlled by a combination of powerloss_behavior and powerloss_cb. + lfs_emubd_powercycles_t power_cycles; + + // The mode determining how power-loss affects disk + lfs_emubd_powerloss_behavior_t powerloss_behavior; + + // Function to call to emulate power-loss. The exact behavior of power-loss + // is up to the runner to provide. + void (*powerloss_cb)(void*); + + // Data for power-loss callback + void *powerloss_data; + + // True to track when power-loss could have occured. Note this involves + // heavy memory usage! + bool track_branches; + + // Path to file to use as a mirror of the disk. This provides a way to view + // the current state of the block device. + const char *disk_path; + + // Artificial delay in nanoseconds, there is no purpose for this other + // than slowing down the simulation. + lfs_emubd_sleep_t read_sleep; + + // Artificial delay in nanoseconds, there is no purpose for this other + // than slowing down the simulation. + lfs_emubd_sleep_t prog_sleep; + + // Artificial delay in nanoseconds, there is no purpose for this other + // than slowing down the simulation. 
+ lfs_emubd_sleep_t erase_sleep; +}; + +// A reference counted block +typedef struct lfs_emubd_block { + uint32_t rc; + lfs_emubd_wear_t wear; + + uint8_t data[]; +} lfs_emubd_block_t; + +// Disk mirror +typedef struct lfs_emubd_disk { + uint32_t rc; + int fd; + uint8_t *scratch; +} lfs_emubd_disk_t; + +// emubd state +typedef struct lfs_emubd { + // array of copy-on-write blocks + lfs_emubd_block_t **blocks; + + // some other test state + lfs_emubd_io_t readed; + lfs_emubd_io_t proged; + lfs_emubd_io_t erased; + lfs_emubd_powercycles_t power_cycles; + lfs_ssize_t ooo_block; + lfs_emubd_block_t *ooo_data; + lfs_emubd_disk_t *disk; + + const struct lfs_emubd_config *cfg; +} lfs_emubd_t; + + +/// Block device API /// + +// Create an emulating block device using the geometry in lfs_config +int lfs_emubd_create(const struct lfs_config *cfg, + const struct lfs_emubd_config *bdcfg); + +// Clean up memory associated with block device +int lfs_emubd_destroy(const struct lfs_config *cfg); + +// Read a block +int lfs_emubd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size); + +// Program a block +// +// The block must have previously been erased. +int lfs_emubd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size); + +// Erase a block +// +// A block must be erased before being programmed. The +// state of an erased block is undefined. +int lfs_emubd_erase(const struct lfs_config *cfg, lfs_block_t block); + +// Sync the block device +int lfs_emubd_sync(const struct lfs_config *cfg); + + +/// Additional extended API for driving test features /// + +// A CRC of a block for debugging purposes +int lfs_emubd_crc(const struct lfs_config *cfg, + lfs_block_t block, uint32_t *crc); + +// A CRC of the entire block device for debugging purposes +int lfs_emubd_bdcrc(const struct lfs_config *cfg, uint32_t *crc); + +// Get total amount of bytes read +lfs_emubd_sio_t lfs_emubd_readed(const struct lfs_config *cfg); + +// Get total amount of bytes programmed +lfs_emubd_sio_t lfs_emubd_proged(const struct lfs_config *cfg); + +// Get total amount of bytes erased +lfs_emubd_sio_t lfs_emubd_erased(const struct lfs_config *cfg); + +// Manually set amount of bytes read +int lfs_emubd_setreaded(const struct lfs_config *cfg, lfs_emubd_io_t readed); + +// Manually set amount of bytes programmed +int lfs_emubd_setproged(const struct lfs_config *cfg, lfs_emubd_io_t proged); + +// Manually set amount of bytes erased +int lfs_emubd_seterased(const struct lfs_config *cfg, lfs_emubd_io_t erased); + +// Get simulated wear on a given block +lfs_emubd_swear_t lfs_emubd_wear(const struct lfs_config *cfg, + lfs_block_t block); + +// Manually set simulated wear on a given block +int lfs_emubd_setwear(const struct lfs_config *cfg, + lfs_block_t block, lfs_emubd_wear_t wear); + +// Get the remaining power-cycles +lfs_emubd_spowercycles_t lfs_emubd_powercycles( + const struct lfs_config *cfg); + +// Manually set the remaining power-cycles +int lfs_emubd_setpowercycles(const struct lfs_config *cfg, + lfs_emubd_powercycles_t power_cycles); + +// Create a copy-on-write copy of the state of this block device +int lfs_emubd_copy(const struct lfs_config *cfg, lfs_emubd_t *copy); + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.c b/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.c new file mode 100644 index 0000000..ca2fa05 --- /dev/null +++ 
b/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.c @@ -0,0 +1,167 @@ +/* + * Block device emulated in a file + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "bd/lfs_filebd.h" + +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +int lfs_filebd_create(const struct lfs_config *cfg, const char *path, + const struct lfs_filebd_config *bdcfg) { + LFS_FILEBD_TRACE("lfs_filebd_create(%p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p}, " + "\"%s\", " + "%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".erase_size=%"PRIu32", .erase_count=%"PRIu32"})", + (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + path, + (void*)bdcfg, + bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size, + bdcfg->erase_count); + lfs_filebd_t *bd = cfg->context; + bd->cfg = bdcfg; + + // open file + #ifdef _WIN32 + bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666); + #else + bd->fd = open(path, O_RDWR | O_CREAT, 0666); + #endif + + if (bd->fd < 0) { + int err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_create -> %d", err); + return err; + } + + LFS_FILEBD_TRACE("lfs_filebd_create -> %d", 0); + return 0; +} + +int lfs_filebd_destroy(const struct lfs_config *cfg) { + LFS_FILEBD_TRACE("lfs_filebd_destroy(%p)", (void*)cfg); + lfs_filebd_t *bd = cfg->context; + int err = close(bd->fd); + if (err < 0) { + err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", err); + return err; + } + LFS_FILEBD_TRACE("lfs_filebd_destroy -> %d", 0); + return 0; +} + +int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size) { + LFS_FILEBD_TRACE("lfs_filebd_read(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_filebd_t *bd = cfg->context; + + // check if read is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->read_size == 0); + LFS_ASSERT(size % bd->cfg->read_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // zero for reproducibility (in case file is truncated) + memset(buffer, 0, size); + + // read + off_t res1 = lseek(bd->fd, + (off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET); + if (res1 < 0) { + int err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err); + return err; + } + + ssize_t res2 = read(bd->fd, buffer, size); + if (res2 < 0) { + int err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_read -> %d", err); + return err; + } + + LFS_FILEBD_TRACE("lfs_filebd_read -> %d", 0); + return 0; +} + +int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size) { + LFS_FILEBD_TRACE("lfs_filebd_prog(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_filebd_t *bd = cfg->context; + + // check if write is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->prog_size == 0); + LFS_ASSERT(size % bd->cfg->prog_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // program data + off_t res1 = lseek(bd->fd, + (off_t)block*bd->cfg->erase_size + (off_t)off, SEEK_SET); + if (res1 < 0) { + int err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", err); + return err; + } + + ssize_t res2 = write(bd->fd, buffer, size); + if (res2 < 0) { + int err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 
err); + return err; + } + + LFS_FILEBD_TRACE("lfs_filebd_prog -> %d", 0); + return 0; +} + +int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block) { + LFS_FILEBD_TRACE("lfs_filebd_erase(%p, 0x%"PRIx32" (%"PRIu32"))", + (void*)cfg, block, ((lfs_filebd_t*)cfg->context)->cfg->erase_size); + lfs_filebd_t *bd = cfg->context; + + // check if erase is valid + LFS_ASSERT(block < bd->cfg->erase_count); + + // erase is a noop + (void)block; + + LFS_FILEBD_TRACE("lfs_filebd_erase -> %d", 0); + return 0; +} + +int lfs_filebd_sync(const struct lfs_config *cfg) { + LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg); + + // file sync + lfs_filebd_t *bd = cfg->context; + #ifdef _WIN32 + int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1; + #else + int err = fsync(bd->fd); + #endif + if (err) { + err = -errno; + LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0); + return err; + } + + LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0); + return 0; +} diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.h b/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.h new file mode 100644 index 0000000..d7d2fd9 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/bd/lfs_filebd.h @@ -0,0 +1,82 @@ +/* + * Block device emulated in a file + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_FILEBD_H +#define LFS_FILEBD_H + +#include "lfs.h" +#include "lfs_util.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// Block device specific tracing +#ifndef LFS_FILEBD_TRACE +#ifdef LFS_FILEBD_YES_TRACE +#define LFS_FILEBD_TRACE(...) LFS_TRACE(__VA_ARGS__) +#else +#define LFS_FILEBD_TRACE(...) +#endif +#endif + +// filebd config +struct lfs_filebd_config { + // Minimum size of a read operation in bytes. + lfs_size_t read_size; + + // Minimum size of a program operation in bytes. + lfs_size_t prog_size; + + // Size of an erase operation in bytes. + lfs_size_t erase_size; + + // Number of erase blocks on the device. + lfs_size_t erase_count; +}; + +// filebd state +typedef struct lfs_filebd { + int fd; + const struct lfs_filebd_config *cfg; +} lfs_filebd_t; + + +// Create a file block device +int lfs_filebd_create(const struct lfs_config *cfg, const char *path, + const struct lfs_filebd_config *bdcfg); + +// Clean up memory associated with block device +int lfs_filebd_destroy(const struct lfs_config *cfg); + +// Read a block +int lfs_filebd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size); + +// Program a block +// +// The block must have previously been erased. +int lfs_filebd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size); + +// Erase a block +// +// A block must be erased before being programmed. The +// state of an erased block is undefined. +int lfs_filebd_erase(const struct lfs_config *cfg, lfs_block_t block); + +// Sync the block device +int lfs_filebd_sync(const struct lfs_config *cfg); + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.c b/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.c new file mode 100644 index 0000000..a6a0572 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.c @@ -0,0 +1,118 @@ +/* + * Block device emulated in RAM + * + * Copyright (c) 2022, The littlefs authors. 
+ * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "bd/lfs_rambd.h" + +int lfs_rambd_create(const struct lfs_config *cfg, + const struct lfs_rambd_config *bdcfg) { + LFS_RAMBD_TRACE("lfs_rambd_create(%p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p}, " + "%p {.read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".erase_size=%"PRIu32", .erase_count=%"PRIu32", " + ".buffer=%p})", + (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + (void*)bdcfg, + bdcfg->read_size, bdcfg->prog_size, bdcfg->erase_size, + bdcfg->erase_count, bdcfg->buffer); + lfs_rambd_t *bd = cfg->context; + bd->cfg = bdcfg; + + // allocate buffer? + if (bd->cfg->buffer) { + bd->buffer = bd->cfg->buffer; + } else { + bd->buffer = lfs_malloc(bd->cfg->erase_size * bd->cfg->erase_count); + if (!bd->buffer) { + LFS_RAMBD_TRACE("lfs_rambd_create -> %d", LFS_ERR_NOMEM); + return LFS_ERR_NOMEM; + } + } + + // zero for reproducibility + memset(bd->buffer, 0, bd->cfg->erase_size * bd->cfg->erase_count); + + LFS_RAMBD_TRACE("lfs_rambd_create -> %d", 0); + return 0; +} + +int lfs_rambd_destroy(const struct lfs_config *cfg) { + LFS_RAMBD_TRACE("lfs_rambd_destroy(%p)", (void*)cfg); + // clean up memory + lfs_rambd_t *bd = cfg->context; + if (!bd->cfg->buffer) { + lfs_free(bd->buffer); + } + LFS_RAMBD_TRACE("lfs_rambd_destroy -> %d", 0); + return 0; +} + +int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size) { + LFS_RAMBD_TRACE("lfs_rambd_read(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_rambd_t *bd = cfg->context; + + // check if read is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->read_size == 0); + LFS_ASSERT(size % bd->cfg->read_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // read data + memcpy(buffer, &bd->buffer[block*bd->cfg->erase_size + off], size); + + LFS_RAMBD_TRACE("lfs_rambd_read -> %d", 0); + return 0; +} + +int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size) { + LFS_RAMBD_TRACE("lfs_rambd_prog(%p, " + "0x%"PRIx32", %"PRIu32", %p, %"PRIu32")", + (void*)cfg, block, off, buffer, size); + lfs_rambd_t *bd = cfg->context; + + // check if write is valid + LFS_ASSERT(block < bd->cfg->erase_count); + LFS_ASSERT(off % bd->cfg->prog_size == 0); + LFS_ASSERT(size % bd->cfg->prog_size == 0); + LFS_ASSERT(off+size <= bd->cfg->erase_size); + + // program data + memcpy(&bd->buffer[block*bd->cfg->erase_size + off], buffer, size); + + LFS_RAMBD_TRACE("lfs_rambd_prog -> %d", 0); + return 0; +} + +int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block) { + LFS_RAMBD_TRACE("lfs_rambd_erase(%p, 0x%"PRIx32" (%"PRIu32"))", + (void*)cfg, block, ((lfs_rambd_t*)cfg->context)->cfg->erase_size); + lfs_rambd_t *bd = cfg->context; + + // check if erase is valid + LFS_ASSERT(block < bd->cfg->erase_count); + + // erase is a noop + (void)block; + + LFS_RAMBD_TRACE("lfs_rambd_erase -> %d", 0); + return 0; +} + +int lfs_rambd_sync(const struct lfs_config *cfg) { + LFS_RAMBD_TRACE("lfs_rambd_sync(%p)", (void*)cfg); + + // sync is a noop + (void)cfg; + + LFS_RAMBD_TRACE("lfs_rambd_sync -> %d", 0); + return 0; +} diff --git a/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.h b/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.h new 
file mode 100644 index 0000000..8663702 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/bd/lfs_rambd.h @@ -0,0 +1,85 @@ +/* + * Block device emulated in RAM + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_RAMBD_H +#define LFS_RAMBD_H + +#include "lfs.h" +#include "lfs_util.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// Block device specific tracing +#ifndef LFS_RAMBD_TRACE +#ifdef LFS_RAMBD_YES_TRACE +#define LFS_RAMBD_TRACE(...) LFS_TRACE(__VA_ARGS__) +#else +#define LFS_RAMBD_TRACE(...) +#endif +#endif + +// rambd config +struct lfs_rambd_config { + // Minimum size of a read operation in bytes. + lfs_size_t read_size; + + // Minimum size of a program operation in bytes. + lfs_size_t prog_size; + + // Size of an erase operation in bytes. + lfs_size_t erase_size; + + // Number of erase blocks on the device. + lfs_size_t erase_count; + + // Optional statically allocated buffer for the block device. + void *buffer; +}; + +// rambd state +typedef struct lfs_rambd { + uint8_t *buffer; + const struct lfs_rambd_config *cfg; +} lfs_rambd_t; + + +// Create a RAM block device +int lfs_rambd_create(const struct lfs_config *cfg, + const struct lfs_rambd_config *bdcfg); + +// Clean up memory associated with block device +int lfs_rambd_destroy(const struct lfs_config *cfg); + +// Read a block +int lfs_rambd_read(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size); + +// Program a block +// +// The block must have previously been erased. +int lfs_rambd_prog(const struct lfs_config *cfg, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size); + +// Erase a block +// +// A block must be erased before being programmed. The +// state of an erased block is undefined. +int lfs_rambd_erase(const struct lfs_config *cfg, lfs_block_t block); + +// Sync the block device +int lfs_rambd_sync(const struct lfs_config *cfg); + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/benches/bench_dir.toml b/components/joltwallet__littlefs/src/littlefs/benches/bench_dir.toml new file mode 100644 index 0000000..5f8cb49 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/benches/bench_dir.toml @@ -0,0 +1,270 @@ +[cases.bench_dir_open] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.N = 1024 +defines.FILE_SIZE = 8 +defines.CHUNK_SIZE = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + // first create the files + char name[256]; + uint8_t buffer[CHUNK_SIZE]; + for (lfs_size_t i = 0; i < N; i++) { + sprintf(name, "file%08x", i); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + + uint32_t file_prng = i; + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + buffer[k] = BENCH_PRNG(&file_prng); + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + } + + // then read the files + BENCH_START(); + uint32_t prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? 
(N-1-i) + : BENCH_PRNG(&prng) % N; + sprintf(name, "file%08x", i_); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, LFS_O_RDONLY) => 0; + + uint32_t file_prng = i_; + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + assert(buffer[k] == BENCH_PRNG(&file_prng)); + } + } + + lfs_file_close(&lfs, &file) => 0; + } + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_dir_creat] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.N = 1024 +defines.FILE_SIZE = 8 +defines.CHUNK_SIZE = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + BENCH_START(); + uint32_t prng = 42; + char name[256]; + uint8_t buffer[CHUNK_SIZE]; + for (lfs_size_t i = 0; i < N; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? (N-1-i) + : BENCH_PRNG(&prng) % N; + sprintf(name, "file%08x", i_); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_TRUNC) => 0; + + uint32_t file_prng = i_; + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + buffer[k] = BENCH_PRNG(&file_prng); + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + } + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_dir_remove] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.N = 1024 +defines.FILE_SIZE = 8 +defines.CHUNK_SIZE = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + // first create the files + char name[256]; + uint8_t buffer[CHUNK_SIZE]; + for (lfs_size_t i = 0; i < N; i++) { + sprintf(name, "file%08x", i); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + + uint32_t file_prng = i; + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + buffer[k] = BENCH_PRNG(&file_prng); + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + } + + // then remove the files + BENCH_START(); + uint32_t prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? 
(N-1-i) + : BENCH_PRNG(&prng) % N; + sprintf(name, "file%08x", i_); + int err = lfs_remove(&lfs, name); + assert(!err || err == LFS_ERR_NOENT); + } + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_dir_read] +defines.N = 1024 +defines.FILE_SIZE = 8 +defines.CHUNK_SIZE = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + // first create the files + char name[256]; + uint8_t buffer[CHUNK_SIZE]; + for (lfs_size_t i = 0; i < N; i++) { + sprintf(name, "file%08x", i); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + + uint32_t file_prng = i; + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + buffer[k] = BENCH_PRNG(&file_prng); + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + } + + // then read the directory + BENCH_START(); + lfs_dir_t dir; + lfs_dir_open(&lfs, &dir, "/") => 0; + struct lfs_info info; + lfs_dir_read(&lfs, &dir, &info) => 1; + assert(info.type == LFS_TYPE_DIR); + assert(strcmp(info.name, ".") == 0); + lfs_dir_read(&lfs, &dir, &info) => 1; + assert(info.type == LFS_TYPE_DIR); + assert(strcmp(info.name, "..") == 0); + for (int i = 0; i < N; i++) { + sprintf(name, "file%08x", i); + lfs_dir_read(&lfs, &dir, &info) => 1; + assert(info.type == LFS_TYPE_REG); + assert(strcmp(info.name, name) == 0); + } + lfs_dir_read(&lfs, &dir, &info) => 0; + lfs_dir_close(&lfs, &dir) => 0; + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_dir_mkdir] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.N = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + BENCH_START(); + uint32_t prng = 42; + char name[256]; + for (lfs_size_t i = 0; i < N; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? (N-1-i) + : BENCH_PRNG(&prng) % N; + printf("hm %d\n", i); + sprintf(name, "dir%08x", i_); + int err = lfs_mkdir(&lfs, name); + assert(!err || err == LFS_ERR_EXIST); + } + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_dir_rmdir] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.N = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + + // first create the dirs + char name[256]; + for (lfs_size_t i = 0; i < N; i++) { + sprintf(name, "dir%08x", i); + lfs_mkdir(&lfs, name) => 0; + } + + // then remove the dirs + BENCH_START(); + uint32_t prng = 42; + for (lfs_size_t i = 0; i < N; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? 
(N-1-i) + : BENCH_PRNG(&prng) % N; + sprintf(name, "dir%08x", i_); + int err = lfs_remove(&lfs, name); + assert(!err || err == LFS_ERR_NOENT); + } + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + + diff --git a/components/joltwallet__littlefs/src/littlefs/benches/bench_file.toml b/components/joltwallet__littlefs/src/littlefs/benches/bench_file.toml new file mode 100644 index 0000000..168eaad --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/benches/bench_file.toml @@ -0,0 +1,95 @@ +[cases.bench_file_read] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.SIZE = '128*1024' +defines.CHUNK_SIZE = 64 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE; + + // first write the file + lfs_file_t file; + uint8_t buffer[CHUNK_SIZE]; + lfs_file_open(&lfs, &file, "file", + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + for (lfs_size_t i = 0; i < chunks; i++) { + uint32_t chunk_prng = i; + for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) { + buffer[j] = BENCH_PRNG(&chunk_prng); + } + + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + lfs_file_close(&lfs, &file) => 0; + + // then read the file + BENCH_START(); + lfs_file_open(&lfs, &file, "file", LFS_O_RDONLY) => 0; + + uint32_t prng = 42; + for (lfs_size_t i = 0; i < chunks; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? (chunks-1-i) + : BENCH_PRNG(&prng) % chunks; + lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET) + => i_*CHUNK_SIZE; + lfs_file_read(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + + uint32_t chunk_prng = i_; + for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) { + assert(buffer[j] == BENCH_PRNG(&chunk_prng)); + } + } + + lfs_file_close(&lfs, &file) => 0; + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_file_write] +# 0 = in-order +# 1 = reversed-order +# 2 = random-order +defines.ORDER = [0, 1, 2] +defines.SIZE = '128*1024' +defines.CHUNK_SIZE = 64 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + lfs_mount(&lfs, cfg) => 0; + lfs_size_t chunks = (SIZE+CHUNK_SIZE-1)/CHUNK_SIZE; + + BENCH_START(); + lfs_file_t file; + lfs_file_open(&lfs, &file, "file", + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + + uint8_t buffer[CHUNK_SIZE]; + uint32_t prng = 42; + for (lfs_size_t i = 0; i < chunks; i++) { + lfs_off_t i_ + = (ORDER == 0) ? i + : (ORDER == 1) ? (chunks-1-i) + : BENCH_PRNG(&prng) % chunks; + uint32_t chunk_prng = i_; + for (lfs_size_t j = 0; j < CHUNK_SIZE; j++) { + buffer[j] = BENCH_PRNG(&chunk_prng); + } + + lfs_file_seek(&lfs, &file, i_*CHUNK_SIZE, LFS_SEEK_SET) + => i_*CHUNK_SIZE; + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' diff --git a/components/joltwallet__littlefs/src/littlefs/benches/bench_superblock.toml b/components/joltwallet__littlefs/src/littlefs/benches/bench_superblock.toml new file mode 100644 index 0000000..37659d4 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/benches/bench_superblock.toml @@ -0,0 +1,56 @@ +[cases.bench_superblocks_found] +# support benchmarking with files +defines.N = [0, 1024] +defines.FILE_SIZE = 8 +defines.CHUNK_SIZE = 8 +code = ''' + lfs_t lfs; + lfs_format(&lfs, cfg) => 0; + + // create files? 
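+    // (N=0 benchmarks mounting an empty filesystem, N=1024 a populated one)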
+ lfs_mount(&lfs, cfg) => 0; + char name[256]; + uint8_t buffer[CHUNK_SIZE]; + for (lfs_size_t i = 0; i < N; i++) { + sprintf(name, "file%08x", i); + lfs_file_t file; + lfs_file_open(&lfs, &file, name, + LFS_O_WRONLY | LFS_O_CREAT | LFS_O_EXCL) => 0; + + for (lfs_size_t j = 0; j < FILE_SIZE; j += CHUNK_SIZE) { + for (lfs_size_t k = 0; k < CHUNK_SIZE; k++) { + buffer[k] = i+j+k; + } + lfs_file_write(&lfs, &file, buffer, CHUNK_SIZE) => CHUNK_SIZE; + } + + lfs_file_close(&lfs, &file) => 0; + } + lfs_unmount(&lfs) => 0; + + BENCH_START(); + lfs_mount(&lfs, cfg) => 0; + BENCH_STOP(); + + lfs_unmount(&lfs) => 0; +''' + +[cases.bench_superblocks_missing] +code = ''' + lfs_t lfs; + + BENCH_START(); + int err = lfs_mount(&lfs, cfg); + assert(err != 0); + BENCH_STOP(); +''' + +[cases.bench_superblocks_format] +code = ''' + lfs_t lfs; + + BENCH_START(); + lfs_format(&lfs, cfg) => 0; + BENCH_STOP(); +''' + diff --git a/components/joltwallet__littlefs/src/littlefs/lfs.c b/components/joltwallet__littlefs/src/littlefs/lfs.c new file mode 100644 index 0000000..da4bfca --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/lfs.c @@ -0,0 +1,6549 @@ +/* + * The little filesystem + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + */ +#include "lfs.h" +#include "lfs_util.h" + + +// some constants used throughout the code +#define LFS_BLOCK_NULL ((lfs_block_t)-1) +#define LFS_BLOCK_INLINE ((lfs_block_t)-2) + +enum { + LFS_OK_RELOCATED = 1, + LFS_OK_DROPPED = 2, + LFS_OK_ORPHANED = 3, +}; + +enum { + LFS_CMP_EQ = 0, + LFS_CMP_LT = 1, + LFS_CMP_GT = 2, +}; + + +/// Caching block device operations /// + +static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) { + // do not zero, cheaper if cache is readonly or only going to be + // written with identical data (during relocates) + (void)lfs; + rcache->block = LFS_BLOCK_NULL; +} + +static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) { + // zero to avoid information leak + memset(pcache->buffer, 0xff, lfs->cfg->cache_size); + pcache->block = LFS_BLOCK_NULL; +} + +static int lfs_bd_read(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_block_t block, lfs_off_t off, + void *buffer, lfs_size_t size) { + uint8_t *data = buffer; + if (off+size > lfs->cfg->block_size + || (lfs->block_count && block >= lfs->block_count)) { + return LFS_ERR_CORRUPT; + } + + while (size > 0) { + lfs_size_t diff = size; + + if (pcache && block == pcache->block && + off < pcache->off + pcache->size) { + if (off >= pcache->off) { + // is already in pcache? + diff = lfs_min(diff, pcache->size - (off-pcache->off)); + memcpy(data, &pcache->buffer[off-pcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // pcache takes priority + diff = lfs_min(diff, pcache->off-off); + } + + if (block == rcache->block && + off < rcache->off + rcache->size) { + if (off >= rcache->off) { + // is already in rcache? + diff = lfs_min(diff, rcache->size - (off-rcache->off)); + memcpy(data, &rcache->buffer[off-rcache->off], diff); + + data += diff; + off += diff; + size -= diff; + continue; + } + + // rcache takes priority + diff = lfs_min(diff, rcache->off-off); + } + + if (size >= hint && off % lfs->cfg->read_size == 0 && + size >= lfs->cfg->read_size) { + // bypass cache? 
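+            // (reads that are read_size-aligned and no smaller than both
+            // the hint and read_size go straight to the block device,
+            // skipping the rcache copy)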
+ diff = lfs_aligndown(diff, lfs->cfg->read_size); + int err = lfs->cfg->read(lfs->cfg, block, off, data, diff); + LFS_ASSERT(err <= 0); + if (err) { + return err; + } + + data += diff; + off += diff; + size -= diff; + continue; + } + + // load to cache, first condition can no longer fail + LFS_ASSERT(!lfs->block_count || block < lfs->block_count); + rcache->block = block; + rcache->off = lfs_aligndown(off, lfs->cfg->read_size); + rcache->size = lfs_min( + lfs_min( + lfs_alignup(off+hint, lfs->cfg->read_size), + lfs->cfg->block_size) + - rcache->off, + lfs->cfg->cache_size); + int err = lfs->cfg->read(lfs->cfg, rcache->block, + rcache->off, rcache->buffer, rcache->size); + LFS_ASSERT(err <= 0); + if (err) { + return err; + } + } + + return 0; +} + +static int lfs_bd_cmp(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_block_t block, lfs_off_t off, + const void *buffer, lfs_size_t size) { + const uint8_t *data = buffer; + lfs_size_t diff = 0; + + for (lfs_off_t i = 0; i < size; i += diff) { + uint8_t dat[8]; + + diff = lfs_min(size-i, sizeof(dat)); + int err = lfs_bd_read(lfs, + pcache, rcache, hint-i, + block, off+i, &dat, diff); + if (err) { + return err; + } + + int res = memcmp(dat, data + i, diff); + if (res) { + return res < 0 ? LFS_CMP_LT : LFS_CMP_GT; + } + } + + return LFS_CMP_EQ; +} + +static int lfs_bd_crc(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint, + lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) { + lfs_size_t diff = 0; + + for (lfs_off_t i = 0; i < size; i += diff) { + uint8_t dat[8]; + diff = lfs_min(size-i, sizeof(dat)); + int err = lfs_bd_read(lfs, + pcache, rcache, hint-i, + block, off+i, &dat, diff); + if (err) { + return err; + } + + *crc = lfs_crc(*crc, &dat, diff); + } + + return 0; +} + +#ifndef LFS_READONLY +static int lfs_bd_flush(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) { + if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) { + LFS_ASSERT(pcache->block < lfs->block_count); + lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size); + int err = lfs->cfg->prog(lfs->cfg, pcache->block, + pcache->off, pcache->buffer, diff); + LFS_ASSERT(err <= 0); + if (err) { + return err; + } + + if (validate) { + // check data on disk + lfs_cache_drop(lfs, rcache); + int res = lfs_bd_cmp(lfs, + NULL, rcache, diff, + pcache->block, pcache->off, pcache->buffer, diff); + if (res < 0) { + return res; + } + + if (res != LFS_CMP_EQ) { + return LFS_ERR_CORRUPT; + } + } + + lfs_cache_zero(lfs, pcache); + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_sync(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) { + lfs_cache_drop(lfs, rcache); + + int err = lfs_bd_flush(lfs, pcache, rcache, validate); + if (err) { + return err; + } + + err = lfs->cfg->sync(lfs->cfg); + LFS_ASSERT(err <= 0); + return err; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_prog(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate, + lfs_block_t block, lfs_off_t off, + const void *buffer, lfs_size_t size) { + const uint8_t *data = buffer; + LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count); + LFS_ASSERT(off + size <= lfs->cfg->block_size); + + while (size > 0) { + if (block == pcache->block && + off >= pcache->off && + off < pcache->off + lfs->cfg->cache_size) { + // already fits in pcache? 
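+            // (append to the currently buffered prog block; the pcache is
+            // flushed eagerly below once it fills up to cache_size)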
+ lfs_size_t diff = lfs_min(size, + lfs->cfg->cache_size - (off-pcache->off)); + memcpy(&pcache->buffer[off-pcache->off], data, diff); + + data += diff; + off += diff; + size -= diff; + + pcache->size = lfs_max(pcache->size, off - pcache->off); + if (pcache->size == lfs->cfg->cache_size) { + // eagerly flush out pcache if we fill up + int err = lfs_bd_flush(lfs, pcache, rcache, validate); + if (err) { + return err; + } + } + + continue; + } + + // pcache must have been flushed, either by programming and + // entire block or manually flushing the pcache + LFS_ASSERT(pcache->block == LFS_BLOCK_NULL); + + // prepare pcache, first condition can no longer fail + pcache->block = block; + pcache->off = lfs_aligndown(off, lfs->cfg->prog_size); + pcache->size = 0; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) { + LFS_ASSERT(block < lfs->block_count); + int err = lfs->cfg->erase(lfs->cfg, block); + LFS_ASSERT(err <= 0); + return err; +} +#endif + + +/// Small type-level utilities /// + +// some operations on paths +static inline lfs_size_t lfs_path_namelen(const char *path) { + return strcspn(path, "/"); +} + +static inline bool lfs_path_islast(const char *path) { + lfs_size_t namelen = lfs_path_namelen(path); + return path[namelen + strspn(path + namelen, "/")] == '\0'; +} + +static inline bool lfs_path_isdir(const char *path) { + return path[lfs_path_namelen(path)] != '\0'; +} + +// operations on block pairs +static inline void lfs_pair_swap(lfs_block_t pair[2]) { + lfs_block_t t = pair[0]; + pair[0] = pair[1]; + pair[1] = t; +} + +static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) { + return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL; +} + +static inline int lfs_pair_cmp( + const lfs_block_t paira[2], + const lfs_block_t pairb[2]) { + return !(paira[0] == pairb[0] || paira[1] == pairb[1] || + paira[0] == pairb[1] || paira[1] == pairb[0]); +} + +static inline bool lfs_pair_issync( + const lfs_block_t paira[2], + const lfs_block_t pairb[2]) { + return (paira[0] == pairb[0] && paira[1] == pairb[1]) || + (paira[0] == pairb[1] && paira[1] == pairb[0]); +} + +static inline void lfs_pair_fromle32(lfs_block_t pair[2]) { + pair[0] = lfs_fromle32(pair[0]); + pair[1] = lfs_fromle32(pair[1]); +} + +#ifndef LFS_READONLY +static inline void lfs_pair_tole32(lfs_block_t pair[2]) { + pair[0] = lfs_tole32(pair[0]); + pair[1] = lfs_tole32(pair[1]); +} +#endif + +// operations on 32-bit entry tags +typedef uint32_t lfs_tag_t; +typedef int32_t lfs_stag_t; + +#define LFS_MKTAG(type, id, size) \ + (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size)) + +#define LFS_MKTAG_IF(cond, type, id, size) \ + ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0)) + +#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \ + ((cond) ? 
LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2)) + +static inline bool lfs_tag_isvalid(lfs_tag_t tag) { + return !(tag & 0x80000000); +} + +static inline bool lfs_tag_isdelete(lfs_tag_t tag) { + return ((int32_t)(tag << 22) >> 22) == -1; +} + +static inline uint16_t lfs_tag_type1(lfs_tag_t tag) { + return (tag & 0x70000000) >> 20; +} + +static inline uint16_t lfs_tag_type2(lfs_tag_t tag) { + return (tag & 0x78000000) >> 20; +} + +static inline uint16_t lfs_tag_type3(lfs_tag_t tag) { + return (tag & 0x7ff00000) >> 20; +} + +static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) { + return (tag & 0x0ff00000) >> 20; +} + +static inline int8_t lfs_tag_splice(lfs_tag_t tag) { + return (int8_t)lfs_tag_chunk(tag); +} + +static inline uint16_t lfs_tag_id(lfs_tag_t tag) { + return (tag & 0x000ffc00) >> 10; +} + +static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) { + return tag & 0x000003ff; +} + +static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) { + return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag)); +} + +// operations on attributes in attribute lists +struct lfs_mattr { + lfs_tag_t tag; + const void *buffer; +}; + +struct lfs_diskoff { + lfs_block_t block; + lfs_off_t off; +}; + +#define LFS_MKATTRS(...) \ + (struct lfs_mattr[]){__VA_ARGS__}, \ + sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr) + +// operations on global state +static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) { + a->tag ^= b->tag; + a->pair[0] ^= b->pair[0]; + a->pair[1] ^= b->pair[1]; +} + +static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) { + return a->tag == 0 + && a->pair[0] == 0 + && a->pair[1] == 0; +} + +#ifndef LFS_READONLY +static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) { + return lfs_tag_size(a->tag); +} + +static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) { + return lfs_tag_size(a->tag) & 0x1ff; +} + +static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) { + return lfs_tag_type1(a->tag); +} +#endif + +static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) { + return lfs_tag_size(a->tag) >> 9; +} + +static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a, + const lfs_block_t *pair) { + return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0; +} + +static inline void lfs_gstate_fromle32(lfs_gstate_t *a) { + a->tag = lfs_fromle32(a->tag); + a->pair[0] = lfs_fromle32(a->pair[0]); + a->pair[1] = lfs_fromle32(a->pair[1]); +} + +#ifndef LFS_READONLY +static inline void lfs_gstate_tole32(lfs_gstate_t *a) { + a->tag = lfs_tole32(a->tag); + a->pair[0] = lfs_tole32(a->pair[0]); + a->pair[1] = lfs_tole32(a->pair[1]); +} +#endif + +// operations on forward-CRCs used to track erased state +struct lfs_fcrc { + lfs_size_t size; + uint32_t crc; +}; + +static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) { + fcrc->size = lfs_fromle32(fcrc->size); + fcrc->crc = lfs_fromle32(fcrc->crc); +} + +#ifndef LFS_READONLY +static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) { + fcrc->size = lfs_tole32(fcrc->size); + fcrc->crc = lfs_tole32(fcrc->crc); +} +#endif + +// other endianness operations +static void lfs_ctz_fromle32(struct lfs_ctz *ctz) { + ctz->head = lfs_fromle32(ctz->head); + ctz->size = lfs_fromle32(ctz->size); +} + +#ifndef LFS_READONLY +static void lfs_ctz_tole32(struct lfs_ctz *ctz) { + ctz->head = lfs_tole32(ctz->head); + ctz->size = lfs_tole32(ctz->size); +} +#endif + +static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) { + superblock->version = 
lfs_fromle32(superblock->version); + superblock->block_size = lfs_fromle32(superblock->block_size); + superblock->block_count = lfs_fromle32(superblock->block_count); + superblock->name_max = lfs_fromle32(superblock->name_max); + superblock->file_max = lfs_fromle32(superblock->file_max); + superblock->attr_max = lfs_fromle32(superblock->attr_max); +} + +#ifndef LFS_READONLY +static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) { + superblock->version = lfs_tole32(superblock->version); + superblock->block_size = lfs_tole32(superblock->block_size); + superblock->block_count = lfs_tole32(superblock->block_count); + superblock->name_max = lfs_tole32(superblock->name_max); + superblock->file_max = lfs_tole32(superblock->file_max); + superblock->attr_max = lfs_tole32(superblock->attr_max); +} +#endif + +#ifndef LFS_NO_ASSERT +static bool lfs_mlist_isopen(struct lfs_mlist *head, + struct lfs_mlist *node) { + for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) { + if (*p == (struct lfs_mlist*)node) { + return true; + } + } + + return false; +} +#endif + +static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) { + for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) { + if (*p == mlist) { + *p = (*p)->next; + break; + } + } +} + +static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) { + mlist->next = lfs->mlist; + lfs->mlist = mlist; +} + +// some other filesystem operations +static uint32_t lfs_fs_disk_version(lfs_t *lfs) { + (void)lfs; +#ifdef LFS_MULTIVERSION + if (lfs->cfg->disk_version) { + return lfs->cfg->disk_version; + } else +#endif + { + return LFS_DISK_VERSION; + } +} + +static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) { + return 0xffff & (lfs_fs_disk_version(lfs) >> 16); + +} + +static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) { + return 0xffff & (lfs_fs_disk_version(lfs) >> 0); +} + + +/// Internal operations predeclared here /// +#ifndef LFS_READONLY +static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount); +static int lfs_dir_compact(lfs_t *lfs, + lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t begin, uint16_t end); +static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file); +static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file); +static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file); + +static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss); +static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); +static void lfs_fs_prepmove(lfs_t *lfs, + uint16_t id, const lfs_block_t pair[2]); +static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2], + lfs_mdir_t *pdir); +static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2], + lfs_mdir_t *parent); +static int lfs_fs_forceconsistency(lfs_t *lfs); +#endif + +static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock); + +#ifdef LFS_MIGRATE +static int lfs1_traverse(lfs_t *lfs, + int (*cb)(void*, lfs_block_t), void *data); +#endif + +static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir); + +static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); +static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); +static int lfs_file_close_(lfs_t *lfs, lfs_file_t 
*file); +static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file); + +static lfs_ssize_t lfs_fs_size_(lfs_t *lfs); +static int lfs_fs_traverse_(lfs_t *lfs, + int (*cb)(void *data, lfs_block_t block), void *data, + bool includeorphans); + +static int lfs_deinit(lfs_t *lfs); +static int lfs_unmount_(lfs_t *lfs); + + +/// Block allocator /// + +// allocations should call this when all allocated blocks are committed to +// the filesystem +// +// after a checkpoint, the block allocator may realloc any untracked blocks +static void lfs_alloc_ckpoint(lfs_t *lfs) { + lfs->lookahead.ckpoint = lfs->block_count; +} + +// drop the lookahead buffer, this is done during mounting and failed +// traversals in order to avoid invalid lookahead state +static void lfs_alloc_drop(lfs_t *lfs) { + lfs->lookahead.size = 0; + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); +} + +#ifndef LFS_READONLY +static int lfs_alloc_lookahead(void *p, lfs_block_t block) { + lfs_t *lfs = (lfs_t*)p; + lfs_block_t off = ((block - lfs->lookahead.start) + + lfs->block_count) % lfs->block_count; + + if (off < lfs->lookahead.size) { + lfs->lookahead.buffer[off / 8] |= 1U << (off % 8); + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_alloc_scan(lfs_t *lfs) { + // move lookahead buffer to the first unused block + // + // note we limit the lookahead buffer to at most the amount of blocks + // checkpointed, this prevents the math in lfs_alloc from underflowing + lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next) + % lfs->block_count; + lfs->lookahead.next = 0; + lfs->lookahead.size = lfs_min( + 8*lfs->cfg->lookahead_size, + lfs->lookahead.ckpoint); + + // find mask of free blocks from tree + memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size); + int err = lfs_fs_traverse_(lfs, lfs_alloc_lookahead, lfs, true); + if (err) { + lfs_alloc_drop(lfs); + return err; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { + while (true) { + // scan our lookahead buffer for free blocks + while (lfs->lookahead.next < lfs->lookahead.size) { + if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8] + & (1U << (lfs->lookahead.next % 8)))) { + // found a free block + *block = (lfs->lookahead.start + lfs->lookahead.next) + % lfs->block_count; + + // eagerly find next free block to maximize how many blocks + // lfs_alloc_ckpoint makes available for scanning + while (true) { + lfs->lookahead.next += 1; + lfs->lookahead.ckpoint -= 1; + + if (lfs->lookahead.next >= lfs->lookahead.size + || !(lfs->lookahead.buffer[lfs->lookahead.next / 8] + & (1U << (lfs->lookahead.next % 8)))) { + return 0; + } + } + } + + lfs->lookahead.next += 1; + lfs->lookahead.ckpoint -= 1; + } + + // In order to keep our block allocator from spinning forever when our + // filesystem is full, we mark points where there are no in-flight + // allocations with a checkpoint before starting a set of allocations. + // + // If we've looked at all blocks since the last checkpoint, we report + // the filesystem as out of storage. + // + if (lfs->lookahead.ckpoint <= 0) { + LFS_ERROR("No more free space 0x%"PRIx32, + (lfs->lookahead.start + lfs->lookahead.next) + % lfs->block_count); + return LFS_ERR_NOSPC; + } + + // No blocks in our lookahead buffer, we need to scan the filesystem for + // unused blocks in the next lookahead window. 
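+        //
+        // as a rough worked example (hypothetical numbers, not from this
+        // config): with lookahead_size = 16 bytes, one scan covers
+        // 8*16 = 128 blocks; lfs_alloc_scan slides lookahead.start forward
+        // by lookahead.next (mod block_count) and rebuilds the free-block
+        // bitmap by traversing the filesystem, never scanning more than
+        // lookahead.ckpoint blocks between checkpoints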
+        int err = lfs_alloc_scan(lfs);
+        if (err) {
+            return err;
+        }
+    }
+}
+#endif
+
+/// Metadata pair and directory operations ///
+static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
+        lfs_tag_t gmask, lfs_tag_t gtag,
+        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
+    lfs_off_t off = dir->off;
+    lfs_tag_t ntag = dir->etag;
+    lfs_stag_t gdiff = 0;
+
+    // synthetic moves
+    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
+            lfs_tag_id(gmask) != 0) {
+        if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(gtag)) {
+            return LFS_ERR_NOENT;
+        } else if (lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(gtag)) {
+            gdiff -= LFS_MKTAG(0, 1, 0);
+        }
+    }
+
+    // iterate over dir block backwards (for faster lookups)
+    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
+        off -= lfs_tag_dsize(ntag);
+        lfs_tag_t tag = ntag;
+        int err = lfs_bd_read(lfs,
+                NULL, &lfs->rcache, sizeof(ntag),
+                dir->pair[0], off, &ntag, sizeof(ntag));
+        LFS_ASSERT(err <= 0);
+        if (err) {
+            return err;
+        }
+
+        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
+
+        if (lfs_tag_id(gmask) != 0 &&
+                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
+            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
+                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
+                // found where we were created
+                return LFS_ERR_NOENT;
+            }
+
+            // move around splices
+            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+        }
+
+        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
+            if (lfs_tag_isdelete(tag)) {
+                return LFS_ERR_NOENT;
+            }
+
+            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
+            err = lfs_bd_read(lfs,
+                    NULL, &lfs->rcache, diff,
+                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
+            LFS_ASSERT(err <= 0);
+            if (err) {
+                return err;
+            }
+
+            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
+
+            return tag + gdiff;
+        }
+    }
+
+    return LFS_ERR_NOENT;
+}
+
+static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
+        lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
+    return lfs_dir_getslice(lfs, dir,
+            gmask, gtag,
+            0, buffer, lfs_tag_size(gtag));
+}
+
+static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
+        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
+        lfs_tag_t gmask, lfs_tag_t gtag,
+        lfs_off_t off, void *buffer, lfs_size_t size) {
+    uint8_t *data = buffer;
+    if (off+size > lfs->cfg->block_size) {
+        return LFS_ERR_CORRUPT;
+    }
+
+    while (size > 0) {
+        lfs_size_t diff = size;
+
+        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
+                off < pcache->off + pcache->size) {
+            if (off >= pcache->off) {
+                // is already in pcache?
+                diff = lfs_min(diff, pcache->size - (off-pcache->off));
+                memcpy(data, &pcache->buffer[off-pcache->off], diff);
+
+                data += diff;
+                off += diff;
+                size -= diff;
+                continue;
+            }
+
+            // pcache takes priority
+            diff = lfs_min(diff, pcache->off-off);
+        }
+
+        if (rcache->block == LFS_BLOCK_INLINE &&
+                off < rcache->off + rcache->size) {
+            if (off >= rcache->off) {
+                // is already in rcache?
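+                // (same cache-hit arithmetic as the block-device reads; e.g.
+                // with a hypothetical off = 10, rcache->off = 8 and
+                // rcache->size = 16, we can copy min(diff, 16-(10-8)) = up
+                // to 14 bytes straight out of rcache->buffer[2...])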
+                diff = lfs_min(diff, rcache->size - (off-rcache->off));
+                memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+                data += diff;
+                off += diff;
+                size -= diff;
+                continue;
+            }
+        }
+
+        // load to cache, first condition can no longer fail
+        rcache->block = LFS_BLOCK_INLINE;
+        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
+                lfs->cfg->cache_size);
+        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
+                rcache->off, rcache->buffer, rcache->size);
+        if (err < 0) {
+            return err;
+        }
+    }
+
+    return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_dir_traverse_filter(void *p,
+        lfs_tag_t tag, const void *buffer) {
+    lfs_tag_t *filtertag = p;
+    (void)buffer;
+
+    // which mask depends on unique bit in tag structure
+    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
+            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
+            : LFS_MKTAG(0x700, 0x3ff, 0);
+
+    // check for redundancy
+    if ((mask & tag) == (mask & *filtertag) ||
+            lfs_tag_isdelete(*filtertag) ||
+            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
+                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
+                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
+        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
+        return true;
+    }
+
+    // check if we need to adjust for created/deleted tags
+    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
+            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
+        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
+    }
+
+    return false;
+}
+#endif
+
+#ifndef LFS_READONLY
+// maximum recursive depth of lfs_dir_traverse, the deepest call:
+//
+//  traverse with commit
+//  '-> traverse with move
+//      '-> traverse with filter
+//
+#define LFS_DIR_TRAVERSE_DEPTH 3
+
+struct lfs_dir_traverse {
+    const lfs_mdir_t *dir;
+    lfs_off_t off;
+    lfs_tag_t ptag;
+    const struct lfs_mattr *attrs;
+    int attrcount;
+
+    lfs_tag_t tmask;
+    lfs_tag_t ttag;
+    uint16_t begin;
+    uint16_t end;
+    int16_t diff;
+
+    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
+    void *data;
+
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk;
+};
+
+static int lfs_dir_traverse(lfs_t *lfs,
+        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
+        const struct lfs_mattr *attrs, int attrcount,
+        lfs_tag_t tmask, lfs_tag_t ttag,
+        uint16_t begin, uint16_t end, int16_t diff,
+        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
+    // This function is inherently recursive, but bounded. To allow tool-based
+    // analysis without unnecessary code-cost we use an explicit stack
+    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
+    unsigned sp = 0;
+    int res;
+
+    // iterate over directory and attrs
+    lfs_tag_t tag;
+    const void *buffer;
+    struct lfs_diskoff disk = {0};
+    while (true) {
+        {
+            if (off+lfs_tag_dsize(ptag) < dir->off) {
+                off += lfs_tag_dsize(ptag);
+                int err = lfs_bd_read(lfs,
+                        NULL, &lfs->rcache, sizeof(tag),
+                        dir->pair[0], off, &tag, sizeof(tag));
+                if (err) {
+                    return err;
+                }
+
+                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
+                disk.block = dir->pair[0];
+                disk.off = off+sizeof(lfs_tag_t);
+                buffer = &disk;
+                ptag = tag;
+            } else if (attrcount > 0) {
+                tag = attrs[0].tag;
+                buffer = attrs[0].buffer;
+                attrs += 1;
+                attrcount -= 1;
+            } else {
+                // finished traversal, pop from stack?
+                res = 0;
+                break;
+            }
+
+            // do we need to filter?
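+            // (tmask/ttag pick which tag types survive; e.g. compaction
+            // below traverses with tmask = LFS_MKTAG(0x400, 0x3ff, 0) and
+            // ttag = LFS_MKTAG(LFS_TYPE_NAME, 0, 0), so only tags with the
+            // 0x400 type bit clear -- names, structs, user attrs -- are
+            // kept, while splice/crc/tail tags are dropped)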
+ lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0); + if ((mask & tmask & tag) != (mask & tmask & ttag)) { + continue; + } + + if (lfs_tag_id(tmask) != 0) { + LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH); + // recurse, scan for duplicates, and update tag based on + // creates/deletes + stack[sp] = (struct lfs_dir_traverse){ + .dir = dir, + .off = off, + .ptag = ptag, + .attrs = attrs, + .attrcount = attrcount, + .tmask = tmask, + .ttag = ttag, + .begin = begin, + .end = end, + .diff = diff, + .cb = cb, + .data = data, + .tag = tag, + .buffer = buffer, + .disk = disk, + }; + sp += 1; + + tmask = 0; + ttag = 0; + begin = 0; + end = 0; + diff = 0; + cb = lfs_dir_traverse_filter; + data = &stack[sp-1].tag; + continue; + } + } + +popped: + // in filter range? + if (lfs_tag_id(tmask) != 0 && + !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) { + continue; + } + + // handle special cases for mcu-side operations + if (lfs_tag_type3(tag) == LFS_FROM_NOOP) { + // do nothing + } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) { + // Without this condition, lfs_dir_traverse can exhibit an + // extremely expensive O(n^3) of nested loops when renaming. + // This happens because lfs_dir_traverse tries to filter tags by + // the tags in the source directory, triggering a second + // lfs_dir_traverse with its own filter operation. + // + // traverse with commit + // '-> traverse with filter + // '-> traverse with move + // '-> traverse with filter + // + // However we don't actually care about filtering the second set of + // tags, since duplicate tags have no effect when filtering. + // + // This check skips this unnecessary recursive filtering explicitly, + // reducing this runtime from O(n^3) to O(n^2). + if (cb == lfs_dir_traverse_filter) { + continue; + } + + // recurse into move + stack[sp] = (struct lfs_dir_traverse){ + .dir = dir, + .off = off, + .ptag = ptag, + .attrs = attrs, + .attrcount = attrcount, + .tmask = tmask, + .ttag = ttag, + .begin = begin, + .end = end, + .diff = diff, + .cb = cb, + .data = data, + .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0), + }; + sp += 1; + + uint16_t fromid = lfs_tag_size(tag); + uint16_t toid = lfs_tag_id(tag); + dir = buffer; + off = 0; + ptag = 0xffffffff; + attrs = NULL; + attrcount = 0; + tmask = LFS_MKTAG(0x600, 0x3ff, 0); + ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0); + begin = fromid; + end = fromid+1; + diff = toid-fromid+diff; + } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) { + for (unsigned i = 0; i < lfs_tag_size(tag); i++) { + const struct lfs_attr *a = buffer; + res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type, + lfs_tag_id(tag) + diff, a[i].size), a[i].buffer); + if (res < 0) { + return res; + } + + if (res) { + break; + } + } + } else { + res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer); + if (res < 0) { + return res; + } + + if (res) { + break; + } + } + } + + if (sp > 0) { + // pop from the stack and return, fortunately all pops share + // a destination + dir = stack[sp-1].dir; + off = stack[sp-1].off; + ptag = stack[sp-1].ptag; + attrs = stack[sp-1].attrs; + attrcount = stack[sp-1].attrcount; + tmask = stack[sp-1].tmask; + ttag = stack[sp-1].ttag; + begin = stack[sp-1].begin; + end = stack[sp-1].end; + diff = stack[sp-1].diff; + cb = stack[sp-1].cb; + data = stack[sp-1].data; + tag = stack[sp-1].tag; + buffer = stack[sp-1].buffer; + disk = stack[sp-1].disk; + sp -= 1; + goto popped; + } else { + return res; + } +} +#endif + +static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs, + lfs_mdir_t *dir, const lfs_block_t pair[2], + lfs_tag_t fmask, lfs_tag_t 
ftag, uint16_t *id, + int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) { + // we can find tag very efficiently during a fetch, since we're already + // scanning the entire directory + lfs_stag_t besttag = -1; + + // if either block address is invalid we return LFS_ERR_CORRUPT here, + // otherwise later writes to the pair could fail + if (lfs->block_count + && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) { + return LFS_ERR_CORRUPT; + } + + // find the block with the most recent revision + uint32_t revs[2] = {0, 0}; + int r = 0; + for (int i = 0; i < 2; i++) { + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(revs[i]), + pair[i], 0, &revs[i], sizeof(revs[i])); + revs[i] = lfs_fromle32(revs[i]); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + if (err != LFS_ERR_CORRUPT && + lfs_scmp(revs[i], revs[(i+1)%2]) > 0) { + r = i; + } + } + + dir->pair[0] = pair[(r+0)%2]; + dir->pair[1] = pair[(r+1)%2]; + dir->rev = revs[(r+0)%2]; + dir->off = 0; // nonzero = found some commits + + // now scan tags to fetch the actual dir and find possible match + for (int i = 0; i < 2; i++) { + lfs_off_t off = 0; + lfs_tag_t ptag = 0xffffffff; + + uint16_t tempcount = 0; + lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL}; + bool tempsplit = false; + lfs_stag_t tempbesttag = besttag; + + // assume not erased until proven otherwise + bool maybeerased = false; + bool hasfcrc = false; + struct lfs_fcrc fcrc; + + dir->rev = lfs_tole32(dir->rev); + uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev)); + dir->rev = lfs_fromle32(dir->rev); + + while (true) { + // extract next tag + lfs_tag_t tag; + off += lfs_tag_dsize(ptag); + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off, &tag, sizeof(tag)); + if (err) { + if (err == LFS_ERR_CORRUPT) { + // can't continue? + break; + } + return err; + } + + crc = lfs_crc(crc, &tag, sizeof(tag)); + tag = lfs_frombe32(tag) ^ ptag; + + // next commit not yet programmed? + if (!lfs_tag_isvalid(tag)) { + // we only might be erased if the last tag was a crc + maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC); + break; + // out of range? 
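+            // (a decoded tag whose dsize would run past the end of the
+            // block can't belong to a complete commit -- most likely a
+            // torn prog -- so we stop here and keep the last good commit)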
+ } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) { + break; + } + + ptag = tag; + + if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) { + // check the crc attr + uint32_t dcrc; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc)); + if (err) { + if (err == LFS_ERR_CORRUPT) { + break; + } + return err; + } + dcrc = lfs_fromle32(dcrc); + + if (crc != dcrc) { + break; + } + + // reset the next bit if we need to + ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31; + + // toss our crc into the filesystem seed for + // pseudorandom numbers, note we use another crc here + // as a collection function because it is sufficiently + // random and convenient + lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc)); + + // update with what's found so far + besttag = tempbesttag; + dir->off = off + lfs_tag_dsize(tag); + dir->etag = ptag; + dir->count = tempcount; + dir->tail[0] = temptail[0]; + dir->tail[1] = temptail[1]; + dir->split = tempsplit; + + // reset crc, hasfcrc + crc = 0xffffffff; + continue; + } + + // crc the entry first, hopefully leaving it in the cache + err = lfs_bd_crc(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), + lfs_tag_dsize(tag)-sizeof(tag), &crc); + if (err) { + if (err == LFS_ERR_CORRUPT) { + break; + } + return err; + } + + // directory modification tags? + if (lfs_tag_type1(tag) == LFS_TYPE_NAME) { + // increase count of files if necessary + if (lfs_tag_id(tag) >= tempcount) { + tempcount = lfs_tag_id(tag) + 1; + } + } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) { + tempcount += lfs_tag_splice(tag); + + if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) | + (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) { + tempbesttag |= 0x80000000; + } else if (tempbesttag != -1 && + lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) { + tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0); + } + } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) { + tempsplit = (lfs_tag_chunk(tag) & 1); + + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), &temptail, 8); + if (err) { + if (err == LFS_ERR_CORRUPT) { + break; + } + return err; + } + lfs_pair_fromle32(temptail); + } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) { + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], off+sizeof(tag), + &fcrc, sizeof(fcrc)); + if (err) { + if (err == LFS_ERR_CORRUPT) { + break; + } + return err; + } + + lfs_fcrc_fromle32(&fcrc); + hasfcrc = true; + } + + // found a match for our fetcher? + if ((fmask & tag) == (fmask & ftag)) { + int res = cb(data, tag, &(struct lfs_diskoff){ + dir->pair[0], off+sizeof(tag)}); + if (res < 0) { + if (res == LFS_ERR_CORRUPT) { + break; + } + return res; + } + + if (res == LFS_CMP_EQ) { + // found a match + tempbesttag = tag; + } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == + (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) { + // found an identical tag, but contents didn't match + // this must mean that our besttag has been overwritten + tempbesttag = -1; + } else if (res == LFS_CMP_GT && + lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) { + // found a greater match, keep track to keep things sorted + tempbesttag = tag | 0x80000000; + } + } + } + + // found no valid commits? + if (dir->off == 0) { + // try the other block? + lfs_pair_swap(dir->pair); + dir->rev = revs[(r+1)%2]; + continue; + } + + // did we end on a valid commit? 
we may have an erased block + dir->erased = false; + if (maybeerased && dir->off % lfs->cfg->prog_size == 0) { + #ifdef LFS_MULTIVERSION + // note versions < lfs2.1 did not have fcrc tags, if + // we're < lfs2.1 treat missing fcrc as erased data + // + // we don't strictly need to do this, but otherwise writing + // to lfs2.0 disks becomes very inefficient + if (lfs_fs_disk_version(lfs) < 0x00020001) { + dir->erased = true; + + } else + #endif + if (hasfcrc) { + // check for an fcrc matching the next prog's erased state, if + // this failed most likely a previous prog was interrupted, we + // need a new erase + uint32_t fcrc_ = 0xffffffff; + int err = lfs_bd_crc(lfs, + NULL, &lfs->rcache, lfs->cfg->block_size, + dir->pair[0], dir->off, fcrc.size, &fcrc_); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + // found beginning of erased part? + dir->erased = (fcrc_ == fcrc.crc); + } + } + + // synthetic move + if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) { + if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) { + besttag |= 0x80000000; + } else if (besttag != -1 && + lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) { + besttag -= LFS_MKTAG(0, 1, 0); + } + } + + // found tag? or found best id? + if (id) { + *id = lfs_min(lfs_tag_id(besttag), dir->count); + } + + if (lfs_tag_isvalid(besttag)) { + return besttag; + } else if (lfs_tag_id(besttag) < dir->count) { + return LFS_ERR_NOENT; + } else { + return 0; + } + } + + LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}", + dir->pair[0], dir->pair[1]); + return LFS_ERR_CORRUPT; +} + +static int lfs_dir_fetch(lfs_t *lfs, + lfs_mdir_t *dir, const lfs_block_t pair[2]) { + // note, mask=-1, tag=-1 can never match a tag since this + // pattern has the invalid bit set + return (int)lfs_dir_fetchmatch(lfs, dir, pair, + (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL); +} + +static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir, + lfs_gstate_t *gstate) { + lfs_gstate_t temp; + lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0), + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp); + if (res < 0 && res != LFS_ERR_NOENT) { + return res; + } + + if (res != LFS_ERR_NOENT) { + // xor together to find resulting gstate + lfs_gstate_fromle32(&temp); + lfs_gstate_xor(gstate, &temp); + } + + return 0; +} + +static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir, + uint16_t id, struct lfs_info *info) { + if (id == 0x3ff) { + // special case for root + strcpy(info->name, "/"); + info->type = LFS_TYPE_DIR; + return 0; + } + + lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name); + if (tag < 0) { + return (int)tag; + } + + info->type = lfs_tag_type3(tag); + + struct lfs_ctz ctz; + tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz); + if (tag < 0) { + return (int)tag; + } + lfs_ctz_fromle32(&ctz); + + if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) { + info->size = ctz.size; + } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) { + info->size = lfs_tag_size(tag); + } + + return 0; +} + +struct lfs_dir_find_match { + lfs_t *lfs; + const void *name; + lfs_size_t size; +}; + +static int lfs_dir_find_match(void *data, + lfs_tag_t tag, const void *buffer) { + struct lfs_dir_find_match *name = data; + lfs_t *lfs = name->lfs; + const struct lfs_diskoff *disk = buffer; + + // compare with disk + lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag)); + int res = lfs_bd_cmp(lfs, + NULL, 
&lfs->rcache, diff, + disk->block, disk->off, name->name, diff); + if (res != LFS_CMP_EQ) { + return res; + } + + // only equal if our size is still the same + if (name->size != lfs_tag_size(tag)) { + return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT; + } + + // found a match! + return LFS_CMP_EQ; +} + +// lfs_dir_find tries to set path and id even if file is not found +// +// returns: +// - 0 if file is found +// - LFS_ERR_NOENT if file or parent is not found +// - LFS_ERR_NOTDIR if parent is not a dir +static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir, + const char **path, uint16_t *id) { + // we reduce path to a single name if we can find it + const char *name = *path; + + // default to root dir + lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0); + dir->tail[0] = lfs->root[0]; + dir->tail[1] = lfs->root[1]; + + // empty paths are not allowed + if (*name == '\0') { + return LFS_ERR_INVAL; + } + + while (true) { +nextname: + // skip slashes if we're a directory + if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { + name += strspn(name, "/"); + } + lfs_size_t namelen = strcspn(name, "/"); + + // skip '.' + if (namelen == 1 && memcmp(name, ".", 1) == 0) { + name += namelen; + goto nextname; + } + + // error on unmatched '..', trying to go above root? + if (namelen == 2 && memcmp(name, "..", 2) == 0) { + return LFS_ERR_INVAL; + } + + // skip if matched by '..' in name + const char *suffix = name + namelen; + lfs_size_t sufflen; + int depth = 1; + while (true) { + suffix += strspn(suffix, "/"); + sufflen = strcspn(suffix, "/"); + if (sufflen == 0) { + break; + } + + if (sufflen == 1 && memcmp(suffix, ".", 1) == 0) { + // noop + } else if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) { + depth -= 1; + if (depth == 0) { + name = suffix + sufflen; + goto nextname; + } + } else { + depth += 1; + } + + suffix += sufflen; + } + + // found path + if (*name == '\0') { + return tag; + } + + // update what we've found so far + *path = name; + + // only continue if we're a directory + if (lfs_tag_type3(tag) != LFS_TYPE_DIR) { + return LFS_ERR_NOTDIR; + } + + // grab the entry data + if (lfs_tag_id(tag) != 0x3ff) { + lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail); + if (res < 0) { + return res; + } + lfs_pair_fromle32(dir->tail); + } + + // find entry matching name + while (true) { + tag = lfs_dir_fetchmatch(lfs, dir, dir->tail, + LFS_MKTAG(0x780, 0, 0), + LFS_MKTAG(LFS_TYPE_NAME, 0, namelen), + id, + lfs_dir_find_match, &(struct lfs_dir_find_match){ + lfs, name, namelen}); + if (tag < 0) { + return tag; + } + + if (tag) { + break; + } + + if (!dir->split) { + return LFS_ERR_NOENT; + } + } + + // to next name + name += namelen; + } +} + +// commit logic +struct lfs_commit { + lfs_block_t block; + lfs_off_t off; + lfs_tag_t ptag; + uint32_t crc; + + lfs_off_t begin; + lfs_off_t end; +}; + +#ifndef LFS_READONLY +static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit, + const void *buffer, lfs_size_t size) { + int err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, false, + commit->block, commit->off , + (const uint8_t*)buffer, size); + if (err) { + return err; + } + + commit->crc = lfs_crc(commit->crc, buffer, size); + commit->off += size; + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit, + lfs_tag_t tag, const void *buffer) { + // check if we fit + lfs_size_t dsize = lfs_tag_dsize(tag); + if (commit->off + dsize > commit->end) 
{ + return LFS_ERR_NOSPC; + } + + // write out tag + lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag); + int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag)); + if (err) { + return err; + } + + if (!(tag & 0x80000000)) { + // from memory + err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag)); + if (err) { + return err; + } + } else { + // from disk + const struct lfs_diskoff *disk = buffer; + for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) { + // rely on caching to make this efficient + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, dsize-sizeof(tag)-i, + disk->block, disk->off+i, &dat, 1); + if (err) { + return err; + } + + err = lfs_dir_commitprog(lfs, commit, &dat, 1); + if (err) { + return err; + } + } + } + + commit->ptag = tag & 0x7fffffff; + return 0; +} +#endif + +#ifndef LFS_READONLY + +static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) { + // align to program units + // + // this gets a bit complex as we have two types of crcs: + // - 5-word crc with fcrc to check following prog (middle of block) + // - 2-word crc with no following prog (end of block) + const lfs_off_t end = lfs_alignup( + lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size), + lfs->cfg->prog_size); + + lfs_off_t off1 = 0; + uint32_t crc1 = 0; + + // create crc tags to fill up remainder of commit, note that + // padding is not crced, which lets fetches skip padding but + // makes committing a bit more complicated + while (commit->off < end) { + lfs_off_t noff = ( + lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe) + + (commit->off+sizeof(lfs_tag_t))); + // too large for crc tag? need padding commits + if (noff < end) { + noff = lfs_min(noff, end - 5*sizeof(uint32_t)); + } + + // space for fcrc? 
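+        // (an fcrc is the crc of the next prog_size bytes of erased flash;
+        // on a later fetch, if those bytes still match fcrc.crc we know
+        // this commit was the last one and the space after it is safely
+        // erased; e.g. with a hypothetical prog_size of 16, the fcrc
+        // covers the 16 bytes immediately after this commit)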
+ uint8_t eperturb = (uint8_t)-1; + if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) { + // first read the leading byte, this always contains a bit + // we can perturb to avoid writes that don't change the fcrc + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, lfs->cfg->prog_size, + commit->block, noff, &eperturb, 1); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + #ifdef LFS_MULTIVERSION + // unfortunately fcrcs break mdir fetching < lfs2.1, so only write + // these if we're a >= lfs2.1 filesystem + if (lfs_fs_disk_version(lfs) <= 0x00020000) { + // don't write fcrc + } else + #endif + { + // find the expected fcrc, don't bother avoiding a reread + // of the eperturb, it should still be in our cache + struct lfs_fcrc fcrc = { + .size = lfs->cfg->prog_size, + .crc = 0xffffffff + }; + err = lfs_bd_crc(lfs, + NULL, &lfs->rcache, lfs->cfg->prog_size, + commit->block, noff, fcrc.size, &fcrc.crc); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + lfs_fcrc_tole32(&fcrc); + err = lfs_dir_commitattr(lfs, commit, + LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)), + &fcrc); + if (err) { + return err; + } + } + } + + // build commit crc + struct { + lfs_tag_t tag; + uint32_t crc; + } ccrc; + lfs_tag_t ntag = LFS_MKTAG( + LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff, + noff - (commit->off+sizeof(lfs_tag_t))); + ccrc.tag = lfs_tobe32(ntag ^ commit->ptag); + commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t)); + ccrc.crc = lfs_tole32(commit->crc); + + int err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, false, + commit->block, commit->off, &ccrc, sizeof(ccrc)); + if (err) { + return err; + } + + // keep track of non-padding checksum to verify + if (off1 == 0) { + off1 = commit->off + sizeof(lfs_tag_t); + crc1 = commit->crc; + } + + commit->off = noff; + // perturb valid bit? + commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24); + // reset crc for next commit + commit->crc = 0xffffffff; + + // manually flush here since we don't prog the padding, this confuses + // the caching layer + if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) { + // flush buffers + int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false); + if (err) { + return err; + } + } + } + + // successful commit, check checksums to make sure + // + // note that we don't need to check padding commits, worst + // case if they are corrupted we would have had to compact anyways + lfs_off_t off = commit->begin; + uint32_t crc = 0xffffffff; + int err = lfs_bd_crc(lfs, + NULL, &lfs->rcache, off1+sizeof(uint32_t), + commit->block, off, off1-off, &crc); + if (err) { + return err; + } + + // check non-padding commits against known crc + if (crc != crc1) { + return LFS_ERR_CORRUPT; + } + + // make sure to check crc in case we happen to pick + // up an unrelated crc (frozen block?) 
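+    // (this relies on a property of crc32 without a final xor: running the
+    // crc over a message followed by that message's own little-endian crc
+    // always yields 0)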
+ err = lfs_bd_crc(lfs, + NULL, &lfs->rcache, sizeof(uint32_t), + commit->block, off1, sizeof(uint32_t), &crc); + if (err) { + return err; + } + + if (crc != 0) { + return LFS_ERR_CORRUPT; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) { + // allocate pair of dir blocks (backwards, so we write block 1 first) + for (int i = 0; i < 2; i++) { + int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]); + if (err) { + return err; + } + } + + // zero for reproducibility in case initial block is unreadable + dir->rev = 0; + + // rather than clobbering one of the blocks we just pretend + // the revision may be valid + int err = lfs_bd_read(lfs, + NULL, &lfs->rcache, sizeof(dir->rev), + dir->pair[0], 0, &dir->rev, sizeof(dir->rev)); + dir->rev = lfs_fromle32(dir->rev); + if (err && err != LFS_ERR_CORRUPT) { + return err; + } + + // to make sure we don't immediately evict, align the new revision count + // to our block_cycles modulus, see lfs_dir_compact for why our modulus + // is tweaked this way + if (lfs->cfg->block_cycles > 0) { + dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1)); + } + + // set defaults + dir->off = sizeof(dir->rev); + dir->etag = 0xffffffff; + dir->count = 0; + dir->tail[0] = LFS_BLOCK_NULL; + dir->tail[1] = LFS_BLOCK_NULL; + dir->erased = false; + dir->split = false; + + // don't write out yet, let caller take care of that + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) { + // steal state + int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail + lfs_pair_tole32(tail->tail); + err = lfs_dir_commit(lfs, dir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail})); + lfs_pair_fromle32(tail->tail); + if (err) { + return err; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_split(lfs_t *lfs, + lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t split, uint16_t end) { + // create tail metadata pair + lfs_mdir_t tail; + int err = lfs_dir_alloc(lfs, &tail); + if (err) { + return err; + } + + tail.split = dir->split; + tail.tail[0] = dir->tail[0]; + tail.tail[1] = dir->tail[1]; + + // note we don't care about LFS_OK_RELOCATED + int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end); + if (res < 0) { + return res; + } + + dir->tail[0] = tail.pair[0]; + dir->tail[1] = tail.pair[1]; + dir->split = true; + + // update root if needed + if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) { + lfs->root[0] = tail.pair[0]; + lfs->root[1] = tail.pair[1]; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) { + lfs_size_t *size = p; + (void)buffer; + + *size += lfs_tag_dsize(tag); + return 0; +} +#endif + +#ifndef LFS_READONLY +struct lfs_dir_commit_commit { + lfs_t *lfs; + struct lfs_commit *commit; +}; +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) { + struct lfs_dir_commit_commit *commit = p; + return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer); +} +#endif + +#ifndef LFS_READONLY +static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) { + // If our revision count == n * block_cycles, we should force a relocation, + // this is how littlefs wear-levels at the metadata-pair level. 
Note that we
+    // actually use (block_cycles+1)|1, this is to avoid two corner cases:
+    // 1. block_cycles = 1, which would prevent relocations from terminating
+    // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
+    //    one metadata block in the pair, effectively making this useless
+    return (lfs->cfg->block_cycles > 0
+            && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_dir_compact(lfs_t *lfs,
+        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
+        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
+    // save some state in case block is bad
+    bool relocated = false;
+    bool tired = lfs_dir_needsrelocation(lfs, dir);
+
+    // increment revision count
+    dir->rev += 1;
+
+    // do not proactively relocate blocks during migrations, this
+    // can cause a number of failure states such as clobbering the
+    // v1 superblock if we relocate root, and invalidating directory
+    // pointers if we relocate the head of a directory. On top of
+    // this, relocations increase the overall complexity of
+    // lfs_migration, which is already a delicate operation.
+#ifdef LFS_MIGRATE
+    if (lfs->lfs1) {
+        tired = false;
+    }
+#endif
+
+    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
+        // we're writing too much, time to relocate
+        goto relocate;
+    }
+
+    // begin loop to commit compaction to blocks until a compact sticks
+    while (true) {
+        {
+            // setup commit state
+            struct lfs_commit commit = {
+                .block = dir->pair[1],
+                .off = 0,
+                .ptag = 0xffffffff,
+                .crc = 0xffffffff,
+
+                .begin = 0,
+                .end = (lfs->cfg->metadata_max ?
+                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
+            };
+
+            // erase block to write to
+            int err = lfs_bd_erase(lfs, dir->pair[1]);
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // write out header
+            dir->rev = lfs_tole32(dir->rev);
+            err = lfs_dir_commitprog(lfs, &commit,
+                    &dir->rev, sizeof(dir->rev));
+            dir->rev = lfs_fromle32(dir->rev);
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // traverse the directory, this time writing out all unique tags
+            err = lfs_dir_traverse(lfs,
+                    source, 0, 0xffffffff, attrs, attrcount,
+                    LFS_MKTAG(0x400, 0x3ff, 0),
+                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
+                    begin, end, -begin,
+                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
+                        lfs, &commit});
+            if (err) {
+                if (err == LFS_ERR_CORRUPT) {
+                    goto relocate;
+                }
+                return err;
+            }
+
+            // commit tail, which may be new after last size check
+            if (!lfs_pair_isnull(dir->tail)) {
+                lfs_pair_tole32(dir->tail);
+                err = lfs_dir_commitattr(lfs, &commit,
+                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
+                        dir->tail);
+                lfs_pair_fromle32(dir->tail);
+                if (err) {
+                    if (err == LFS_ERR_CORRUPT) {
+                        goto relocate;
+                    }
+                    return err;
+                }
+            }
+
+            // bring over gstate?
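+            // (gstate is tracked as xor deltas: gdisk ^ gstate is the
+            // pending change, and since xor is self-inverse, xoring a
+            // stale contribution a second time cancels it back out)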
+ lfs_gstate_t delta = {0}; + if (!relocated) { + lfs_gstate_xor(&delta, &lfs->gdisk); + lfs_gstate_xor(&delta, &lfs->gstate); + } + lfs_gstate_xor(&delta, &lfs->gdelta); + delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff); + + err = lfs_dir_getgstate(lfs, dir, &delta); + if (err) { + return err; + } + + if (!lfs_gstate_iszero(&delta)) { + lfs_gstate_tole32(&delta); + err = lfs_dir_commitattr(lfs, &commit, + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff, + sizeof(delta)), &delta); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + // complete commit with crc + err = lfs_dir_commitcrc(lfs, &commit); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // successful compaction, swap dir pair to indicate most recent + LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0); + lfs_pair_swap(dir->pair); + dir->count = end - begin; + dir->off = commit.off; + dir->etag = commit.ptag; + // update gstate + lfs->gdelta = (lfs_gstate_t){0}; + if (!relocated) { + lfs->gdisk = lfs->gstate; + } + } + break; + +relocate: + // commit was corrupted, drop caches and prepare to relocate block + relocated = true; + lfs_cache_drop(lfs, &lfs->pcache); + if (!tired) { + LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]); + } + + // can't relocate superblock, filesystem is now frozen + if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) { + LFS_WARN("Superblock 0x%"PRIx32" has become unwritable", + dir->pair[1]); + return LFS_ERR_NOSPC; + } + + // relocate half of pair + int err = lfs_alloc(lfs, &dir->pair[1]); + if (err && (err != LFS_ERR_NOSPC || !tired)) { + return err; + } + + tired = false; + continue; + } + + return relocated ? LFS_OK_RELOCATED : 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *source, uint16_t begin, uint16_t end) { + while (true) { + // find size of first split, we do this by halving the split until + // the metadata is guaranteed to fit + // + // Note that this isn't a true binary search, we never increase the + // split size. This may result in poorly distributed metadata but isn't + // worth the extra code size or performance hit to fix. + lfs_size_t split = begin; + while (end - split > 1) { + lfs_size_t size = 0; + int err = lfs_dir_traverse(lfs, + source, 0, 0xffffffff, attrs, attrcount, + LFS_MKTAG(0x400, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_NAME, 0, 0), + split, end, -split, + lfs_dir_commit_size, &size); + if (err) { + return err; + } + + // space is complicated, we need room for: + // + // - tail: 4+2*4 = 12 bytes + // - gstate: 4+3*4 = 16 bytes + // - move delete: 4 = 4 bytes + // - crc: 4+4 = 8 bytes + // total = 40 bytes + // + // And we cap at half a block to avoid degenerate cases with + // nearly-full metadata blocks. + // + lfs_size_t metadata_max = (lfs->cfg->metadata_max) + ? 
lfs->cfg->metadata_max + : lfs->cfg->block_size; + if (end - split < 0xff + && size <= lfs_min( + metadata_max - 40, + lfs_alignup( + metadata_max/2, + lfs->cfg->prog_size))) { + break; + } + + split = split + ((end - split) / 2); + } + + if (split == begin) { + // no split needed + break; + } + + // split into two metadata pairs and continue + int err = lfs_dir_split(lfs, dir, attrs, attrcount, + source, split, end); + if (err && err != LFS_ERR_NOSPC) { + return err; + } + + if (err) { + // we can't allocate a new block, try to compact with degraded + // performance + LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}", + dir->pair[0], dir->pair[1]); + break; + } else { + end = split; + } + } + + if (lfs_dir_needsrelocation(lfs, dir) + && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) { + // oh no! we're writing too much to the superblock, + // should we expand? + lfs_ssize_t size = lfs_fs_size_(lfs); + if (size < 0) { + return size; + } + + // littlefs cannot reclaim expanded superblocks, so expand cautiously + // + // if our filesystem is more than ~88% full, don't expand, this is + // somewhat arbitrary + if (lfs->block_count - size > lfs->block_count/8) { + LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev); + int err = lfs_dir_split(lfs, dir, attrs, attrcount, + source, begin, end); + if (err && err != LFS_ERR_NOSPC) { + return err; + } + + if (err) { + // welp, we tried, if we ran out of space there's not much + // we can do, we'll error later if we've become frozen + LFS_WARN("Unable to expand superblock"); + } else { + // duplicate the superblock entry into the new superblock + end = 1; + } + } + } + + return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end); +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir, + const lfs_block_t pair[2], + const struct lfs_mattr *attrs, int attrcount, + lfs_mdir_t *pdir) { + int state = 0; + + // calculate changes to the directory + bool hasdelete = false; + for (int i = 0; i < attrcount; i++) { + if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) { + dir->count += 1; + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) { + LFS_ASSERT(dir->count > 0); + dir->count -= 1; + hasdelete = true; + } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) { + dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0]; + dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1]; + dir->split = (lfs_tag_chunk(attrs[i].tag) & 1); + lfs_pair_fromle32(dir->tail); + } + } + + // should we actually drop the directory block? + if (hasdelete && dir->count == 0) { + LFS_ASSERT(pdir); + int err = lfs_fs_pred(lfs, dir->pair, pdir); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + if (err != LFS_ERR_NOENT && pdir->split) { + state = LFS_OK_DROPPED; + goto fixmlist; + } + } + + if (dir->erased && dir->count < 0xff) { + // try to commit + struct lfs_commit commit = { + .block = dir->pair[0], + .off = dir->off, + .ptag = dir->etag, + .crc = 0xffffffff, + + .begin = dir->off, + .end = (lfs->cfg->metadata_max ? 
+ lfs->cfg->metadata_max : lfs->cfg->block_size) - 8, + }; + + // traverse attrs that need to be written out + lfs_pair_tole32(dir->tail); + int err = lfs_dir_traverse(lfs, + dir, dir->off, dir->etag, attrs, attrcount, + 0, 0, 0, 0, 0, + lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){ + lfs, &commit}); + lfs_pair_fromle32(dir->tail); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + + // commit any global diffs if we have any + lfs_gstate_t delta = {0}; + lfs_gstate_xor(&delta, &lfs->gstate); + lfs_gstate_xor(&delta, &lfs->gdisk); + lfs_gstate_xor(&delta, &lfs->gdelta); + delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff); + if (!lfs_gstate_iszero(&delta)) { + err = lfs_dir_getgstate(lfs, dir, &delta); + if (err) { + return err; + } + + lfs_gstate_tole32(&delta); + err = lfs_dir_commitattr(lfs, &commit, + LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff, + sizeof(delta)), &delta); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + } + + // finalize commit with the crc + err = lfs_dir_commitcrc(lfs, &commit); + if (err) { + if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) { + goto compact; + } + return err; + } + + // successful commit, update dir + LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0); + dir->off = commit.off; + dir->etag = commit.ptag; + // and update gstate + lfs->gdisk = lfs->gstate; + lfs->gdelta = (lfs_gstate_t){0}; + + goto fixmlist; + } + +compact: + // fall back to compaction + lfs_cache_drop(lfs, &lfs->pcache); + + state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount, + dir, 0, dir->count); + if (state < 0) { + return state; + } + + goto fixmlist; + +fixmlist:; + // this complicated bit of logic is for fixing up any active + // metadata-pairs that we may have affected + // + // note we have to make two passes since the mdir passed to + // lfs_dir_commit could also be in this list, and even then + // we need to copy the pair so they don't get clobbered if we refetch + // our mdir. 
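+    // (e.g. an open file or dir whose metadata lives in this pair must see
+    // the new pair and shifted id after a create/delete/split, otherwise a
+    // later sync would write through a stale mdir)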
+ lfs_block_t oldpair[2] = {pair[0], pair[1]}; + for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) { + if (lfs_pair_cmp(d->m.pair, oldpair) == 0) { + d->m = *dir; + if (d->m.pair != pair) { + for (int i = 0; i < attrcount; i++) { + if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE && + d->id == lfs_tag_id(attrs[i].tag) && + d->type != LFS_TYPE_DIR) { + d->m.pair[0] = LFS_BLOCK_NULL; + d->m.pair[1] = LFS_BLOCK_NULL; + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE && + d->id > lfs_tag_id(attrs[i].tag)) { + d->id -= 1; + if (d->type == LFS_TYPE_DIR) { + ((lfs_dir_t*)d)->pos -= 1; + } + } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE && + d->id >= lfs_tag_id(attrs[i].tag)) { + d->id += 1; + if (d->type == LFS_TYPE_DIR) { + ((lfs_dir_t*)d)->pos += 1; + } + } + } + } + + while (d->id >= d->m.count && d->m.split) { + // we split and id is on tail now + if (lfs_pair_cmp(d->m.tail, lfs->root) != 0) { + d->id -= d->m.count; + } + int err = lfs_dir_fetch(lfs, &d->m, d->m.tail); + if (err) { + return err; + } + } + } + } + + return state; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount) { + // check for any inline files that aren't RAM backed and + // forcefully evict them, needed for filesystem consistency + for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) { + if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 && + f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) && + f->ctz.size > lfs->cfg->cache_size) { + int err = lfs_file_outline(lfs, f); + if (err) { + return err; + } + + err = lfs_file_flush(lfs, f); + if (err) { + return err; + } + } + } + + lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]}; + lfs_mdir_t ldir = *dir; + lfs_mdir_t pdir; + int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair, + attrs, attrcount, &pdir); + if (state < 0) { + return state; + } + + // update if we're not in mlist, note we may have already been + // updated if we are in mlist + if (lfs_pair_cmp(dir->pair, lpair) == 0) { + *dir = ldir; + } + + // commit was successful, but may require other changes in the + // filesystem, these would normally be tail recursive, but we have + // flattened them here avoid unbounded stack usage + + // need to drop? + if (state == LFS_OK_DROPPED) { + // steal state + int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail, note that this can't create a recursive drop + lpair[0] = pdir.pair[0]; + lpair[1] = pdir.pair[1]; + lfs_pair_tole32(dir->tail); + state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8), + dir->tail}), + NULL); + lfs_pair_fromle32(dir->tail); + if (state < 0) { + return state; + } + + ldir = pdir; + } + + // need to relocate? 
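+    // (relocation propagates up the tree: rewriting the parent's pointer
+    // may relocate the parent's own pair, so we loop until a commit lands
+    // without LFS_OK_RELOCATED, logging each hop below)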
+    bool orphans = false;
+    while (state == LFS_OK_RELOCATED) {
+        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
+                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
+                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
+        state = 0;
+
+        // update internal root
+        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
+            lfs->root[0] = ldir.pair[0];
+            lfs->root[1] = ldir.pair[1];
+        }
+
+        // update internally tracked dirs
+        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
+            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
+                d->m.pair[0] = ldir.pair[0];
+                d->m.pair[1] = ldir.pair[1];
+            }
+
+            if (d->type == LFS_TYPE_DIR &&
+                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
+                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
+                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
+            }
+        }
+
+        // find parent
+        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
+        if (tag < 0 && tag != LFS_ERR_NOENT) {
+            return tag;
+        }
+
+        bool hasparent = (tag != LFS_ERR_NOENT);
+        if (tag != LFS_ERR_NOENT) {
+            // note that if we have a parent, we must have a pred, so this will
+            // always create an orphan
+            int err = lfs_fs_preporphans(lfs, +1);
+            if (err) {
+                return err;
+            }
+
+            // fix pending move in this pair? this looks like an optimization but
+            // is in fact _required_ since relocating may outdate the move.
+            uint16_t moveid = 0x3ff;
+            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+                moveid = lfs_tag_id(lfs->gstate.tag);
+                LFS_DEBUG("Fixing move while relocating "
+                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+                        pdir.pair[0], pdir.pair[1], moveid);
+                lfs_fs_prepmove(lfs, 0x3ff, NULL);
+                if (moveid < lfs_tag_id(tag)) {
+                    tag -= LFS_MKTAG(0, 1, 0);
+                }
+            }
+
+            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
+            lfs_pair_tole32(ldir.pair);
+            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
+                        {LFS_MKTAG_IF(moveid != 0x3ff,
+                            LFS_TYPE_DELETE, moveid, 0), NULL},
+                        {tag, ldir.pair}),
+                    NULL);
+            lfs_pair_fromle32(ldir.pair);
+            if (state < 0) {
+                return state;
+            }
+
+            if (state == LFS_OK_RELOCATED) {
+                lpair[0] = ppair[0];
+                lpair[1] = ppair[1];
+                ldir = pdir;
+                orphans = true;
+                continue;
+            }
+        }
+
+        // find pred
+        int err = lfs_fs_pred(lfs, lpair, &pdir);
+        if (err && err != LFS_ERR_NOENT) {
+            return err;
+        }
+        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));
+
+        // if we can't find dir, it must be new
+        if (err != LFS_ERR_NOENT) {
+            if (lfs_gstate_hasorphans(&lfs->gstate)) {
+                // next step, clean up orphans
+                err = lfs_fs_preporphans(lfs, -(int8_t)hasparent);
+                if (err) {
+                    return err;
+                }
+            }
+
+            // fix pending move in this pair? this looks like an optimization
+            // but is in fact _required_ since relocating may outdate the move.
+            uint16_t moveid = 0x3ff;
+            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
+                moveid = lfs_tag_id(lfs->gstate.tag);
+                LFS_DEBUG("Fixing move while relocating "
+                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
+                        pdir.pair[0], pdir.pair[1], moveid);
+                lfs_fs_prepmove(lfs, 0x3ff, NULL);
+            }
+
+            // replace bad pair, either we clean up desync, or no desync occurred
+            lpair[0] = pdir.pair[0];
+            lpair[1] = pdir.pair[1];
+            lfs_pair_tole32(ldir.pair);
+            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
+                        {LFS_MKTAG_IF(moveid != 0x3ff,
+                            LFS_TYPE_DELETE, moveid, 0), NULL},
+                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
+                            ldir.pair}),
+                    NULL);
+            lfs_pair_fromle32(ldir.pair);
+            if (state < 0) {
+                return state;
+            }
+
+            ldir = pdir;
+        }
+    }
+
+    return orphans ?
LFS_OK_ORPHANED : 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir, + const struct lfs_mattr *attrs, int attrcount) { + int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount); + if (orphans < 0) { + return orphans; + } + + if (orphans) { + // make sure we've removed all orphans, this is a noop if there + // are none, but if we had nested blocks failures we may have + // created some + int err = lfs_fs_deorphan(lfs, false); + if (err) { + return err; + } + } + + return 0; +} +#endif + + +/// Top level directory operations /// +#ifndef LFS_READONLY +static int lfs_mkdir_(lfs_t *lfs, const char *path) { + // deorphan if we haven't yet, needed at most once after poweron + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + struct lfs_mlist cwd; + cwd.next = lfs->mlist; + uint16_t id; + err = lfs_dir_find(lfs, &cwd.m, &path, &id); + if (!(err == LFS_ERR_NOENT && lfs_path_islast(path))) { + return (err < 0) ? err : LFS_ERR_EXIST; + } + + // check that name fits + lfs_size_t nlen = lfs_path_namelen(path); + if (nlen > lfs->name_max) { + return LFS_ERR_NAMETOOLONG; + } + + // build up new directory + lfs_alloc_ckpoint(lfs); + lfs_mdir_t dir; + err = lfs_dir_alloc(lfs, &dir); + if (err) { + return err; + } + + // find end of list + lfs_mdir_t pred = cwd.m; + while (pred.split) { + err = lfs_dir_fetch(lfs, &pred, pred.tail); + if (err) { + return err; + } + } + + // setup dir + lfs_pair_tole32(pred.tail); + err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail})); + lfs_pair_fromle32(pred.tail); + if (err) { + return err; + } + + // current block not end of list? + if (cwd.m.split) { + // update tails, this creates a desync + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // it's possible our predecessor has to be relocated, and if + // our parent is our predecessor's predecessor, this could have + // caused our parent to go out of date, fortunately we can hook + // ourselves into littlefs to catch this + cwd.type = 0; + cwd.id = 0; + lfs->mlist = &cwd; + + lfs_pair_tole32(dir.pair); + err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair})); + lfs_pair_fromle32(dir.pair); + if (err) { + lfs->mlist = cwd.next; + return err; + } + + lfs->mlist = cwd.next; + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } + } + + // now insert into our parent block + lfs_pair_tole32(dir.pair); + err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path}, + {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair}, + {LFS_MKTAG_IF(!cwd.m.split, + LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair})); + lfs_pair_fromle32(dir.pair); + if (err) { + return err; + } + + return 0; +} +#endif + +static int lfs_dir_open_(lfs_t *lfs, lfs_dir_t *dir, const char *path) { + lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL); + if (tag < 0) { + return tag; + } + + if (lfs_tag_type3(tag) != LFS_TYPE_DIR) { + return LFS_ERR_NOTDIR; + } + + lfs_block_t pair[2]; + if (lfs_tag_id(tag) == 0x3ff) { + // handle root dir separately + pair[0] = lfs->root[0]; + pair[1] = lfs->root[1]; + } else { + // get dir pair from parent + lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair); + if (res < 0) { + return res; + } + lfs_pair_fromle32(pair); + } + + // fetch first pair + int err = lfs_dir_fetch(lfs, 
&dir->m, pair); + if (err) { + return err; + } + + // setup entry + dir->head[0] = dir->m.pair[0]; + dir->head[1] = dir->m.pair[1]; + dir->id = 0; + dir->pos = 0; + + // add to list of mdirs + dir->type = LFS_TYPE_DIR; + lfs_mlist_append(lfs, (struct lfs_mlist *)dir); + + return 0; +} + +static int lfs_dir_close_(lfs_t *lfs, lfs_dir_t *dir) { + // remove from list of mdirs + lfs_mlist_remove(lfs, (struct lfs_mlist *)dir); + + return 0; +} + +static int lfs_dir_read_(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) { + memset(info, 0, sizeof(*info)); + + // special offset for '.' and '..' + if (dir->pos == 0) { + info->type = LFS_TYPE_DIR; + strcpy(info->name, "."); + dir->pos += 1; + return true; + } else if (dir->pos == 1) { + info->type = LFS_TYPE_DIR; + strcpy(info->name, ".."); + dir->pos += 1; + return true; + } + + while (true) { + if (dir->id == dir->m.count) { + if (!dir->m.split) { + return false; + } + + int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail); + if (err) { + return err; + } + + dir->id = 0; + } + + int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + dir->id += 1; + if (err != LFS_ERR_NOENT) { + break; + } + } + + dir->pos += 1; + return true; +} + +static int lfs_dir_seek_(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) { + // simply walk from head dir + int err = lfs_dir_rewind_(lfs, dir); + if (err) { + return err; + } + + // first two for ./.. + dir->pos = lfs_min(2, off); + off -= dir->pos; + + // skip superblock entry + dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0); + + while (off > 0) { + if (dir->id == dir->m.count) { + if (!dir->m.split) { + return LFS_ERR_INVAL; + } + + err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail); + if (err) { + return err; + } + + dir->id = 0; + } + + int diff = lfs_min(dir->m.count - dir->id, off); + dir->id += diff; + dir->pos += diff; + off -= diff; + } + + return 0; +} + +static lfs_soff_t lfs_dir_tell_(lfs_t *lfs, lfs_dir_t *dir) { + (void)lfs; + return dir->pos; +} + +static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir) { + // reload the head dir + int err = lfs_dir_fetch(lfs, &dir->m, dir->head); + if (err) { + return err; + } + + dir->id = 0; + dir->pos = 0; + return 0; +} + + +/// File index list operations /// +static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) { + lfs_off_t size = *off; + lfs_off_t b = lfs->cfg->block_size - 2*4; + lfs_off_t i = size / b; + if (i == 0) { + return 0; + } + + i = (size - 4*(lfs_popc(i-1)+2)) / b; + *off = size - b*i - 4*lfs_popc(i); + return i; +} + +static int lfs_ctz_find(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) { + if (size == 0) { + *block = LFS_BLOCK_NULL; + *off = 0; + return 0; + } + + lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1}); + lfs_off_t target = lfs_ctz_index(lfs, &pos); + + while (current > target) { + lfs_size_t skip = lfs_min( + lfs_npw2(current-target+1) - 1, + lfs_ctz(current)); + + int err = lfs_bd_read(lfs, + pcache, rcache, sizeof(head), + head, 4*skip, &head, sizeof(head)); + head = lfs_fromle32(head); + if (err) { + return err; + } + + current -= 1 << skip; + } + + *block = head; + *off = pos; + return 0; +} + +#ifndef LFS_READONLY +static int lfs_ctz_extend(lfs_t *lfs, + lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + lfs_block_t *block, lfs_off_t *off) { + while (true) { + // go ahead and grab a block + lfs_block_t nblock; + int err = 
lfs_alloc(lfs, &nblock); + if (err) { + return err; + } + + { + err = lfs_bd_erase(lfs, nblock); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + if (size == 0) { + *block = nblock; + *off = 0; + return 0; + } + + lfs_size_t noff = size - 1; + lfs_off_t index = lfs_ctz_index(lfs, &noff); + noff = noff + 1; + + // just copy out the last block if it is incomplete + if (noff != lfs->cfg->block_size) { + for (lfs_off_t i = 0; i < noff; i++) { + uint8_t data; + err = lfs_bd_read(lfs, + NULL, rcache, noff-i, + head, i, &data, 1); + if (err) { + return err; + } + + err = lfs_bd_prog(lfs, + pcache, rcache, true, + nblock, i, &data, 1); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + *block = nblock; + *off = noff; + return 0; + } + + // append block + index += 1; + lfs_size_t skips = lfs_ctz(index) + 1; + lfs_block_t nhead = head; + for (lfs_off_t i = 0; i < skips; i++) { + nhead = lfs_tole32(nhead); + err = lfs_bd_prog(lfs, pcache, rcache, true, + nblock, 4*i, &nhead, 4); + nhead = lfs_fromle32(nhead); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + if (i != skips-1) { + err = lfs_bd_read(lfs, + NULL, rcache, sizeof(nhead), + nhead, 4*i, &nhead, sizeof(nhead)); + nhead = lfs_fromle32(nhead); + if (err) { + return err; + } + } + } + + *block = nblock; + *off = 4*skips; + return 0; + } + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, nblock); + + // just clear cache and try a new block + lfs_cache_drop(lfs, pcache); + } +} +#endif + +static int lfs_ctz_traverse(lfs_t *lfs, + const lfs_cache_t *pcache, lfs_cache_t *rcache, + lfs_block_t head, lfs_size_t size, + int (*cb)(void*, lfs_block_t), void *data) { + if (size == 0) { + return 0; + } + + lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1}); + + while (true) { + int err = cb(data, head); + if (err) { + return err; + } + + if (index == 0) { + return 0; + } + + lfs_block_t heads[2]; + int count = 2 - (index & 1); + err = lfs_bd_read(lfs, + pcache, rcache, count*sizeof(head), + head, 0, &heads, count*sizeof(head)); + heads[0] = lfs_fromle32(heads[0]); + heads[1] = lfs_fromle32(heads[1]); + if (err) { + return err; + } + + for (int i = 0; i < count-1; i++) { + err = cb(data, heads[i]); + if (err) { + return err; + } + } + + head = heads[count-1]; + index -= count; + } +} + + +/// Top level file operations /// +static int lfs_file_opencfg_(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *cfg) { +#ifndef LFS_READONLY + // deorphan if we haven't yet, needed at most once after poweron + if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) { + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + } +#else + LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY); +#endif + + // setup simple file details + int err; + file->cfg = cfg; + file->flags = flags; + file->pos = 0; + file->off = 0; + file->cache.buffer = NULL; + + // allocate entry for file if it doesn't exist + lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id); + if (tag < 0 && !(tag == LFS_ERR_NOENT && lfs_path_islast(path))) { + err = tag; + goto cleanup; + } + + // get id, add to list of mdirs to catch update changes + file->type = LFS_TYPE_REG; + lfs_mlist_append(lfs, (struct lfs_mlist *)file); + +#ifdef LFS_READONLY + if (tag == LFS_ERR_NOENT) { + err = LFS_ERR_NOENT; + goto cleanup; +#else + if (tag == LFS_ERR_NOENT) { + if (!(flags & LFS_O_CREAT)) { + err = LFS_ERR_NOENT; + goto cleanup; + } + + // 
don't allow trailing slashes + if (lfs_path_isdir(path)) { + err = LFS_ERR_NOTDIR; + goto cleanup; + } + + // check that name fits + lfs_size_t nlen = lfs_path_namelen(path); + if (nlen > lfs->name_max) { + err = LFS_ERR_NAMETOOLONG; + goto cleanup; + } + + // get next slot and create entry to remember name + err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL})); + + // it may happen that the file name doesn't fit in the metadata blocks, e.g., a 256 byte file name will + // not fit in a 128 byte block. + err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err; + if (err) { + goto cleanup; + } + + tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0); + } else if (flags & LFS_O_EXCL) { + err = LFS_ERR_EXIST; + goto cleanup; +#endif + } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) { + err = LFS_ERR_ISDIR; + goto cleanup; +#ifndef LFS_READONLY + } else if (flags & LFS_O_TRUNC) { + // truncate if requested + tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0); + file->flags |= LFS_F_DIRTY; +#endif + } else { + // try to load what's on disk, if it's inlined we'll fix it later + tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz); + if (tag < 0) { + err = tag; + goto cleanup; + } + lfs_ctz_fromle32(&file->ctz); + } + + // fetch attrs + for (unsigned i = 0; i < file->cfg->attr_count; i++) { + // if opened for read / read-write operations + if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) { + lfs_stag_t res = lfs_dir_get(lfs, &file->m, + LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type, + file->id, file->cfg->attrs[i].size), + file->cfg->attrs[i].buffer); + if (res < 0 && res != LFS_ERR_NOENT) { + err = res; + goto cleanup; + } + } + +#ifndef LFS_READONLY + // if opened for write / read-write operations + if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) { + if (file->cfg->attrs[i].size > lfs->attr_max) { + err = LFS_ERR_NOSPC; + goto cleanup; + } + + file->flags |= LFS_F_DIRTY; + } +#endif + } + + // allocate buffer if needed + if (file->cfg->buffer) { + file->cache.buffer = file->cfg->buffer; + } else { + file->cache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!file->cache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // zero to avoid information leak + lfs_cache_zero(lfs, &file->cache); + + if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) { + // load inline files + file->ctz.head = LFS_BLOCK_INLINE; + file->ctz.size = lfs_tag_size(tag); + file->flags |= LFS_F_INLINE; + file->cache.block = file->ctz.head; + file->cache.off = 0; + file->cache.size = lfs->cfg->cache_size; + + // don't always read (may be new/trunc file) + if (file->ctz.size > 0) { + lfs_stag_t res = lfs_dir_get(lfs, &file->m, + LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, file->id, + lfs_min(file->cache.size, 0x3fe)), + file->cache.buffer); + if (res < 0) { + err = res; + goto cleanup; + } + } + } + + return 0; + +cleanup: + // clean up lingering resources +#ifndef LFS_READONLY + file->flags |= LFS_F_ERRED; +#endif + lfs_file_close_(lfs, file); + return err; +} + +#ifndef LFS_NO_MALLOC +static int lfs_file_open_(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags) { + static const struct lfs_file_config defaults = {0}; + int err = lfs_file_opencfg_(lfs, file, path, flags, &defaults); + return err; +} +#endif + +static int lfs_file_close_(lfs_t 
*lfs, lfs_file_t *file) { +#ifndef LFS_READONLY + int err = lfs_file_sync_(lfs, file); +#else + int err = 0; +#endif + + // remove from list of mdirs + lfs_mlist_remove(lfs, (struct lfs_mlist*)file); + + // clean up memory + if (!file->cfg->buffer) { + lfs_free(file->cache.buffer); + } + + return err; +} + + +#ifndef LFS_READONLY +static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) { + while (true) { + // just relocate what exists into new block + lfs_block_t nblock; + int err = lfs_alloc(lfs, &nblock); + if (err) { + return err; + } + + err = lfs_bd_erase(lfs, nblock); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + // either read from dirty cache or disk + for (lfs_off_t i = 0; i < file->off; i++) { + uint8_t data; + if (file->flags & LFS_F_INLINE) { + err = lfs_dir_getread(lfs, &file->m, + // note we evict inline files before they can be dirty + NULL, &file->cache, file->off-i, + LFS_MKTAG(0xfff, 0x1ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), + i, &data, 1); + if (err) { + return err; + } + } else { + err = lfs_bd_read(lfs, + &file->cache, &lfs->rcache, file->off-i, + file->block, i, &data, 1); + if (err) { + return err; + } + } + + err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, true, + nblock, i, &data, 1); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + } + + // copy over new state of file + memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size); + file->cache.block = lfs->pcache.block; + file->cache.off = lfs->pcache.off; + file->cache.size = lfs->pcache.size; + lfs_cache_zero(lfs, &lfs->pcache); + + file->block = nblock; + file->flags |= LFS_F_WRITING; + return 0; + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, nblock); + + // just clear cache and try a new block + lfs_cache_drop(lfs, &lfs->pcache); + } +} +#endif + +#ifndef LFS_READONLY +static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) { + file->off = file->pos; + lfs_alloc_ckpoint(lfs); + int err = lfs_file_relocate(lfs, file); + if (err) { + return err; + } + + file->flags &= ~LFS_F_INLINE; + return 0; +} +#endif + +static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) { + if (file->flags & LFS_F_READING) { + if (!(file->flags & LFS_F_INLINE)) { + lfs_cache_drop(lfs, &file->cache); + } + file->flags &= ~LFS_F_READING; + } + +#ifndef LFS_READONLY + if (file->flags & LFS_F_WRITING) { + lfs_off_t pos = file->pos; + + if (!(file->flags & LFS_F_INLINE)) { + // copy over anything after current branch + lfs_file_t orig = { + .ctz.head = file->ctz.head, + .ctz.size = file->ctz.size, + .flags = LFS_O_RDONLY, + .pos = file->pos, + .cache = lfs->rcache, + }; + lfs_cache_drop(lfs, &lfs->rcache); + + while (file->pos < file->ctz.size) { + // copy over a byte at a time, leave it up to caching + // to make this efficient + uint8_t data; + lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1); + if (res < 0) { + return res; + } + + res = lfs_file_flushedwrite(lfs, file, &data, 1); + if (res < 0) { + return res; + } + + // keep our reference to the rcache in sync + if (lfs->rcache.block != LFS_BLOCK_NULL) { + lfs_cache_drop(lfs, &orig.cache); + lfs_cache_drop(lfs, &lfs->rcache); + } + } + + // write out what we have + while (true) { + int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + return err; + } + + break; + +relocate: + LFS_DEBUG("Bad block at 0x%"PRIx32, file->block); + err = lfs_file_relocate(lfs, file); + if (err) { + 
+                    return err;
+                }
+            }
+        } else {
+            file->pos = lfs_max(file->pos, file->ctz.size);
+        }
+
+        // actual file updates
+        file->ctz.head = file->block;
+        file->ctz.size = file->pos;
+        file->flags &= ~LFS_F_WRITING;
+        file->flags |= LFS_F_DIRTY;
+
+        file->pos = pos;
+    }
+#endif
+
+    return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file) {
+    if (file->flags & LFS_F_ERRED) {
+        // it's not safe to do anything if our file errored
+        return 0;
+    }
+
+    int err = lfs_file_flush(lfs, file);
+    if (err) {
+        file->flags |= LFS_F_ERRED;
+        return err;
+    }
+
+
+    if ((file->flags & LFS_F_DIRTY) &&
+            !lfs_pair_isnull(file->m.pair)) {
+        // before we commit metadata, we need to sync the disk to make sure
+        // data writes don't complete after metadata writes
+        if (!(file->flags & LFS_F_INLINE)) {
+            err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
+            if (err) {
+                return err;
+            }
+        }
+
+        // update dir entry
+        uint16_t type;
+        const void *buffer;
+        lfs_size_t size;
+        struct lfs_ctz ctz;
+        if (file->flags & LFS_F_INLINE) {
+            // inline the whole file
+            type = LFS_TYPE_INLINESTRUCT;
+            buffer = file->cache.buffer;
+            size = file->ctz.size;
+        } else {
+            // update the ctz reference
+            type = LFS_TYPE_CTZSTRUCT;
+            // copy ctz so alloc will work during a relocate
+            ctz = file->ctz;
+            lfs_ctz_tole32(&ctz);
+            buffer = &ctz;
+            size = sizeof(ctz);
+        }
+
+        // commit file data and attributes
+        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
+                {LFS_MKTAG(type, file->id, size), buffer},
+                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
+                    file->cfg->attr_count), file->cfg->attrs}));
+        if (err) {
+            file->flags |= LFS_F_ERRED;
+            return err;
+        }
+
+        file->flags &= ~LFS_F_DIRTY;
+    }
+
+    return 0;
+}
+#endif
+
+static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
+        void *buffer, lfs_size_t size) {
+    uint8_t *data = buffer;
+    lfs_size_t nsize = size;
+
+    if (file->pos >= file->ctz.size) {
+        // eof if past end
+        return 0;
+    }
+
+    size = lfs_min(size, file->ctz.size - file->pos);
+    nsize = size;
+
+    while (nsize > 0) {
+        // check if we need a new block
+        if (!(file->flags & LFS_F_READING) ||
+                file->off == lfs->cfg->block_size) {
+            if (!(file->flags & LFS_F_INLINE)) {
+                int err = lfs_ctz_find(lfs, NULL, &file->cache,
+                        file->ctz.head, file->ctz.size,
+                        file->pos, &file->block, &file->off);
+                if (err) {
+                    return err;
+                }
+            } else {
+                file->block = LFS_BLOCK_INLINE;
+                file->off = file->pos;
+            }
+
+            file->flags |= LFS_F_READING;
+        }
+
+        // read as much as we can in current block
+        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
+        if (file->flags & LFS_F_INLINE) {
+            int err = lfs_dir_getread(lfs, &file->m,
+                    NULL, &file->cache, lfs->cfg->block_size,
+                    LFS_MKTAG(0xfff, 0x1ff, 0),
+                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
+                    file->off, data, diff);
+            if (err) {
+                return err;
+            }
+        } else {
+            int err = lfs_bd_read(lfs,
+                    NULL, &file->cache, lfs->cfg->block_size,
+                    file->block, file->off, data, diff);
+            if (err) {
+                return err;
+            }
+        }
+
+        file->pos += diff;
+        file->off += diff;
+        data += diff;
+        nsize -= diff;
+    }
+
+    return size;
+}
+
+static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
+        void *buffer, lfs_size_t size) {
+    LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
+
+#ifndef LFS_READONLY
+    if (file->flags & LFS_F_WRITING) {
+        // flush out any writes
+        int err = lfs_file_flush(lfs, file);
+        if (err) {
+            return err;
+        }
+    }
+#endif
+
+    return lfs_file_flushedread(lfs, file, buffer, size);
+}
+
+
+#ifndef LFS_READONLY
+static lfs_ssize_t
lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size) { + const uint8_t *data = buffer; + lfs_size_t nsize = size; + + if ((file->flags & LFS_F_INLINE) && + lfs_max(file->pos+nsize, file->ctz.size) > lfs->inline_max) { + // inline file doesn't fit anymore + int err = lfs_file_outline(lfs, file); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } + + while (nsize > 0) { + // check if we need a new block + if (!(file->flags & LFS_F_WRITING) || + file->off == lfs->cfg->block_size) { + if (!(file->flags & LFS_F_INLINE)) { + if (!(file->flags & LFS_F_WRITING) && file->pos > 0) { + // find out which block we're extending from + int err = lfs_ctz_find(lfs, NULL, &file->cache, + file->ctz.head, file->ctz.size, + file->pos-1, &file->block, &(lfs_off_t){0}); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + + // mark cache as dirty since we may have read data into it + lfs_cache_zero(lfs, &file->cache); + } + + // extend file with new blocks + lfs_alloc_ckpoint(lfs); + int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache, + file->block, file->pos, + &file->block, &file->off); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } else { + file->block = LFS_BLOCK_INLINE; + file->off = file->pos; + } + + file->flags |= LFS_F_WRITING; + } + + // program as much as we can in current block + lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off); + while (true) { + int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true, + file->block, file->off, data, diff); + if (err) { + if (err == LFS_ERR_CORRUPT) { + goto relocate; + } + file->flags |= LFS_F_ERRED; + return err; + } + + break; +relocate: + err = lfs_file_relocate(lfs, file); + if (err) { + file->flags |= LFS_F_ERRED; + return err; + } + } + + file->pos += diff; + file->off += diff; + data += diff; + nsize -= diff; + + lfs_alloc_ckpoint(lfs); + } + + return size; +} + +static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size) { + LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY); + + if (file->flags & LFS_F_READING) { + // drop any reads + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + } + + if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) { + file->pos = file->ctz.size; + } + + if (file->pos + size > lfs->file_max) { + // Larger than file limit? 
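+        // (editorial illustration, not upstream code: with the default
+        // LFS_FILE_MAX of 2147483647 bytes, a caller of the public
+        // lfs_file_write would see this case as
+        //
+        //   lfs_ssize_t res = lfs_file_write(&lfs, &file, buf, len);
+        //   if (res == LFS_ERR_FBIG) { /* write would exceed file_max */ }
+        //
+        // where buf/len are assumed application variables)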
+ return LFS_ERR_FBIG; + } + + if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) { + // fill with zeros + lfs_off_t pos = file->pos; + file->pos = file->ctz.size; + + while (file->pos < pos) { + lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1); + if (res < 0) { + return res; + } + } + } + + lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size); + if (nsize < 0) { + return nsize; + } + + file->flags &= ~LFS_F_ERRED; + return nsize; +} +#endif + +static lfs_soff_t lfs_file_seek_(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence) { + // find new pos + // + // fortunately for us, littlefs is limited to 31-bit file sizes, so we + // don't have to worry too much about integer overflow + lfs_off_t npos = file->pos; + if (whence == LFS_SEEK_SET) { + npos = off; + } else if (whence == LFS_SEEK_CUR) { + npos = file->pos + (lfs_off_t)off; + } else if (whence == LFS_SEEK_END) { + npos = (lfs_off_t)lfs_file_size_(lfs, file) + (lfs_off_t)off; + } + + if (npos > lfs->file_max) { + // file position out of range + return LFS_ERR_INVAL; + } + + if (file->pos == npos) { + // noop - position has not changed + return npos; + } + + // if we're only reading and our new offset is still in the file's cache + // we can avoid flushing and needing to reread the data + if ((file->flags & LFS_F_READING) + && file->off != lfs->cfg->block_size) { + int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos}); + lfs_off_t noff = npos; + int nindex = lfs_ctz_index(lfs, &noff); + if (oindex == nindex + && noff >= file->cache.off + && noff < file->cache.off + file->cache.size) { + file->pos = npos; + file->off = noff; + return npos; + } + } + + // write out everything beforehand, may be noop if rdonly + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + + // update pos + file->pos = npos; + return npos; +} + +#ifndef LFS_READONLY +static int lfs_file_truncate_(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) { + LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY); + + if (size > LFS_FILE_MAX) { + return LFS_ERR_INVAL; + } + + lfs_off_t pos = file->pos; + lfs_off_t oldsize = lfs_file_size_(lfs, file); + if (size < oldsize) { + // revert to inline file? 
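+        // (editorial illustration, grounded in lfs_init below: inline_max
+        // defaults to min(cache_size, attr_max, block_size/8), so with e.g.
+        // block_size=4096, cache_size=256, attr_max=1022, a file truncated
+        // to <=256 bytes is pulled back into its metadata pair instead of
+        // keeping a CTZ skip-list on data blocks)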
+ if (size <= lfs->inline_max) { + // flush+seek to head + lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET); + if (res < 0) { + return (int)res; + } + + // read our data into rcache temporarily + lfs_cache_drop(lfs, &lfs->rcache); + res = lfs_file_flushedread(lfs, file, + lfs->rcache.buffer, size); + if (res < 0) { + return (int)res; + } + + file->ctz.head = LFS_BLOCK_INLINE; + file->ctz.size = size; + file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE; + file->cache.block = file->ctz.head; + file->cache.off = 0; + file->cache.size = lfs->cfg->cache_size; + memcpy(file->cache.buffer, lfs->rcache.buffer, size); + + } else { + // need to flush since directly changing metadata + int err = lfs_file_flush(lfs, file); + if (err) { + return err; + } + + // lookup new head in ctz skip list + err = lfs_ctz_find(lfs, NULL, &file->cache, + file->ctz.head, file->ctz.size, + size-1, &file->block, &(lfs_off_t){0}); + if (err) { + return err; + } + + // need to set pos/block/off consistently so seeking back to + // the old position does not get confused + file->pos = size; + file->ctz.head = file->block; + file->ctz.size = size; + file->flags |= LFS_F_DIRTY | LFS_F_READING; + } + } else if (size > oldsize) { + // flush+seek if not already at end + lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_END); + if (res < 0) { + return (int)res; + } + + // fill with zeros + while (file->pos < size) { + res = lfs_file_write_(lfs, file, &(uint8_t){0}, 1); + if (res < 0) { + return (int)res; + } + } + } + + // restore pos + lfs_soff_t res = lfs_file_seek_(lfs, file, pos, LFS_SEEK_SET); + if (res < 0) { + return (int)res; + } + + return 0; +} +#endif + +static lfs_soff_t lfs_file_tell_(lfs_t *lfs, lfs_file_t *file) { + (void)lfs; + return file->pos; +} + +static int lfs_file_rewind_(lfs_t *lfs, lfs_file_t *file) { + lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET); + if (res < 0) { + return (int)res; + } + + return 0; +} + +static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file) { + (void)lfs; + +#ifndef LFS_READONLY + if (file->flags & LFS_F_WRITING) { + return lfs_max(file->pos, file->ctz.size); + } +#endif + + return file->ctz.size; +} + + +/// General fs operations /// +static int lfs_stat_(lfs_t *lfs, const char *path, struct lfs_info *info) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return (int)tag; + } + + // only allow trailing slashes on dirs + if (strchr(path, '/') != NULL + && lfs_tag_type3(tag) != LFS_TYPE_DIR) { + return LFS_ERR_NOTDIR; + } + + return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info); +} + +#ifndef LFS_READONLY +static int lfs_remove_(lfs_t *lfs, const char *path) { + // deorphan if we haven't yet, needed at most once after poweron + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0 || lfs_tag_id(tag) == 0x3ff) { + return (tag < 0) ? 
+            (int)tag : LFS_ERR_INVAL;
+    }
+
+    struct lfs_mlist dir;
+    dir.next = lfs->mlist;
+    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+        // must be empty before removal
+        lfs_block_t pair[2];
+        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
+                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
+        if (res < 0) {
+            return (int)res;
+        }
+        lfs_pair_fromle32(pair);
+
+        err = lfs_dir_fetch(lfs, &dir.m, pair);
+        if (err) {
+            return err;
+        }
+
+        if (dir.m.count > 0 || dir.m.split) {
+            return LFS_ERR_NOTEMPTY;
+        }
+
+        // mark fs as orphaned
+        err = lfs_fs_preporphans(lfs, +1);
+        if (err) {
+            return err;
+        }
+
+        // I know it's crazy but yes, dir can be changed by our parent's
+        // commit (if predecessor is child)
+        dir.type = 0;
+        dir.id = 0;
+        lfs->mlist = &dir;
+    }
+
+    // delete the entry
+    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
+            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
+    if (err) {
+        lfs->mlist = dir.next;
+        return err;
+    }
+
+    lfs->mlist = dir.next;
+    if (lfs_gstate_hasorphans(&lfs->gstate)) {
+        LFS_ASSERT(lfs_tag_type3(tag) == LFS_TYPE_DIR);
+
+        // fix orphan
+        err = lfs_fs_preporphans(lfs, -1);
+        if (err) {
+            return err;
+        }
+
+        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
+        if (err) {
+            return err;
+        }
+
+        err = lfs_dir_drop(lfs, &cwd, &dir.m);
+        if (err) {
+            return err;
+        }
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_rename_(lfs_t *lfs, const char *oldpath, const char *newpath) {
+    // deorphan if we haven't yet, needed at most once after poweron
+    int err = lfs_fs_forceconsistency(lfs);
+    if (err) {
+        return err;
+    }
+
+    // find old entry
+    lfs_mdir_t oldcwd;
+    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
+    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
+        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
+    }
+
+    // find new entry
+    lfs_mdir_t newcwd;
+    uint16_t newid;
+    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
+    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
+            !(prevtag == LFS_ERR_NOENT && lfs_path_islast(newpath))) {
+        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
+    }
+
+    // if we're in the same pair there are a few special cases...
+    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
+    uint16_t newoldid = lfs_tag_id(oldtag);
+
+    struct lfs_mlist prevdir;
+    prevdir.next = lfs->mlist;
+    if (prevtag == LFS_ERR_NOENT) {
+        // if we're a file, don't allow trailing slashes
+        if (lfs_path_isdir(newpath)
+                && lfs_tag_type3(oldtag) != LFS_TYPE_DIR) {
+            return LFS_ERR_NOTDIR;
+        }
+
+        // check that name fits
+        lfs_size_t nlen = lfs_path_namelen(newpath);
+        if (nlen > lfs->name_max) {
+            return LFS_ERR_NAMETOOLONG;
+        }
+
+        // there is a small chance we are being renamed in the same
+        // directory to an id less than our old id, the global update
+        // to handle this is a bit messy
+        if (samepair && newid <= newoldid) {
+            newoldid += 1;
+        }
+    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
+        return (lfs_tag_type3(prevtag) == LFS_TYPE_DIR)
+                ? LFS_ERR_ISDIR
+                : LFS_ERR_NOTDIR;
+    } else if (samepair && newid == newoldid) {
+        // we're renaming to ourselves??
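+        // (editorial note, an assumption mirroring POSIX rename semantics:
+        // a call like lfs_rename(&lfs, "a.txt", "a.txt") resolves to the
+        // same pair and id and lands here, a successful no-op)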
+ return 0; + } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { + // must be empty before removal + lfs_block_t prevpair[2]; + lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair); + if (res < 0) { + return (int)res; + } + lfs_pair_fromle32(prevpair); + + // must be empty before removal + err = lfs_dir_fetch(lfs, &prevdir.m, prevpair); + if (err) { + return err; + } + + if (prevdir.m.count > 0 || prevdir.m.split) { + return LFS_ERR_NOTEMPTY; + } + + // mark fs as orphaned + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } + + // I know it's crazy but yes, dir can be changed by our parent's + // commit (if predecessor is child) + prevdir.type = 0; + prevdir.id = 0; + lfs->mlist = &prevdir; + } + + if (!samepair) { + lfs_fs_prepmove(lfs, newoldid, oldcwd.pair); + } + + // move over all attributes + err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS( + {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT, + LFS_TYPE_DELETE, newid, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL}, + {LFS_MKTAG(lfs_tag_type3(oldtag), + newid, lfs_path_namelen(newpath)), newpath}, + {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd}, + {LFS_MKTAG_IF(samepair, + LFS_TYPE_DELETE, newoldid, 0), NULL})); + if (err) { + lfs->mlist = prevdir.next; + return err; + } + + // let commit clean up after move (if we're different! otherwise move + // logic already fixed it for us) + if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) { + // prep gstate and delete move id + lfs_fs_prepmove(lfs, 0x3ff, NULL); + err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL})); + if (err) { + lfs->mlist = prevdir.next; + return err; + } + } + + lfs->mlist = prevdir.next; + if (lfs_gstate_hasorphans(&lfs->gstate)) { + LFS_ASSERT(prevtag != LFS_ERR_NOENT + && lfs_tag_type3(prevtag) == LFS_TYPE_DIR); + + // fix orphan + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } + + err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd); + if (err) { + return err; + } + + err = lfs_dir_drop(lfs, &newcwd, &prevdir.m); + if (err) { + return err; + } + } + + return 0; +} +#endif + +static lfs_ssize_t lfs_getattr_(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return tag; + } + + uint16_t id = lfs_tag_id(tag); + if (id == 0x3ff) { + // special case for root + id = 0; + int err = lfs_dir_fetch(lfs, &cwd, lfs->root); + if (err) { + return err; + } + } + + tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_USERATTR + type, + id, lfs_min(size, lfs->attr_max)), + buffer); + if (tag < 0) { + if (tag == LFS_ERR_NOENT) { + return LFS_ERR_NOATTR; + } + + return tag; + } + + return lfs_tag_size(tag); +} + +#ifndef LFS_READONLY +static int lfs_commitattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size) { + lfs_mdir_t cwd; + lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL); + if (tag < 0) { + return tag; + } + + uint16_t id = lfs_tag_id(tag); + if (id == 0x3ff) { + // special case for root + id = 0; + int err = lfs_dir_fetch(lfs, &cwd, lfs->root); + if (err) { + return err; + } + } + + return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer})); +} +#endif + +#ifndef LFS_READONLY +static int lfs_setattr_(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t 
+        size) {
+    if (size > lfs->attr_max) {
+        return LFS_ERR_NOSPC;
+    }
+
+    return lfs_commitattr(lfs, path, type, buffer, size);
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_removeattr_(lfs_t *lfs, const char *path, uint8_t type) {
+    return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
+}
+#endif
+
+
+/// Filesystem operations ///
+
+// compile time checks, see lfs.h for why these limits exist
+#if LFS_NAME_MAX > 1022
+#error "Invalid LFS_NAME_MAX, must be <= 1022"
+#endif
+
+#if LFS_FILE_MAX > 2147483647
+#error "Invalid LFS_FILE_MAX, must be <= 2147483647"
+#endif
+
+#if LFS_ATTR_MAX > 1022
+#error "Invalid LFS_ATTR_MAX, must be <= 1022"
+#endif
+
+// common filesystem initialization
+static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
+    lfs->cfg = cfg;
+    lfs->block_count = cfg->block_count;  // May be 0
+    int err = 0;
+
+#ifdef LFS_MULTIVERSION
+    // this driver only supports minor version < current minor version
+    LFS_ASSERT(!lfs->cfg->disk_version || (
+            (0xffff & (lfs->cfg->disk_version >> 16))
+                == LFS_DISK_VERSION_MAJOR
+            && (0xffff & (lfs->cfg->disk_version >> 0))
+                <= LFS_DISK_VERSION_MINOR));
+#endif
+
+    // check that bool is a truthy-preserving type
+    //
+    // note the most common reason for this failure is a pre-C99 compiler,
+    // which littlefs currently does not support
+    LFS_ASSERT((bool)0x80000000);
+
+    // check that the required io functions are provided
+    LFS_ASSERT(lfs->cfg->read != NULL);
+#ifndef LFS_READONLY
+    LFS_ASSERT(lfs->cfg->prog != NULL);
+    LFS_ASSERT(lfs->cfg->erase != NULL);
+    LFS_ASSERT(lfs->cfg->sync != NULL);
+#endif
+
+    // validate that the lfs-cfg sizes were initialized properly before
+    // performing any arithmetic with them
+    LFS_ASSERT(lfs->cfg->read_size != 0);
+    LFS_ASSERT(lfs->cfg->prog_size != 0);
+    LFS_ASSERT(lfs->cfg->cache_size != 0);
+
+    // check that block size is a multiple of cache size is a multiple
+    // of prog and read sizes
+    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
+    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
+    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
+
+    // check that the block size is large enough to fit all ctz pointers
+    LFS_ASSERT(lfs->cfg->block_size >= 128);
+    // this is the exact calculation for all ctz pointers, if this fails
+    // and the simpler assert above does not, math must be broken
+    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
+            <= lfs->cfg->block_size);
+
+    // block_cycles = 0 is no longer supported.
+    //
+    // block_cycles is the number of erase cycles before littlefs evicts
+    // metadata logs as a part of wear leveling. Suggested values are in the
+    // range of 100-1000, or set block_cycles to -1 to disable block-level
+    // wear-leveling.
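+    //
+    // For illustration only (an editorial sketch; the values below are
+    // assumptions, not requirements), a configuration that satisfies the
+    // asserts in this function might look like:
+    //
+    //   const struct lfs_config cfg = {
+    //       .read  = user_read,  .prog = user_prog,   // user block-device
+    //       .erase = user_erase, .sync = user_sync,   // callbacks (assumed)
+    //       .read_size = 16, .prog_size = 16,
+    //       .block_size = 4096, .block_count = 128,
+    //       .cache_size = 64, .lookahead_size = 16,
+    //       .block_cycles = 500,  // evict metadata logs after ~500 erases
+    //   };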
+ LFS_ASSERT(lfs->cfg->block_cycles != 0); + + // check that compact_thresh makes sense + // + // metadata can't be compacted below block_size/2, and metadata can't + // exceed a block_size + LFS_ASSERT(lfs->cfg->compact_thresh == 0 + || lfs->cfg->compact_thresh >= lfs->cfg->block_size/2); + LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1 + || lfs->cfg->compact_thresh <= lfs->cfg->block_size); + + // check that metadata_max is a multiple of read_size and prog_size, + // and a factor of the block_size + LFS_ASSERT(!lfs->cfg->metadata_max + || lfs->cfg->metadata_max % lfs->cfg->read_size == 0); + LFS_ASSERT(!lfs->cfg->metadata_max + || lfs->cfg->metadata_max % lfs->cfg->prog_size == 0); + LFS_ASSERT(!lfs->cfg->metadata_max + || lfs->cfg->block_size % lfs->cfg->metadata_max == 0); + + // setup read cache + if (lfs->cfg->read_buffer) { + lfs->rcache.buffer = lfs->cfg->read_buffer; + } else { + lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!lfs->rcache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // setup program cache + if (lfs->cfg->prog_buffer) { + lfs->pcache.buffer = lfs->cfg->prog_buffer; + } else { + lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size); + if (!lfs->pcache.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // zero to avoid information leaks + lfs_cache_zero(lfs, &lfs->rcache); + lfs_cache_zero(lfs, &lfs->pcache); + + // setup lookahead buffer, note mount finishes initializing this after + // we establish a decent pseudo-random seed + LFS_ASSERT(lfs->cfg->lookahead_size > 0); + if (lfs->cfg->lookahead_buffer) { + lfs->lookahead.buffer = lfs->cfg->lookahead_buffer; + } else { + lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size); + if (!lfs->lookahead.buffer) { + err = LFS_ERR_NOMEM; + goto cleanup; + } + } + + // check that the size limits are sane + LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX); + lfs->name_max = lfs->cfg->name_max; + if (!lfs->name_max) { + lfs->name_max = LFS_NAME_MAX; + } + + LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX); + lfs->file_max = lfs->cfg->file_max; + if (!lfs->file_max) { + lfs->file_max = LFS_FILE_MAX; + } + + LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX); + lfs->attr_max = lfs->cfg->attr_max; + if (!lfs->attr_max) { + lfs->attr_max = LFS_ATTR_MAX; + } + + LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size); + + LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1 + || lfs->cfg->inline_max <= lfs->cfg->cache_size); + LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1 + || lfs->cfg->inline_max <= lfs->attr_max); + LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1 + || lfs->cfg->inline_max <= ((lfs->cfg->metadata_max) + ? lfs->cfg->metadata_max + : lfs->cfg->block_size)/8); + lfs->inline_max = lfs->cfg->inline_max; + if (lfs->inline_max == (lfs_size_t)-1) { + lfs->inline_max = 0; + } else if (lfs->inline_max == 0) { + lfs->inline_max = lfs_min( + lfs->cfg->cache_size, + lfs_min( + lfs->attr_max, + ((lfs->cfg->metadata_max) + ? 
lfs->cfg->metadata_max + : lfs->cfg->block_size)/8)); + } + + // setup default state + lfs->root[0] = LFS_BLOCK_NULL; + lfs->root[1] = LFS_BLOCK_NULL; + lfs->mlist = NULL; + lfs->seed = 0; + lfs->gdisk = (lfs_gstate_t){0}; + lfs->gstate = (lfs_gstate_t){0}; + lfs->gdelta = (lfs_gstate_t){0}; +#ifdef LFS_MIGRATE + lfs->lfs1 = NULL; +#endif + + return 0; + +cleanup: + lfs_deinit(lfs); + return err; +} + +static int lfs_deinit(lfs_t *lfs) { + // free allocated memory + if (!lfs->cfg->read_buffer) { + lfs_free(lfs->rcache.buffer); + } + + if (!lfs->cfg->prog_buffer) { + lfs_free(lfs->pcache.buffer); + } + + if (!lfs->cfg->lookahead_buffer) { + lfs_free(lfs->lookahead.buffer); + } + + return 0; +} + + + +#ifndef LFS_READONLY +static int lfs_format_(lfs_t *lfs, const struct lfs_config *cfg) { + int err = 0; + { + err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + LFS_ASSERT(cfg->block_count != 0); + + // create free lookahead + memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size); + lfs->lookahead.start = 0; + lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size, + lfs->block_count); + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); + + // create root dir + lfs_mdir_t root; + err = lfs_dir_alloc(lfs, &root); + if (err) { + goto cleanup; + } + + // write one superblock + lfs_superblock_t superblock = { + .version = lfs_fs_disk_version(lfs), + .block_size = lfs->cfg->block_size, + .block_count = lfs->block_count, + .name_max = lfs->name_max, + .file_max = lfs->file_max, + .attr_max = lfs->attr_max, + }; + + lfs_superblock_tole32(&superblock); + err = lfs_dir_commit(lfs, &root, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock})); + if (err) { + goto cleanup; + } + + // force compaction to prevent accidentally mounting any + // older version of littlefs that may live on disk + root.erased = false; + err = lfs_dir_commit(lfs, &root, NULL, 0); + if (err) { + goto cleanup; + } + + // sanity check that fetch works + err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + } + +cleanup: + lfs_deinit(lfs); + return err; + +} +#endif + +struct lfs_tortoise_t { + lfs_block_t pair[2]; + lfs_size_t i; + lfs_size_t period; +}; + +static int lfs_tortoise_detectcycles( + const lfs_mdir_t *dir, struct lfs_tortoise_t *tortoise) { + // detect cycles with Brent's algorithm + if (lfs_pair_issync(dir->tail, tortoise->pair)) { + LFS_WARN("Cycle detected in tail list"); + return LFS_ERR_CORRUPT; + } + if (tortoise->i == tortoise->period) { + tortoise->pair[0] = dir->tail[0]; + tortoise->pair[1] = dir->tail[1]; + tortoise->i = 0; + tortoise->period *= 2; + } + tortoise->i += 1; + + return LFS_ERR_OK; +} + +static int lfs_mount_(lfs_t *lfs, const struct lfs_config *cfg) { + int err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + // scan directory blocks for superblock and any global updates + lfs_mdir_t dir = {.tail = {0, 1}}; + struct lfs_tortoise_t tortoise = { + .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL}, + .i = 1, + .period = 1, + }; + while (!lfs_pair_isnull(dir.tail)) { + err = lfs_tortoise_detectcycles(&dir, &tortoise); + if (err < 0) { + goto cleanup; + } + + // fetch next block in tail list + lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail, + LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), + NULL, + lfs_dir_find_match, &(struct lfs_dir_find_match){ + lfs, "littlefs", 8}); + if (tag 
< 0) { + err = tag; + goto cleanup; + } + + // has superblock? + if (tag && !lfs_tag_isdelete(tag)) { + // update root + lfs->root[0] = dir.pair[0]; + lfs->root[1] = dir.pair[1]; + + // grab superblock + lfs_superblock_t superblock; + tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock); + if (tag < 0) { + err = tag; + goto cleanup; + } + lfs_superblock_fromle32(&superblock); + + // check version + uint16_t major_version = (0xffff & (superblock.version >> 16)); + uint16_t minor_version = (0xffff & (superblock.version >> 0)); + if (major_version != lfs_fs_disk_version_major(lfs) + || minor_version > lfs_fs_disk_version_minor(lfs)) { + LFS_ERROR("Invalid version " + "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16, + major_version, + minor_version, + lfs_fs_disk_version_major(lfs), + lfs_fs_disk_version_minor(lfs)); + err = LFS_ERR_INVAL; + goto cleanup; + } + + // found older minor version? set an in-device only bit in the + // gstate so we know we need to rewrite the superblock before + // the first write + bool needssuperblock = false; + if (minor_version < lfs_fs_disk_version_minor(lfs)) { + LFS_DEBUG("Found older minor version " + "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16, + major_version, + minor_version, + lfs_fs_disk_version_major(lfs), + lfs_fs_disk_version_minor(lfs)); + needssuperblock = true; + } + // note this bit is reserved on disk, so fetching more gstate + // will not interfere here + lfs_fs_prepsuperblock(lfs, needssuperblock); + + // check superblock configuration + if (superblock.name_max) { + if (superblock.name_max > lfs->name_max) { + LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")", + superblock.name_max, lfs->name_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->name_max = superblock.name_max; + } + + if (superblock.file_max) { + if (superblock.file_max > lfs->file_max) { + LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")", + superblock.file_max, lfs->file_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->file_max = superblock.file_max; + } + + if (superblock.attr_max) { + if (superblock.attr_max > lfs->attr_max) { + LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")", + superblock.attr_max, lfs->attr_max); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->attr_max = superblock.attr_max; + + // we also need to update inline_max in case attr_max changed + lfs->inline_max = lfs_min(lfs->inline_max, lfs->attr_max); + } + + // this is where we get the block_count from disk if block_count=0 + if (lfs->cfg->block_count + && superblock.block_count != lfs->cfg->block_count) { + LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")", + superblock.block_count, lfs->cfg->block_count); + err = LFS_ERR_INVAL; + goto cleanup; + } + + lfs->block_count = superblock.block_count; + + if (superblock.block_size != lfs->cfg->block_size) { + LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")", + superblock.block_size, lfs->cfg->block_size); + err = LFS_ERR_INVAL; + goto cleanup; + } + } + + // has gstate? 
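+        // (editorial note, hedged: lfs_dir_getgstate folds each mdir's
+        // stored gstate delta into lfs->gstate by xor, so after walking the
+        // whole tail list the combined value reflects any interrupted
+        // move/orphan regardless of traversal order; see lfs_dir_getgstate
+        // for the authoritative behavior)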
+ err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate); + if (err) { + goto cleanup; + } + } + + // update littlefs with gstate + if (!lfs_gstate_iszero(&lfs->gstate)) { + LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32, + lfs->gstate.tag, + lfs->gstate.pair[0], + lfs->gstate.pair[1]); + } + lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag); + lfs->gdisk = lfs->gstate; + + // setup free lookahead, to distribute allocations uniformly across + // boots, we start the allocator at a random location + lfs->lookahead.start = lfs->seed % lfs->block_count; + lfs_alloc_drop(lfs); + + return 0; + +cleanup: + lfs_unmount_(lfs); + return err; +} + +static int lfs_unmount_(lfs_t *lfs) { + return lfs_deinit(lfs); +} + + +/// Filesystem filesystem operations /// +static int lfs_fs_stat_(lfs_t *lfs, struct lfs_fsinfo *fsinfo) { + // if the superblock is up-to-date, we must be on the most recent + // minor version of littlefs + if (!lfs_gstate_needssuperblock(&lfs->gstate)) { + fsinfo->disk_version = lfs_fs_disk_version(lfs); + + // otherwise we need to read the minor version on disk + } else { + // fetch the superblock + lfs_mdir_t dir; + int err = lfs_dir_fetch(lfs, &dir, lfs->root); + if (err) { + return err; + } + + lfs_superblock_t superblock; + lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock); + if (tag < 0) { + return tag; + } + lfs_superblock_fromle32(&superblock); + + // read the on-disk version + fsinfo->disk_version = superblock.version; + } + + // filesystem geometry + fsinfo->block_size = lfs->cfg->block_size; + fsinfo->block_count = lfs->block_count; + + // other on-disk configuration, we cache all of these for internal use + fsinfo->name_max = lfs->name_max; + fsinfo->file_max = lfs->file_max; + fsinfo->attr_max = lfs->attr_max; + + return 0; +} + +int lfs_fs_traverse_(lfs_t *lfs, + int (*cb)(void *data, lfs_block_t block), void *data, + bool includeorphans) { + // iterate over metadata pairs + lfs_mdir_t dir = {.tail = {0, 1}}; + +#ifdef LFS_MIGRATE + // also consider v1 blocks during migration + if (lfs->lfs1) { + int err = lfs1_traverse(lfs, cb, data); + if (err) { + return err; + } + + dir.tail[0] = lfs->root[0]; + dir.tail[1] = lfs->root[1]; + } +#endif + + struct lfs_tortoise_t tortoise = { + .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL}, + .i = 1, + .period = 1, + }; + int err = LFS_ERR_OK; + while (!lfs_pair_isnull(dir.tail)) { + err = lfs_tortoise_detectcycles(&dir, &tortoise); + if (err < 0) { + return LFS_ERR_CORRUPT; + } + + for (int i = 0; i < 2; i++) { + int err = cb(data, dir.tail[i]); + if (err) { + return err; + } + } + + // iterate through ids in directory + int err = lfs_dir_fetch(lfs, &dir, dir.tail); + if (err) { + return err; + } + + for (uint16_t id = 0; id < dir.count; id++) { + struct lfs_ctz ctz; + lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0), + LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz); + if (tag < 0) { + if (tag == LFS_ERR_NOENT) { + continue; + } + return tag; + } + lfs_ctz_fromle32(&ctz); + + if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) { + err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache, + ctz.head, ctz.size, cb, data); + if (err) { + return err; + } + } else if (includeorphans && + lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) { + for (int i = 0; i < 2; i++) { + err = cb(data, (&ctz.head)[i]); + if (err) { + return err; + } + } + } + } + } + +#ifndef LFS_READONLY + // iterate over any open files + for (lfs_file_t *f = 
+            (lfs_file_t*)lfs->mlist; f; f = f->next) {
+        if (f->type != LFS_TYPE_REG) {
+            continue;
+        }
+
+        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
+            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+                    f->ctz.head, f->ctz.size, cb, data);
+            if (err) {
+                return err;
+            }
+        }
+
+        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
+            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
+                    f->block, f->pos, cb, data);
+            if (err) {
+                return err;
+            }
+        }
+    }
+#endif
+
+    return 0;
+}
+
+#ifndef LFS_READONLY
+static int lfs_fs_pred(lfs_t *lfs,
+        const lfs_block_t pair[2], lfs_mdir_t *pdir) {
+    // iterate over all directory entries
+    pdir->tail[0] = 0;
+    pdir->tail[1] = 1;
+    struct lfs_tortoise_t tortoise = {
+        .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+        .i = 1,
+        .period = 1,
+    };
+    int err = LFS_ERR_OK;
+    while (!lfs_pair_isnull(pdir->tail)) {
+        err = lfs_tortoise_detectcycles(pdir, &tortoise);
+        if (err < 0) {
+            return LFS_ERR_CORRUPT;
+        }
+
+        if (lfs_pair_cmp(pdir->tail, pair) == 0) {
+            return 0;
+        }
+
+        int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
+        if (err) {
+            return err;
+        }
+    }
+
+    return LFS_ERR_NOENT;
+}
+#endif
+
+#ifndef LFS_READONLY
+struct lfs_fs_parent_match {
+    lfs_t *lfs;
+    const lfs_block_t pair[2];
+};
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_parent_match(void *data,
+        lfs_tag_t tag, const void *buffer) {
+    struct lfs_fs_parent_match *find = data;
+    lfs_t *lfs = find->lfs;
+    const struct lfs_diskoff *disk = buffer;
+    (void)tag;
+
+    lfs_block_t child[2];
+    int err = lfs_bd_read(lfs,
+            &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
+            disk->block, disk->off, &child, sizeof(child));
+    if (err) {
+        return err;
+    }
+
+    lfs_pair_fromle32(child);
+    return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
+}
+#endif
+
+#ifndef LFS_READONLY
+static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
+        lfs_mdir_t *parent) {
+    // use fetchmatch with callback to find pairs
+    parent->tail[0] = 0;
+    parent->tail[1] = 1;
+    struct lfs_tortoise_t tortoise = {
+        .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
+        .i = 1,
+        .period = 1,
+    };
+    int err = LFS_ERR_OK;
+    while (!lfs_pair_isnull(parent->tail)) {
+        err = lfs_tortoise_detectcycles(parent, &tortoise);
+        if (err < 0) {
+            return err;
+        }
+
+        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
+                LFS_MKTAG(0x7ff, 0, 0x3ff),
+                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
+                NULL,
+                lfs_fs_parent_match, &(struct lfs_fs_parent_match){
+                    lfs, {pair[0], pair[1]}});
+        if (tag && tag != LFS_ERR_NOENT) {
+            return tag;
+        }
+    }
+
+    return LFS_ERR_NOENT;
+}
+#endif
+
+static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
+    lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
+            | (uint32_t)needssuperblock << 9;
+}
+
+#ifndef LFS_READONLY
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
+    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
+    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
+    lfs->gstate.tag += orphans;
+    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
+            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static void lfs_fs_prepmove(lfs_t *lfs,
+        uint16_t id, const lfs_block_t pair[2]) {
+    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
+            ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
+    lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
+    lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_desuperblock(lfs_t *lfs) {
+    if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
+        return 0;
+    }
+
+    LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
+            lfs->root[0],
+            lfs->root[1]);
+
+    lfs_mdir_t root;
+    int err = lfs_dir_fetch(lfs, &root, lfs->root);
+    if (err) {
+        return err;
+    }
+
+    // write a new superblock
+    lfs_superblock_t superblock = {
+        .version = lfs_fs_disk_version(lfs),
+        .block_size = lfs->cfg->block_size,
+        .block_count = lfs->block_count,
+        .name_max = lfs->name_max,
+        .file_max = lfs->file_max,
+        .attr_max = lfs->attr_max,
+    };
+
+    lfs_superblock_tole32(&superblock);
+    err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+            {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+                &superblock}));
+    if (err) {
+        return err;
+    }
+
+    lfs_fs_prepsuperblock(lfs, false);
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_demove(lfs_t *lfs) {
+    if (!lfs_gstate_hasmove(&lfs->gdisk)) {
+        return 0;
+    }
+
+    // Fix bad moves
+    LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
+            lfs->gdisk.pair[0],
+            lfs->gdisk.pair[1],
+            lfs_tag_id(lfs->gdisk.tag));
+
+    // no other gstate is supported at this time, so if we found something else
+    // something most likely went wrong in gstate calculation
+    LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);
+
+    // fetch and delete the moved entry
+    lfs_mdir_t movedir;
+    int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
+    if (err) {
+        return err;
+    }
+
+    // prep gstate and delete move id
+    uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
+    lfs_fs_prepmove(lfs, 0x3ff, NULL);
+    err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
+            {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
+    if (err) {
+        return err;
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
+    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
+        return 0;
+    }
+
+    // Check for orphans in two separate passes:
+    // - 1 for half-orphans (relocations)
+    // - 2 for full-orphans (removes/renames)
+    //
+    // Two separate passes are needed as half-orphans can contain outdated
+    // references to full-orphans, effectively hiding them from the deorphan
+    // search.
+    //
+    int pass = 0;
+    while (pass < 2) {
+        // Fix any orphans
+        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
+        lfs_mdir_t dir;
+        bool moreorphans = false;
+
+        // iterate over all directory entries
+        while (!lfs_pair_isnull(pdir.tail)) {
+            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
+            if (err) {
+                return err;
+            }
+
+            // check head blocks for orphans
+            if (!pdir.split) {
+                // check if we have a parent
+                lfs_mdir_t parent;
+                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
+                if (tag < 0 && tag != LFS_ERR_NOENT) {
+                    return tag;
+                }
+
+                if (pass == 0 && tag != LFS_ERR_NOENT) {
+                    lfs_block_t pair[2];
+                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
+                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
+                    if (state < 0) {
+                        return state;
+                    }
+                    lfs_pair_fromle32(pair);
+
+                    if (!lfs_pair_issync(pair, pdir.tail)) {
+                        // we have desynced
+                        LFS_DEBUG("Fixing half-orphan "
+                                "{0x%"PRIx32", 0x%"PRIx32"} "
+                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
+                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
+
+                        // fix pending move in this pair? this looks like an
+                        // optimization but is in fact _required_ since
+                        // relocating may outdate the move.
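+                        // (editorial illustration of the failure mode:
+                        // suppose gstate records "delete id N in pair {a,b}"
+                        // and this commit rewrites {a,b}; if the delete did
+                        // not travel with the commit, the pending move could
+                        // resurface after the pair relocates)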
+ uint16_t moveid = 0x3ff; + if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) { + moveid = lfs_tag_id(lfs->gstate.tag); + LFS_DEBUG("Fixing move while fixing orphans " + "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n", + pdir.pair[0], pdir.pair[1], moveid); + lfs_fs_prepmove(lfs, 0x3ff, NULL); + } + + lfs_pair_tole32(pair); + state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS( + {LFS_MKTAG_IF(moveid != 0x3ff, + LFS_TYPE_DELETE, moveid, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), + pair})); + lfs_pair_fromle32(pair); + if (state < 0) { + return state; + } + + // did our commit create more orphans? + if (state == LFS_OK_ORPHANED) { + moreorphans = true; + } + + // refetch tail + continue; + } + } + + // note we only check for full orphans if we may have had a + // power-loss, otherwise orphans are created intentionally + // during operations such as lfs_mkdir + if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) { + // we are an orphan + LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}", + pdir.tail[0], pdir.tail[1]); + + // steal state + err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta); + if (err) { + return err; + } + + // steal tail + lfs_pair_tole32(dir.tail); + int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8), + dir.tail})); + lfs_pair_fromle32(dir.tail); + if (state < 0) { + return state; + } + + // did our commit create more orphans? + if (state == LFS_OK_ORPHANED) { + moreorphans = true; + } + + // refetch tail + continue; + } + } + + pdir = dir; + } + + pass = moreorphans ? 0 : pass+1; + } + + // mark orphans as fixed + return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate)); +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_forceconsistency(lfs_t *lfs) { + int err = lfs_fs_desuperblock(lfs); + if (err) { + return err; + } + + err = lfs_fs_demove(lfs); + if (err) { + return err; + } + + err = lfs_fs_deorphan(lfs, true); + if (err) { + return err; + } + + return 0; +} +#endif + +#ifndef LFS_READONLY +static int lfs_fs_mkconsistent_(lfs_t *lfs) { + // lfs_fs_forceconsistency does most of the work here + int err = lfs_fs_forceconsistency(lfs); + if (err) { + return err; + } + + // do we have any pending gstate? + lfs_gstate_t delta = {0}; + lfs_gstate_xor(&delta, &lfs->gdisk); + lfs_gstate_xor(&delta, &lfs->gstate); + if (!lfs_gstate_iszero(&delta)) { + // lfs_dir_commit will implicitly write out any pending gstate + lfs_mdir_t root; + err = lfs_dir_fetch(lfs, &root, lfs->root); + if (err) { + return err; + } + + err = lfs_dir_commit(lfs, &root, NULL, 0); + if (err) { + return err; + } + } + + return 0; +} +#endif + +static int lfs_fs_size_count(void *p, lfs_block_t block) { + (void)block; + lfs_size_t *size = p; + *size += 1; + return 0; +} + +static lfs_ssize_t lfs_fs_size_(lfs_t *lfs) { + lfs_size_t size = 0; + int err = lfs_fs_traverse_(lfs, lfs_fs_size_count, &size, false); + if (err) { + return err; + } + + return size; +} + +// explicit garbage collection +#ifndef LFS_READONLY +static int lfs_fs_gc_(lfs_t *lfs) { + // force consistency, even if we're not necessarily going to write, + // because this function is supposed to take care of janitorial work + // isn't it? 
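+    //
+    // usage sketch (editorial; assumes the public lfs_fs_gc wrapper and an
+    // application-level idle loop):
+    //
+    //   // pay down compaction and allocator-scan costs while idle
+    //   int err = lfs_fs_gc(&lfs);
+    //   if (err) { /* handle error */ }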
+    int err = lfs_fs_forceconsistency(lfs);
+    if (err) {
+        return err;
+    }
+
+    // try to compact metadata pairs, note we can't really accomplish
+    // anything if compact_thresh doesn't at least leave a prog_size
+    // available
+    if (lfs->cfg->compact_thresh
+            < lfs->cfg->block_size - lfs->cfg->prog_size) {
+        // iterate over all mdirs
+        lfs_mdir_t mdir = {.tail = {0, 1}};
+        while (!lfs_pair_isnull(mdir.tail)) {
+            err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
+            if (err) {
+                return err;
+            }
+
+            // not erased? exceeds our compaction threshold?
+            if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
+                    ? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
+                    : mdir.off > lfs->cfg->compact_thresh)) {
+                // the easiest way to trigger a compaction is to mark
+                // the mdir as unerased and add an empty commit
+                mdir.erased = false;
+                err = lfs_dir_commit(lfs, &mdir, NULL, 0);
+                if (err) {
+                    return err;
+                }
+            }
+        }
+    }
+
+    // try to populate the lookahead buffer, unless it's already full
+    if (lfs->lookahead.size < lfs_min(
+            8 * lfs->cfg->lookahead_size,
+            lfs->block_count)) {
+        err = lfs_alloc_scan(lfs);
+        if (err) {
+            return err;
+        }
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef LFS_READONLY
+#ifdef LFS_SHRINKNONRELOCATING
+static int lfs_shrink_checkblock(void *data, lfs_block_t block) {
+    lfs_size_t threshold = *((lfs_size_t*)data);
+    if (block >= threshold) {
+        return LFS_ERR_NOTEMPTY;
+    }
+    return 0;
+}
+#endif
+
+static int lfs_fs_grow_(lfs_t *lfs, lfs_size_t block_count) {
+    int err;
+
+    if (block_count == lfs->block_count) {
+        return 0;
+    }
+
+
+#ifndef LFS_SHRINKNONRELOCATING
+    // shrinking is not supported
+    LFS_ASSERT(block_count >= lfs->block_count);
+#endif
+#ifdef LFS_SHRINKNONRELOCATING
+    if (block_count < lfs->block_count) {
+        err = lfs_fs_traverse_(lfs, lfs_shrink_checkblock, &block_count, true);
+        if (err) {
+            return err;
+        }
+    }
+#endif
+
+    lfs->block_count = block_count;
+
+    // fetch the root
+    lfs_mdir_t root;
+    err = lfs_dir_fetch(lfs, &root, lfs->root);
+    if (err) {
+        return err;
+    }
+
+    // update the superblock
+    lfs_superblock_t superblock;
+    lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
+            LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
+            &superblock);
+    if (tag < 0) {
+        return tag;
+    }
+    lfs_superblock_fromle32(&superblock);
+
+    superblock.block_count = lfs->block_count;
+
+    lfs_superblock_tole32(&superblock);
+    err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
+            {tag, &superblock}));
+    if (err) {
+        return err;
+    }
+    return 0;
+}
+#endif
+
+#ifdef LFS_MIGRATE
+////// Migration from littlefs v1 below this //////
+
+/// Version info ///
+
+// Software library version
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS1_VERSION 0x00010007
+#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
+#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
+
+// Version of On-disk data structures
+// Major (top-nibble), incremented on backwards incompatible changes
+// Minor (bottom-nibble), incremented on feature additions
+#define LFS1_DISK_VERSION 0x00010001
+#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
+#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
+
+
+/// v1 Definitions ///
+
+// File types
+enum lfs1_type {
+    LFS1_TYPE_REG        = 0x11,
+    LFS1_TYPE_DIR        = 0x22,
+    LFS1_TYPE_SUPERBLOCK = 0x2e,
+};
+
+typedef struct lfs1 {
+    lfs_block_t root[2];
+} lfs1_t;
+
+typedef struct lfs1_entry {
+    lfs_off_t off;
+
+    struct 
lfs1_disk_entry { + uint8_t type; + uint8_t elen; + uint8_t alen; + uint8_t nlen; + union { + struct { + lfs_block_t head; + lfs_size_t size; + } file; + lfs_block_t dir[2]; + } u; + } d; +} lfs1_entry_t; + +typedef struct lfs1_dir { + struct lfs1_dir *next; + lfs_block_t pair[2]; + lfs_off_t off; + + lfs_block_t head[2]; + lfs_off_t pos; + + struct lfs1_disk_dir { + uint32_t rev; + lfs_size_t size; + lfs_block_t tail[2]; + } d; +} lfs1_dir_t; + +typedef struct lfs1_superblock { + lfs_off_t off; + + struct lfs1_disk_superblock { + uint8_t type; + uint8_t elen; + uint8_t alen; + uint8_t nlen; + lfs_block_t root[2]; + uint32_t block_size; + uint32_t block_count; + uint32_t version; + char magic[8]; + } d; +} lfs1_superblock_t; + + +/// Low-level wrappers v1->v2 /// +static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) { + *crc = lfs_crc(*crc, buffer, size); +} + +static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size) { + // if we ever do more than writes to alternating pairs, + // this may need to consider pcache + return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size, + block, off, buffer, size); +} + +static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block, + lfs_off_t off, lfs_size_t size, uint32_t *crc) { + for (lfs_off_t i = 0; i < size; i++) { + uint8_t c; + int err = lfs1_bd_read(lfs, block, off+i, &c, 1); + if (err) { + return err; + } + + lfs1_crc(crc, &c, 1); + } + + return 0; +} + + +/// Endian swapping functions /// +static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) { + d->rev = lfs_fromle32(d->rev); + d->size = lfs_fromle32(d->size); + d->tail[0] = lfs_fromle32(d->tail[0]); + d->tail[1] = lfs_fromle32(d->tail[1]); +} + +static void lfs1_dir_tole32(struct lfs1_disk_dir *d) { + d->rev = lfs_tole32(d->rev); + d->size = lfs_tole32(d->size); + d->tail[0] = lfs_tole32(d->tail[0]); + d->tail[1] = lfs_tole32(d->tail[1]); +} + +static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) { + d->u.dir[0] = lfs_fromle32(d->u.dir[0]); + d->u.dir[1] = lfs_fromle32(d->u.dir[1]); +} + +static void lfs1_entry_tole32(struct lfs1_disk_entry *d) { + d->u.dir[0] = lfs_tole32(d->u.dir[0]); + d->u.dir[1] = lfs_tole32(d->u.dir[1]); +} + +static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) { + d->root[0] = lfs_fromle32(d->root[0]); + d->root[1] = lfs_fromle32(d->root[1]); + d->block_size = lfs_fromle32(d->block_size); + d->block_count = lfs_fromle32(d->block_count); + d->version = lfs_fromle32(d->version); +} + + +///// Metadata pair and directory operations /// +static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) { + return 4 + entry->d.elen + entry->d.alen + entry->d.nlen; +} + +static int lfs1_dir_fetch(lfs_t *lfs, + lfs1_dir_t *dir, const lfs_block_t pair[2]) { + // copy out pair, otherwise may be aliasing dir + const lfs_block_t tpair[2] = {pair[0], pair[1]}; + bool valid = false; + + // check both blocks for the most recent revision + for (int i = 0; i < 2; i++) { + struct lfs1_disk_dir test; + int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test)); + lfs1_dir_fromle32(&test); + if (err) { + if (err == LFS_ERR_CORRUPT) { + continue; + } + return err; + } + + if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) { + continue; + } + + if ((0x7fffffff & test.size) < sizeof(test)+4 || + (0x7fffffff & test.size) > lfs->cfg->block_size) { + continue; + } + + uint32_t crc = 0xffffffff; + lfs1_dir_tole32(&test); + lfs1_crc(&crc, &test, sizeof(test)); + lfs1_dir_fromle32(&test); + err = lfs1_bd_crc(lfs, 
tpair[i], sizeof(test), + (0x7fffffff & test.size) - sizeof(test), &crc); + if (err) { + if (err == LFS_ERR_CORRUPT) { + continue; + } + return err; + } + + if (crc != 0) { + continue; + } + + valid = true; + + // setup dir in case it's valid + dir->pair[0] = tpair[(i+0) % 2]; + dir->pair[1] = tpair[(i+1) % 2]; + dir->off = sizeof(dir->d); + dir->d = test; + } + + if (!valid) { + LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}", + tpair[0], tpair[1]); + return LFS_ERR_CORRUPT; + } + + return 0; +} + +static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) { + while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) { + if (!(0x80000000 & dir->d.size)) { + entry->off = dir->off; + return LFS_ERR_NOENT; + } + + int err = lfs1_dir_fetch(lfs, dir, dir->d.tail); + if (err) { + return err; + } + + dir->off = sizeof(dir->d); + dir->pos += sizeof(dir->d) + 4; + } + + int err = lfs1_bd_read(lfs, dir->pair[0], dir->off, + &entry->d, sizeof(entry->d)); + lfs1_entry_fromle32(&entry->d); + if (err) { + return err; + } + + entry->off = dir->off; + dir->off += lfs1_entry_size(entry); + dir->pos += lfs1_entry_size(entry); + return 0; +} + +/// littlefs v1 specific operations /// +int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) { + if (lfs_pair_isnull(lfs->lfs1->root)) { + return 0; + } + + // iterate over metadata pairs + lfs1_dir_t dir; + lfs1_entry_t entry; + lfs_block_t cwd[2] = {0, 1}; + + while (true) { + for (int i = 0; i < 2; i++) { + int err = cb(data, cwd[i]); + if (err) { + return err; + } + } + + int err = lfs1_dir_fetch(lfs, &dir, cwd); + if (err) { + return err; + } + + // iterate over contents + while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) { + err = lfs1_bd_read(lfs, dir.pair[0], dir.off, + &entry.d, sizeof(entry.d)); + lfs1_entry_fromle32(&entry.d); + if (err) { + return err; + } + + dir.off += lfs1_entry_size(&entry); + if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) { + err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache, + entry.d.u.file.head, entry.d.u.file.size, cb, data); + if (err) { + return err; + } + } + } + + // we also need to check if we contain a threaded v2 directory + lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}}; + while (dir2.split) { + err = lfs_dir_fetch(lfs, &dir2, dir2.tail); + if (err) { + break; + } + + for (int i = 0; i < 2; i++) { + err = cb(data, dir2.pair[i]); + if (err) { + return err; + } + } + } + + cwd[0] = dir.d.tail[0]; + cwd[1] = dir.d.tail[1]; + + if (lfs_pair_isnull(cwd)) { + break; + } + } + + return 0; +} + +static int lfs1_moved(lfs_t *lfs, const void *e) { + if (lfs_pair_isnull(lfs->lfs1->root)) { + return 0; + } + + // skip superblock + lfs1_dir_t cwd; + int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1}); + if (err) { + return err; + } + + // iterate over all directory directory entries + lfs1_entry_t entry; + while (!lfs_pair_isnull(cwd.d.tail)) { + err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail); + if (err) { + return err; + } + + while (true) { + err = lfs1_dir_next(lfs, &cwd, &entry); + if (err && err != LFS_ERR_NOENT) { + return err; + } + + if (err == LFS_ERR_NOENT) { + break; + } + + if (!(0x80 & entry.d.type) && + memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) { + return true; + } + } + } + + return false; +} + +/// Filesystem operations /// +static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1, + const struct lfs_config *cfg) { + int err = 0; + { + err = lfs_init(lfs, cfg); + if (err) { + return err; + } + + lfs->lfs1 = lfs1; + 
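        // Aside, not part of the upstream patch: LFS_BLOCK_NULL is the
        // all-ones "no block" address, (lfs_block_t)-1 in lfs.h, so the
        // v1 root below stays recognizably invalid until the superblock
        // fetch fills it in. Mirroring lfs_pair_isnull:
        //
        //     bool isnull = lfs->lfs1->root[0] == LFS_BLOCK_NULL
        //             || lfs->lfs1->root[1] == LFS_BLOCK_NULL;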
lfs->lfs1->root[0] = LFS_BLOCK_NULL; + lfs->lfs1->root[1] = LFS_BLOCK_NULL; + + // setup free lookahead + lfs->lookahead.start = 0; + lfs->lookahead.size = 0; + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); + + // load superblock + lfs1_dir_t dir; + lfs1_superblock_t superblock; + err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1}); + if (err && err != LFS_ERR_CORRUPT) { + goto cleanup; + } + + if (!err) { + err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d), + &superblock.d, sizeof(superblock.d)); + lfs1_superblock_fromle32(&superblock.d); + if (err) { + goto cleanup; + } + + lfs->lfs1->root[0] = superblock.d.root[0]; + lfs->lfs1->root[1] = superblock.d.root[1]; + } + + if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) { + LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}", + 0, 1); + err = LFS_ERR_CORRUPT; + goto cleanup; + } + + uint16_t major_version = (0xffff & (superblock.d.version >> 16)); + uint16_t minor_version = (0xffff & (superblock.d.version >> 0)); + if ((major_version != LFS1_DISK_VERSION_MAJOR || + minor_version > LFS1_DISK_VERSION_MINOR)) { + LFS_ERROR("Invalid version v%d.%d", major_version, minor_version); + err = LFS_ERR_INVAL; + goto cleanup; + } + + return 0; + } + +cleanup: + lfs_deinit(lfs); + return err; +} + +static int lfs1_unmount(lfs_t *lfs) { + return lfs_deinit(lfs); +} + +/// v1 migration /// +static int lfs_migrate_(lfs_t *lfs, const struct lfs_config *cfg) { + struct lfs1 lfs1; + + // Indeterminate filesystem size not allowed for migration. + LFS_ASSERT(cfg->block_count != 0); + + int err = lfs1_mount(lfs, &lfs1, cfg); + if (err) { + return err; + } + + { + // iterate through each directory, copying over entries + // into new directory + lfs1_dir_t dir1; + lfs_mdir_t dir2; + dir1.d.tail[0] = lfs->lfs1->root[0]; + dir1.d.tail[1] = lfs->lfs1->root[1]; + while (!lfs_pair_isnull(dir1.d.tail)) { + // iterate old dir + err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail); + if (err) { + goto cleanup; + } + + // create new dir and bind as temporary pretend root + err = lfs_dir_alloc(lfs, &dir2); + if (err) { + goto cleanup; + } + + dir2.rev = dir1.d.rev; + dir1.head[0] = dir1.pair[0]; + dir1.head[1] = dir1.pair[1]; + lfs->root[0] = dir2.pair[0]; + lfs->root[1] = dir2.pair[1]; + + err = lfs_dir_commit(lfs, &dir2, NULL, 0); + if (err) { + goto cleanup; + } + + while (true) { + lfs1_entry_t entry1; + err = lfs1_dir_next(lfs, &dir1, &entry1); + if (err && err != LFS_ERR_NOENT) { + goto cleanup; + } + + if (err == LFS_ERR_NOENT) { + break; + } + + // check that entry has not been moved + if (entry1.d.type & 0x80) { + int moved = lfs1_moved(lfs, &entry1.d.u); + if (moved < 0) { + err = moved; + goto cleanup; + } + + if (moved) { + continue; + } + + entry1.d.type &= ~0x80; + } + + // also fetch name + char name[LFS_NAME_MAX+1]; + memset(name, 0, sizeof(name)); + err = lfs1_bd_read(lfs, dir1.pair[0], + entry1.off + 4+entry1.d.elen+entry1.d.alen, + name, entry1.d.nlen); + if (err) { + goto cleanup; + } + + bool isdir = (entry1.d.type == LFS1_TYPE_DIR); + + // create entry in new dir + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } + + uint16_t id; + err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id); + if (!(err == LFS_ERR_NOENT && id != 0x3ff)) { + err = (err < 0) ? 
err : LFS_ERR_EXIST; + goto cleanup; + } + + lfs1_entry_tole32(&entry1.d); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, + {LFS_MKTAG_IF_ELSE(isdir, + LFS_TYPE_DIR, id, entry1.d.nlen, + LFS_TYPE_REG, id, entry1.d.nlen), + name}, + {LFS_MKTAG_IF_ELSE(isdir, + LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u), + LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)), + &entry1.d.u})); + lfs1_entry_fromle32(&entry1.d); + if (err) { + goto cleanup; + } + } + + if (!lfs_pair_isnull(dir1.d.tail)) { + // find last block and update tail to thread into fs + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } + + while (dir2.split) { + err = lfs_dir_fetch(lfs, &dir2, dir2.tail); + if (err) { + goto cleanup; + } + } + + lfs_pair_tole32(dir2.pair); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail})); + lfs_pair_fromle32(dir2.pair); + if (err) { + goto cleanup; + } + } + + // Copy over first block to thread into fs. Unfortunately + // if this fails there is not much we can do. + LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} " + "-> {0x%"PRIx32", 0x%"PRIx32"}", + lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]); + + err = lfs_bd_erase(lfs, dir1.head[1]); + if (err) { + goto cleanup; + } + + err = lfs_dir_fetch(lfs, &dir2, lfs->root); + if (err) { + goto cleanup; + } + + for (lfs_off_t i = 0; i < dir2.off; i++) { + uint8_t dat; + err = lfs_bd_read(lfs, + NULL, &lfs->rcache, dir2.off, + dir2.pair[0], i, &dat, 1); + if (err) { + goto cleanup; + } + + err = lfs_bd_prog(lfs, + &lfs->pcache, &lfs->rcache, true, + dir1.head[1], i, &dat, 1); + if (err) { + goto cleanup; + } + } + + err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true); + if (err) { + goto cleanup; + } + } + + // Create new superblock. This marks a successful migration! 
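        // Aside, not part of the upstream patch: the superblock commit
        // built below is the migration's atomic commit point -- before it
        // the device still mounts as v1, after it only as v2. The usual
        // caller-side pattern is a mount/migrate/mount retry; a hedged
        // sketch assuming a populated `cfg`:
        //
        //     lfs_t lfs;
        //     int err = lfs_mount(&lfs, &cfg);
        //     if (err) {                          // possibly still v1
        //         err = lfs_migrate(&lfs, &cfg);  // needs LFS_MIGRATE
        //         if (!err) {
        //             err = lfs_mount(&lfs, &cfg);
        //         }
        //     }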
+ err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + + dir2.pair[0] = dir1.pair[0]; + dir2.pair[1] = dir1.pair[1]; + dir2.rev = dir1.d.rev; + dir2.off = sizeof(dir2.rev); + dir2.etag = 0xffffffff; + dir2.count = 0; + dir2.tail[0] = lfs->lfs1->root[0]; + dir2.tail[1] = lfs->lfs1->root[1]; + dir2.erased = false; + dir2.split = true; + + lfs_superblock_t superblock = { + .version = LFS_DISK_VERSION, + .block_size = lfs->cfg->block_size, + .block_count = lfs->cfg->block_count, + .name_max = lfs->name_max, + .file_max = lfs->file_max, + .attr_max = lfs->attr_max, + }; + + lfs_superblock_tole32(&superblock); + err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( + {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, + {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, + {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), + &superblock})); + if (err) { + goto cleanup; + } + + // sanity check that fetch works + err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1}); + if (err) { + goto cleanup; + } + + // force compaction to prevent accidentally mounting v1 + dir2.erased = false; + err = lfs_dir_commit(lfs, &dir2, NULL, 0); + if (err) { + goto cleanup; + } + } + +cleanup: + lfs1_unmount(lfs); + return err; +} + +#endif + + +/// Public API wrappers /// + +// Here we can add tracing/thread safety easily + +// Thread-safe wrappers if enabled +#ifdef LFS_THREADSAFE +#define LFS_LOCK(cfg) cfg->lock(cfg) +#define LFS_UNLOCK(cfg) cfg->unlock(cfg) +#else +#define LFS_LOCK(cfg) ((void)cfg, 0) +#define LFS_UNLOCK(cfg) ((void)cfg) +#endif + +// Public API +#ifndef LFS_READONLY +int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_format(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + ".block_cycles=%"PRId32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_format_(lfs, cfg); + + LFS_TRACE("lfs_format -> %d", err); + LFS_UNLOCK(cfg); + return err; +} +#endif + +int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_mount(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + ".block_cycles=%"PRId32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, 
cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_mount_(lfs, cfg); + + LFS_TRACE("lfs_mount -> %d", err); + LFS_UNLOCK(cfg); + return err; +} + +int lfs_unmount(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_unmount(%p)", (void*)lfs); + + err = lfs_unmount_(lfs); + + LFS_TRACE("lfs_unmount -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifndef LFS_READONLY +int lfs_remove(lfs_t *lfs, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path); + + err = lfs_remove_(lfs, path); + + LFS_TRACE("lfs_remove -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath); + + err = lfs_rename_(lfs, oldpath, newpath); + + LFS_TRACE("lfs_rename -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info); + + err = lfs_stat_(lfs, path, info); + + LFS_TRACE("lfs_stat -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")", + (void*)lfs, path, type, buffer, size); + + lfs_ssize_t res = lfs_getattr_(lfs, path, type, buffer, size); + + LFS_TRACE("lfs_getattr -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_setattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")", + (void*)lfs, path, type, buffer, size); + + err = lfs_setattr_(lfs, path, type, buffer, size); + + LFS_TRACE("lfs_setattr -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type); + + err = lfs_removeattr_(lfs, path, type); + + LFS_TRACE("lfs_removeattr -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_NO_MALLOC +int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)", + (void*)lfs, (void*)file, path, (unsigned)flags); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_open_(lfs, file, path, flags); + + LFS_TRACE("lfs_file_open -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *cfg) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {" + ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})", + (void*)lfs, (void*)file, path, (unsigned)flags, + (void*)cfg, cfg->buffer, (void*)cfg->attrs, 
cfg->attr_count); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_opencfg_(lfs, file, path, flags, cfg); + + LFS_TRACE("lfs_file_opencfg -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_file_close(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_close_(lfs, file); + + LFS_TRACE("lfs_file_close -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifndef LFS_READONLY +int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_sync_(lfs, file); + + LFS_TRACE("lfs_file_sync -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")", + (void*)lfs, (void*)file, buffer, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_ssize_t res = lfs_file_read_(lfs, file, buffer, size); + + LFS_TRACE("lfs_file_read -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")", + (void*)lfs, (void*)file, buffer, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_ssize_t res = lfs_file_write_(lfs, file, buffer, size); + + LFS_TRACE("lfs_file_write -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} +#endif + +lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)", + (void*)lfs, (void*)file, off, whence); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_seek_(lfs, file, off, whence); + + LFS_TRACE("lfs_file_seek -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")", + (void*)lfs, (void*)file, size); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + err = lfs_file_truncate_(lfs, file, size); + + LFS_TRACE("lfs_file_truncate -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_tell_(lfs, file); + + LFS_TRACE("lfs_file_tell -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file); + + err = lfs_file_rewind_(lfs, file); + + LFS_TRACE("lfs_file_rewind 
-> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file); + LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file)); + + lfs_soff_t res = lfs_file_size_(lfs, file); + + LFS_TRACE("lfs_file_size -> %"PRIu32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +#ifndef LFS_READONLY +int lfs_mkdir(lfs_t *lfs, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path); + + err = lfs_mkdir_(lfs, path); + + LFS_TRACE("lfs_mkdir -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path); + LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir)); + + err = lfs_dir_open_(lfs, dir, path); + + LFS_TRACE("lfs_dir_open -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir); + + err = lfs_dir_close_(lfs, dir); + + LFS_TRACE("lfs_dir_close -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_read(%p, %p, %p)", + (void*)lfs, (void*)dir, (void*)info); + + err = lfs_dir_read_(lfs, dir, info); + + LFS_TRACE("lfs_dir_read -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")", + (void*)lfs, (void*)dir, off); + + err = lfs_dir_seek_(lfs, dir, off); + + LFS_TRACE("lfs_dir_seek -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir); + + lfs_soff_t res = lfs_dir_tell_(lfs, dir); + + LFS_TRACE("lfs_dir_tell -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir); + + err = lfs_dir_rewind_(lfs, dir); + + LFS_TRACE("lfs_dir_rewind -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo); + + err = lfs_fs_stat_(lfs, fsinfo); + + LFS_TRACE("lfs_fs_stat -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +lfs_ssize_t lfs_fs_size(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_size(%p)", (void*)lfs); + + lfs_ssize_t res = lfs_fs_size_(lfs); + + LFS_TRACE("lfs_fs_size -> %"PRId32, res); + LFS_UNLOCK(lfs->cfg); + return res; +} + +int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_traverse(%p, %p, %p)", + (void*)lfs, (void*)(uintptr_t)cb, data); + + 
err = lfs_fs_traverse_(lfs, cb, data, true); + + LFS_TRACE("lfs_fs_traverse -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} + +#ifndef LFS_READONLY +int lfs_fs_mkconsistent(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs); + + err = lfs_fs_mkconsistent_(lfs); + + LFS_TRACE("lfs_fs_mkconsistent -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_fs_gc(lfs_t *lfs) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs); + + err = lfs_fs_gc_(lfs); + + LFS_TRACE("lfs_fs_gc -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifndef LFS_READONLY +int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) { + int err = LFS_LOCK(lfs->cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count); + + err = lfs_fs_grow_(lfs, block_count); + + LFS_TRACE("lfs_fs_grow -> %d", err); + LFS_UNLOCK(lfs->cfg); + return err; +} +#endif + +#ifdef LFS_MIGRATE +int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) { + int err = LFS_LOCK(cfg); + if (err) { + return err; + } + LFS_TRACE("lfs_migrate(%p, %p {.context=%p, " + ".read=%p, .prog=%p, .erase=%p, .sync=%p, " + ".read_size=%"PRIu32", .prog_size=%"PRIu32", " + ".block_size=%"PRIu32", .block_count=%"PRIu32", " + ".block_cycles=%"PRId32", .cache_size=%"PRIu32", " + ".lookahead_size=%"PRIu32", .read_buffer=%p, " + ".prog_buffer=%p, .lookahead_buffer=%p, " + ".name_max=%"PRIu32", .file_max=%"PRIu32", " + ".attr_max=%"PRIu32"})", + (void*)lfs, (void*)cfg, cfg->context, + (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog, + (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync, + cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count, + cfg->block_cycles, cfg->cache_size, cfg->lookahead_size, + cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer, + cfg->name_max, cfg->file_max, cfg->attr_max); + + err = lfs_migrate_(lfs, cfg); + + LFS_TRACE("lfs_migrate -> %d", err); + LFS_UNLOCK(cfg); + return err; +} +#endif + diff --git a/components/joltwallet__littlefs/src/littlefs/lfs.h b/components/joltwallet__littlefs/src/littlefs/lfs.h new file mode 100644 index 0000000..215309c --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/lfs.h @@ -0,0 +1,801 @@ +/* + * The little filesystem + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef LFS_H +#define LFS_H + +#include "lfs_util.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/// Version info /// + +// Software library version +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS_VERSION 0x0002000b +#define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) +#define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0)) + +// Version of On-disk data structures +// Major (top-nibble), incremented on backwards incompatible changes +// Minor (bottom-nibble), incremented on feature additions +#define LFS_DISK_VERSION 0x00020001 +#define LFS_DISK_VERSION_MAJOR (0xffff & (LFS_DISK_VERSION >> 16)) +#define LFS_DISK_VERSION_MINOR (0xffff & (LFS_DISK_VERSION >> 0)) + + +/// Definitions /// + +// Type definitions +typedef uint32_t lfs_size_t; +typedef uint32_t lfs_off_t; + +typedef int32_t lfs_ssize_t; +typedef int32_t lfs_soff_t; + +typedef uint32_t lfs_block_t; + +// Maximum name size in bytes, may be redefined to reduce the size of the +// info struct. Limited to <= 1022. Stored in superblock and must be +// respected by other littlefs drivers. +#ifndef LFS_NAME_MAX +#define LFS_NAME_MAX 255 +#endif + +// Maximum size of a file in bytes, may be redefined to limit to support other +// drivers. Limited on disk to <= 2147483647. Stored in superblock and must be +// respected by other littlefs drivers. +#ifndef LFS_FILE_MAX +#define LFS_FILE_MAX 2147483647 +#endif + +// Maximum size of custom attributes in bytes, may be redefined, but there is +// no real benefit to using a smaller LFS_ATTR_MAX. Limited to <= 1022. Stored +// in superblock and must be respected by other littlefs drivers. +#ifndef LFS_ATTR_MAX +#define LFS_ATTR_MAX 1022 +#endif + +// Possible error codes, these are negative to allow +// valid positive return values +enum lfs_error { + LFS_ERR_OK = 0, // No error + LFS_ERR_IO = -5, // Error during device operation + LFS_ERR_CORRUPT = -84, // Corrupted + LFS_ERR_NOENT = -2, // No directory entry + LFS_ERR_EXIST = -17, // Entry already exists + LFS_ERR_NOTDIR = -20, // Entry is not a dir + LFS_ERR_ISDIR = -21, // Entry is a dir + LFS_ERR_NOTEMPTY = -39, // Dir is not empty + LFS_ERR_BADF = -9, // Bad file number + LFS_ERR_FBIG = -27, // File too large + LFS_ERR_INVAL = -22, // Invalid parameter + LFS_ERR_NOSPC = -28, // No space left on device + LFS_ERR_NOMEM = -12, // No more memory available + LFS_ERR_NOATTR = -61, // No data/attr available + LFS_ERR_NAMETOOLONG = -36, // File name too long +}; + +// File types +enum lfs_type { + // file types + LFS_TYPE_REG = 0x001, + LFS_TYPE_DIR = 0x002, + + // internally used types + LFS_TYPE_SPLICE = 0x400, + LFS_TYPE_NAME = 0x000, + LFS_TYPE_STRUCT = 0x200, + LFS_TYPE_USERATTR = 0x300, + LFS_TYPE_FROM = 0x100, + LFS_TYPE_TAIL = 0x600, + LFS_TYPE_GLOBALS = 0x700, + LFS_TYPE_CRC = 0x500, + + // internally used type specializations + LFS_TYPE_CREATE = 0x401, + LFS_TYPE_DELETE = 0x4ff, + LFS_TYPE_SUPERBLOCK = 0x0ff, + LFS_TYPE_DIRSTRUCT = 0x200, + LFS_TYPE_CTZSTRUCT = 0x202, + LFS_TYPE_INLINESTRUCT = 0x201, + LFS_TYPE_SOFTTAIL = 0x600, + LFS_TYPE_HARDTAIL = 0x601, + LFS_TYPE_MOVESTATE = 0x7ff, + LFS_TYPE_CCRC = 0x500, + LFS_TYPE_FCRC = 0x5ff, + + // internal chip sources + LFS_FROM_NOOP = 0x000, + LFS_FROM_MOVE = 0x101, + LFS_FROM_USERATTRS = 0x102, +}; + +// File open flags +enum lfs_open_flags { + // open flags + LFS_O_RDONLY = 1, // Open a file as read only +#ifndef LFS_READONLY + 
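    // Aside, not part of the upstream patch: these flags are bitwise-ored
    // together, e.g. a typical "create if missing, always append" open,
    // assuming a mounted `lfs` and an lfs_file_t `file`:
    //
    //     int err = lfs_file_open(&lfs, &file, "log.txt",
    //             LFS_O_WRONLY | LFS_O_CREAT | LFS_O_APPEND);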
LFS_O_WRONLY = 2, // Open a file as write only + LFS_O_RDWR = 3, // Open a file as read and write + LFS_O_CREAT = 0x0100, // Create a file if it does not exist + LFS_O_EXCL = 0x0200, // Fail if a file already exists + LFS_O_TRUNC = 0x0400, // Truncate the existing file to zero size + LFS_O_APPEND = 0x0800, // Move to end of file on every write +#endif + + // internally used flags +#ifndef LFS_READONLY + LFS_F_DIRTY = 0x010000, // File does not match storage + LFS_F_WRITING = 0x020000, // File has been written since last flush +#endif + LFS_F_READING = 0x040000, // File has been read since last flush +#ifndef LFS_READONLY + LFS_F_ERRED = 0x080000, // An error occurred during write +#endif + LFS_F_INLINE = 0x100000, // Currently inlined in directory entry +}; + +// File seek flags +enum lfs_whence_flags { + LFS_SEEK_SET = 0, // Seek relative to an absolute position + LFS_SEEK_CUR = 1, // Seek relative to the current file position + LFS_SEEK_END = 2, // Seek relative to the end of the file +}; + + +// Configuration provided during initialization of the littlefs +struct lfs_config { + // Opaque user provided context that can be used to pass + // information to the block device operations + void *context; + + // Read a region in a block. Negative error codes are propagated + // to the user. + int (*read)(const struct lfs_config *c, lfs_block_t block, + lfs_off_t off, void *buffer, lfs_size_t size); + + // Program a region in a block. The block must have previously + // been erased. Negative error codes are propagated to the user. + // May return LFS_ERR_CORRUPT if the block should be considered bad. + int (*prog)(const struct lfs_config *c, lfs_block_t block, + lfs_off_t off, const void *buffer, lfs_size_t size); + + // Erase a block. A block must be erased before being programmed. + // The state of an erased block is undefined. Negative error codes + // are propagated to the user. + // May return LFS_ERR_CORRUPT if the block should be considered bad. + int (*erase)(const struct lfs_config *c, lfs_block_t block); + + // Sync the state of the underlying block device. Negative error codes + // are propagated to the user. + int (*sync)(const struct lfs_config *c); + +#ifdef LFS_THREADSAFE + // Lock the underlying block device. Negative error codes + // are propagated to the user. + int (*lock)(const struct lfs_config *c); + + // Unlock the underlying block device. Negative error codes + // are propagated to the user. + int (*unlock)(const struct lfs_config *c); +#endif + + // Minimum size of a block read in bytes. All read operations will be a + // multiple of this value. + lfs_size_t read_size; + + // Minimum size of a block program in bytes. All program operations will be + // a multiple of this value. + lfs_size_t prog_size; + + // Size of an erasable block in bytes. This does not impact ram consumption + // and may be larger than the physical erase size. However, non-inlined + // files take up at minimum one block. Must be a multiple of the read and + // program sizes. + lfs_size_t block_size; + + // Number of erasable blocks on the device. Defaults to block_count stored + // on disk when zero. + lfs_size_t block_count; + + // Number of erase cycles before littlefs evicts metadata logs and moves + // the metadata to another block. Suggested values are in the + // range 100-1000, with large values having better performance at the cost + // of less consistent wear distribution. + // + // Set to -1 to disable block-level wear-leveling. + int32_t block_cycles; + + // Size of block caches in bytes. 
Each cache buffers a portion of a block in
+    // RAM. The littlefs needs a read cache, a program cache, and one additional
+    // cache per file. Larger caches can improve performance by storing more
+    // data and reducing the number of disk accesses. Must be a multiple of the
+    // read and program sizes, and a factor of the block size.
+    lfs_size_t cache_size;
+
+    // Size of the lookahead buffer in bytes. A larger lookahead buffer
+    // increases the number of blocks found during an allocation pass. The
+    // lookahead buffer is stored as a compact bitmap, so each byte of RAM
+    // can track 8 blocks.
+    lfs_size_t lookahead_size;
+
+    // Threshold for metadata compaction during lfs_fs_gc in bytes. Metadata
+    // pairs that exceed this threshold will be compacted during lfs_fs_gc.
+    // Defaults to ~88% block_size when zero, though the default may change
+    // in the future.
+    //
+    // Note this only affects lfs_fs_gc. Normal compactions still only occur
+    // when full.
+    //
+    // Set to -1 to disable metadata compaction during lfs_fs_gc.
+    lfs_size_t compact_thresh;
+
+    // Optional statically allocated read buffer. Must be cache_size.
+    // By default lfs_malloc is used to allocate this buffer.
+    void *read_buffer;
+
+    // Optional statically allocated program buffer. Must be cache_size.
+    // By default lfs_malloc is used to allocate this buffer.
+    void *prog_buffer;
+
+    // Optional statically allocated lookahead buffer. Must be lookahead_size.
+    // By default lfs_malloc is used to allocate this buffer.
+    void *lookahead_buffer;
+
+    // Optional upper limit on length of file names in bytes. No downside for
+    // larger names except the size of the info struct which is controlled by
+    // the LFS_NAME_MAX define. Defaults to LFS_NAME_MAX or name_max stored on
+    // disk when zero.
+    lfs_size_t name_max;
+
+    // Optional upper limit on files in bytes. No downside for larger files
+    // but must be <= LFS_FILE_MAX. Defaults to LFS_FILE_MAX or file_max stored
+    // on disk when zero.
+    lfs_size_t file_max;
+
+    // Optional upper limit on custom attributes in bytes. No downside for
+    // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
+    // LFS_ATTR_MAX or attr_max stored on disk when zero.
+    lfs_size_t attr_max;
+
+    // Optional upper limit on total space given to metadata pairs in bytes. On
+    // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
+    // can help bound the metadata compaction time. Must be <= block_size.
+    // Defaults to block_size when zero.
+    lfs_size_t metadata_max;
+
+    // Optional upper limit on inlined files in bytes. Inlined files live in
+    // metadata and decrease storage requirements, but may be limited to
+    // improve metadata-related performance. Must be <= cache_size, <=
+    // attr_max, and <= block_size/8. Defaults to the largest possible
+    // inline_max when zero.
+    //
+    // Set to -1 to disable inlined files.
+    lfs_size_t inline_max;
+
+#ifdef LFS_MULTIVERSION
+    // On-disk version to use when writing in the form of 16-bit major version
+    // + 16-bit minor version. This limits metadata to what is supported by
+    // older minor versions. Note that some features will be lost. Defaults
+    // to the most recent minor version when zero.
+    uint32_t disk_version;
+#endif
+};
+
+// File info structure
+struct lfs_info {
+    // Type of the file, either LFS_TYPE_REG or LFS_TYPE_DIR
+    uint8_t type;
+
+    // Size of the file, only valid for REG files. Limited to 32-bits.
+    lfs_size_t size;
+
+    // Name of the file stored as a null-terminated string. 
Limited to + // LFS_NAME_MAX+1, which can be changed by redefining LFS_NAME_MAX to + // reduce RAM. LFS_NAME_MAX is stored in superblock and must be + // respected by other littlefs drivers. + char name[LFS_NAME_MAX+1]; +}; + +// Filesystem info structure +struct lfs_fsinfo { + // On-disk version. + uint32_t disk_version; + + // Size of a logical block in bytes. + lfs_size_t block_size; + + // Number of logical blocks in filesystem. + lfs_size_t block_count; + + // Upper limit on the length of file names in bytes. + lfs_size_t name_max; + + // Upper limit on the size of files in bytes. + lfs_size_t file_max; + + // Upper limit on the size of custom attributes in bytes. + lfs_size_t attr_max; +}; + +// Custom attribute structure, used to describe custom attributes +// committed atomically during file writes. +struct lfs_attr { + // 8-bit type of attribute, provided by user and used to + // identify the attribute + uint8_t type; + + // Pointer to buffer containing the attribute + void *buffer; + + // Size of attribute in bytes, limited to LFS_ATTR_MAX + lfs_size_t size; +}; + +// Optional configuration provided during lfs_file_opencfg +struct lfs_file_config { + // Optional statically allocated file buffer. Must be cache_size. + // By default lfs_malloc is used to allocate this buffer. + void *buffer; + + // Optional list of custom attributes related to the file. If the file + // is opened with read access, these attributes will be read from disk + // during the open call. If the file is opened with write access, the + // attributes will be written to disk every file sync or close. This + // write occurs atomically with update to the file's contents. + // + // Custom attributes are uniquely identified by an 8-bit type and limited + // to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller + // than the buffer, it will be padded with zeros. If the stored attribute + // is larger, then it will be silently truncated. If the attribute is not + // found, it will be created implicitly. 
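    // Aside, not part of the upstream patch: a hedged sketch of wiring
    // one custom attribute through this config so it commits atomically
    // with the file contents (the 'v' type byte is arbitrary):
    //
    //     uint32_t fmt = 1;
    //     struct lfs_attr attrs[] = {
    //         {.type = 'v', .buffer = &fmt, .size = sizeof(fmt)},
    //     };
    //     struct lfs_file_config fcfg = {.attrs = attrs, .attr_count = 1};
    //     lfs_file_opencfg(&lfs, &file, "data.bin",
    //             LFS_O_RDWR | LFS_O_CREAT, &fcfg);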
+ struct lfs_attr *attrs; + + // Number of custom attributes in the list + lfs_size_t attr_count; +}; + + +/// internal littlefs data structures /// +typedef struct lfs_cache { + lfs_block_t block; + lfs_off_t off; + lfs_size_t size; + uint8_t *buffer; +} lfs_cache_t; + +typedef struct lfs_mdir { + lfs_block_t pair[2]; + uint32_t rev; + lfs_off_t off; + uint32_t etag; + uint16_t count; + bool erased; + bool split; + lfs_block_t tail[2]; +} lfs_mdir_t; + +// littlefs directory type +typedef struct lfs_dir { + struct lfs_dir *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + + lfs_off_t pos; + lfs_block_t head[2]; +} lfs_dir_t; + +// littlefs file type +typedef struct lfs_file { + struct lfs_file *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + + struct lfs_ctz { + lfs_block_t head; + lfs_size_t size; + } ctz; + + uint32_t flags; + lfs_off_t pos; + lfs_block_t block; + lfs_off_t off; + lfs_cache_t cache; + + const struct lfs_file_config *cfg; +} lfs_file_t; + +typedef struct lfs_superblock { + uint32_t version; + lfs_size_t block_size; + lfs_size_t block_count; + lfs_size_t name_max; + lfs_size_t file_max; + lfs_size_t attr_max; +} lfs_superblock_t; + +typedef struct lfs_gstate { + uint32_t tag; + lfs_block_t pair[2]; +} lfs_gstate_t; + +// The littlefs filesystem type +typedef struct lfs { + lfs_cache_t rcache; + lfs_cache_t pcache; + + lfs_block_t root[2]; + struct lfs_mlist { + struct lfs_mlist *next; + uint16_t id; + uint8_t type; + lfs_mdir_t m; + } *mlist; + uint32_t seed; + + lfs_gstate_t gstate; + lfs_gstate_t gdisk; + lfs_gstate_t gdelta; + + struct lfs_lookahead { + lfs_block_t start; + lfs_block_t size; + lfs_block_t next; + lfs_block_t ckpoint; + uint8_t *buffer; + } lookahead; + + const struct lfs_config *cfg; + lfs_size_t block_count; + lfs_size_t name_max; + lfs_size_t file_max; + lfs_size_t attr_max; + lfs_size_t inline_max; + +#ifdef LFS_MIGRATE + struct lfs1 *lfs1; +#endif +} lfs_t; + + +/// Filesystem functions /// + +#ifndef LFS_READONLY +// Format a block device with the littlefs +// +// Requires a littlefs object and config struct. This clobbers the littlefs +// object, and does not leave the filesystem mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_format(lfs_t *lfs, const struct lfs_config *config); +#endif + +// Mounts a littlefs +// +// Requires a littlefs object and config struct. Multiple filesystems +// may be mounted simultaneously with multiple littlefs objects. Both +// lfs and config must be allocated while mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_mount(lfs_t *lfs, const struct lfs_config *config); + +// Unmounts a littlefs +// +// Does nothing besides releasing any allocated resources. +// Returns a negative error code on failure. +int lfs_unmount(lfs_t *lfs); + +/// General operations /// + +#ifndef LFS_READONLY +// Removes a file or directory +// +// If removing a directory, the directory must be empty. +// Returns a negative error code on failure. +int lfs_remove(lfs_t *lfs, const char *path); +#endif + +#ifndef LFS_READONLY +// Rename or move a file or directory +// +// If the destination exists, it must match the source in type. +// If the destination is a directory, the directory must be empty. +// +// Returns a negative error code on failure. 
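// Aside, not part of the upstream patch: both arguments are full paths,
// so a rename doubles as a move between directories, e.g.:
//
//     int err = lfs_rename(&lfs, "tmp/data.bin", "archive/data.bin");
//     // a non-empty destination directory fails with LFS_ERR_NOTEMPTY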
+int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath); +#endif + +// Find info about a file or directory +// +// Fills out the info structure, based on the specified file or directory. +// Returns a negative error code on failure. +int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info); + +// Get a custom attribute +// +// Custom attributes are uniquely identified by an 8-bit type and limited +// to LFS_ATTR_MAX bytes. When read, if the stored attribute is smaller than +// the buffer, it will be padded with zeros. If the stored attribute is larger, +// then it will be silently truncated. If no attribute is found, the error +// LFS_ERR_NOATTR is returned and the buffer is filled with zeros. +// +// Returns the size of the attribute, or a negative error code on failure. +// Note, the returned size is the size of the attribute on disk, irrespective +// of the size of the buffer. This can be used to dynamically allocate a buffer +// or check for existence. +lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path, + uint8_t type, void *buffer, lfs_size_t size); + +#ifndef LFS_READONLY +// Set custom attributes +// +// Custom attributes are uniquely identified by an 8-bit type and limited +// to LFS_ATTR_MAX bytes. If an attribute is not found, it will be +// implicitly created. +// +// Returns a negative error code on failure. +int lfs_setattr(lfs_t *lfs, const char *path, + uint8_t type, const void *buffer, lfs_size_t size); +#endif + +#ifndef LFS_READONLY +// Removes a custom attribute +// +// If an attribute is not found, nothing happens. +// +// Returns a negative error code on failure. +int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type); +#endif + + +/// File operations /// + +#ifndef LFS_NO_MALLOC +// Open a file +// +// The mode that the file is opened in is determined by the flags, which +// are values from the enum lfs_open_flags that are bitwise-ored together. +// +// Returns a negative error code on failure. +int lfs_file_open(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags); + +// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM +// thus use lfs_file_opencfg() with config.buffer set. +#endif + +// Open a file with extra configuration +// +// The mode that the file is opened in is determined by the flags, which +// are values from the enum lfs_open_flags that are bitwise-ored together. +// +// The config struct provides additional config options per file as described +// above. The config struct must remain allocated while the file is open, and +// the config struct must be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file, + const char *path, int flags, + const struct lfs_file_config *config); + +// Close a file +// +// Any pending writes are written out to storage as though +// sync had been called and releases any allocated resources. +// +// Returns a negative error code on failure. +int lfs_file_close(lfs_t *lfs, lfs_file_t *file); + +// Synchronize a file on storage +// +// Any pending writes are written out to storage. +// Returns a negative error code on failure. +int lfs_file_sync(lfs_t *lfs, lfs_file_t *file); + +// Read data from file +// +// Takes a buffer and size indicating where to store the read data. +// Returns the number of bytes read, or a negative error code on failure. 
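// Aside, not part of the upstream patch: as with POSIX read, the
// returned count may be short at end of file, so bulk reads typically
// loop:
//
//     uint8_t buf[256];
//     lfs_ssize_t n;
//     while ((n = lfs_file_read(&lfs, &file, buf, sizeof(buf))) > 0) {
//         // consume n bytes of buf
//     }
//     // n == 0 is end of file, n < 0 is a negative lfs error code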
+lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file, + void *buffer, lfs_size_t size); + +#ifndef LFS_READONLY +// Write data to file +// +// Takes a buffer and size indicating the data to write. The file will not +// actually be updated on the storage until either sync or close is called. +// +// Returns the number of bytes written, or a negative error code on failure. +lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file, + const void *buffer, lfs_size_t size); +#endif + +// Change the position of the file +// +// The change in position is determined by the offset and whence flag. +// Returns the new position of the file, or a negative error code on failure. +lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file, + lfs_soff_t off, int whence); + +#ifndef LFS_READONLY +// Truncates the size of the file to the specified size +// +// Returns a negative error code on failure. +int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size); +#endif + +// Return the position of the file +// +// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_CUR) +// Returns the position of the file, or a negative error code on failure. +lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file); + +// Change the position of the file to the beginning of the file +// +// Equivalent to lfs_file_seek(lfs, file, 0, LFS_SEEK_SET) +// Returns a negative error code on failure. +int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file); + +// Return the size of the file +// +// Similar to lfs_file_seek(lfs, file, 0, LFS_SEEK_END) +// Returns the size of the file, or a negative error code on failure. +lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file); + + +/// Directory operations /// + +#ifndef LFS_READONLY +// Create a directory +// +// Returns a negative error code on failure. +int lfs_mkdir(lfs_t *lfs, const char *path); +#endif + +// Open a directory +// +// Once open a directory can be used with read to iterate over files. +// Returns a negative error code on failure. +int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path); + +// Close a directory +// +// Releases any allocated resources. +// Returns a negative error code on failure. +int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir); + +// Read an entry in the directory +// +// Fills out the info structure, based on the specified file or directory. +// Returns a positive value on success, 0 at the end of directory, +// or a negative error code on failure. +int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info); + +// Change the position of the directory +// +// The new off must be a value previous returned from tell and specifies +// an absolute offset in the directory seek. +// +// Returns a negative error code on failure. +int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off); + +// Return the position of the directory +// +// The returned offset is only meant to be consumed by seek and may not make +// sense, but does indicate the current position in the directory iteration. +// +// Returns the position of the directory, or a negative error code on failure. +lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir); + +// Change the position of the directory to the beginning of the directory +// +// Returns a negative error code on failure. +int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir); + + +/// Filesystem-level filesystem operations + +// Find on-disk info about the filesystem +// +// Fills out the fsinfo structure based on the filesystem found on-disk. +// Returns a negative error code on failure. 
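// Aside, not part of the upstream patch: combined with lfs_fs_size
// below, this gives a rough usage report; a hedged sketch assuming a
// mounted `lfs`:
//
//     struct lfs_fsinfo fsinfo;
//     int err = lfs_fs_stat(&lfs, &fsinfo);
//     lfs_ssize_t used = lfs_fs_size(&lfs);  // in blocks, best effort
//     if (!err && used >= 0) {
//         // capacity bytes:      fsinfo.block_count * fsinfo.block_size
//         // used bytes (approx): used * fsinfo.block_size
//     }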
+int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo); + +// Finds the current size of the filesystem +// +// Note: Result is best effort. If files share COW structures, the returned +// size may be larger than the filesystem actually is. +// +// Returns the number of allocated blocks, or a negative error code on failure. +lfs_ssize_t lfs_fs_size(lfs_t *lfs); + +// Traverse through all blocks in use by the filesystem +// +// The provided callback will be called with each block address that is +// currently in use by the filesystem. This can be used to determine which +// blocks are in use or how much of the storage is available. +// +// Returns a negative error code on failure. +int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data); + +#ifndef LFS_READONLY +// Attempt to make the filesystem consistent and ready for writing +// +// Calling this function is not required, consistency will be implicitly +// enforced on the first operation that writes to the filesystem, but this +// function allows the work to be performed earlier and without other +// filesystem changes. +// +// Returns a negative error code on failure. +int lfs_fs_mkconsistent(lfs_t *lfs); +#endif + +#ifndef LFS_READONLY +// Attempt any janitorial work +// +// This currently: +// 1. Calls mkconsistent if not already consistent +// 2. Compacts metadata > compact_thresh +// 3. Populates the block allocator +// +// Though additional janitorial work may be added in the future. +// +// Calling this function is not required, but may allow the offloading of +// expensive janitorial work to a less time-critical code path. +// +// Returns a negative error code on failure. Accomplishing nothing is not +// an error. +int lfs_fs_gc(lfs_t *lfs); +#endif + +#ifndef LFS_READONLY +// Grows the filesystem to a new size, updating the superblock with the new +// block count. +// +// If LFS_SHRINKNONRELOCATING is defined, this function will also accept +// block_counts smaller than the current configuration, after checking +// that none of the blocks that are being removed are in use. +// Note that littlefs's pseudorandom block allocation means that +// this is very unlikely to work in the general case. +// +// Returns a negative error code on failure. +int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count); +#endif + +#ifndef LFS_READONLY +#ifdef LFS_MIGRATE +// Attempts to migrate a previous version of littlefs +// +// Behaves similarly to the lfs_format function. Attempts to mount +// the previous version of littlefs and update the filesystem so it can be +// mounted with the current version of littlefs. +// +// Requires a littlefs object and config struct. This clobbers the littlefs +// object, and does not leave the filesystem mounted. The config struct must +// be zeroed for defaults and backwards compatibility. +// +// Returns a negative error code on failure. +int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg); +#endif +#endif + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/lfs_util.c b/components/joltwallet__littlefs/src/littlefs/lfs_util.c new file mode 100644 index 0000000..dac72ab --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/lfs_util.c @@ -0,0 +1,37 @@ +/* + * lfs util functions + * + * Copyright (c) 2022, The littlefs authors. + * Copyright (c) 2017, Arm Limited. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "lfs_util.h"
+
+// Only compile if user does not provide custom config
+#ifndef LFS_CONFIG
+
+
+// If user provides their own CRC impl we don't need this
+#ifndef LFS_CRC
+// Software CRC implementation with small lookup table
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+    static const uint32_t rtable[16] = {
+        0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+        0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+        0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+        0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c,
+    };
+
+    const uint8_t *data = buffer;
+
+    for (size_t i = 0; i < size; i++) {
+        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 0)) & 0xf];
+        crc = (crc >> 4) ^ rtable[(crc ^ (data[i] >> 4)) & 0xf];
+    }
+
+    return crc;
+}
+#endif
+
+
+#endif
diff --git a/components/joltwallet__littlefs/src/littlefs/lfs_util.h b/components/joltwallet__littlefs/src/littlefs/lfs_util.h
new file mode 100644
index 0000000..c1999fa
--- /dev/null
+++ b/components/joltwallet__littlefs/src/littlefs/lfs_util.h
@@ -0,0 +1,273 @@
+/*
+ * lfs utility functions
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * Copyright (c) 2017, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LFS_UTIL_H
+#define LFS_UTIL_H
+
+#define LFS_STRINGIZE(x) LFS_STRINGIZE2(x)
+#define LFS_STRINGIZE2(x) #x
+
+// Users can override lfs_util.h with their own configuration by defining
+// LFS_CONFIG as a header file to include (-DLFS_CONFIG=lfs_config.h).
+//
+// If LFS_CONFIG is used, none of the default utils will be emitted and they
+// must be provided by the config file. To start, I would suggest copying
+// lfs_util.h and modifying as needed.
+#ifdef LFS_CONFIG
+#include LFS_STRINGIZE(LFS_CONFIG)
+#else
+
+// Alternatively, users can provide a header file which defines
+// macros and other things consumed by littlefs.
+//
+// For example, provide my_defines.h, which contains
+// something like:
+//
+//   #include <stddef.h>
+//   extern void *my_malloc(size_t sz);
+//   #define LFS_MALLOC(sz) my_malloc(sz)
+//
+// And build littlefs with the header by defining LFS_DEFINES.
+// (-DLFS_DEFINES=my_defines.h)
+
+#ifdef LFS_DEFINES
+#include LFS_STRINGIZE(LFS_DEFINES)
+#endif
+
+// System includes
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <inttypes.h>
+
+#ifndef LFS_NO_MALLOC
+#include <stdlib.h>
+#endif
+#ifndef LFS_NO_ASSERT
+#include <assert.h>
+#endif
+#if !defined(LFS_NO_DEBUG) || \
+        !defined(LFS_NO_WARN) || \
+        !defined(LFS_NO_ERROR) || \
+        defined(LFS_YES_TRACE)
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+// Macros, may be replaced by system specific wrappers. Arguments to these
+// macros must not have side-effects as the macros can be removed for a smaller
+// code footprint
+
+// Logging functions
+#ifndef LFS_TRACE
+#ifdef LFS_YES_TRACE
+#define LFS_TRACE_(fmt, ...) \
+    printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "")
+#else
+#define LFS_TRACE(...)
+#endif
+#endif
+
+#ifndef LFS_DEBUG
+#ifndef LFS_NO_DEBUG
+#define LFS_DEBUG_(fmt, ...) \
+    printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_DEBUG(...) LFS_DEBUG_(__VA_ARGS__, "")
+#else
+#define LFS_DEBUG(...)
+#endif
+#endif
+
+#ifndef LFS_WARN
+#ifndef LFS_NO_WARN
+#define LFS_WARN_(fmt, ...) \
+    printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
+#define LFS_WARN(...) LFS_WARN_(__VA_ARGS__, "")
+#else
+#define LFS_WARN(...)
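+// (note: with LFS_NO_WARN defined, LFS_WARN expands to nothing, so, as with
+// the other log macros above, its arguments must be free of side-effects)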
+#endif +#endif + +#ifndef LFS_ERROR +#ifndef LFS_NO_ERROR +#define LFS_ERROR_(fmt, ...) \ + printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) +#define LFS_ERROR(...) LFS_ERROR_(__VA_ARGS__, "") +#else +#define LFS_ERROR(...) +#endif +#endif + +// Runtime assertions +#ifndef LFS_ASSERT +#ifndef LFS_NO_ASSERT +#define LFS_ASSERT(test) assert(test) +#else +#define LFS_ASSERT(test) +#endif +#endif + + +// Builtin functions, these may be replaced by more efficient +// toolchain-specific implementations. LFS_NO_INTRINSICS falls back to a more +// expensive basic C implementation for debugging purposes + +// Min/max functions for unsigned 32-bit numbers +static inline uint32_t lfs_max(uint32_t a, uint32_t b) { + return (a > b) ? a : b; +} + +static inline uint32_t lfs_min(uint32_t a, uint32_t b) { + return (a < b) ? a : b; +} + +// Align to nearest multiple of a size +static inline uint32_t lfs_aligndown(uint32_t a, uint32_t alignment) { + return a - (a % alignment); +} + +static inline uint32_t lfs_alignup(uint32_t a, uint32_t alignment) { + return lfs_aligndown(a + alignment-1, alignment); +} + +// Find the smallest power of 2 greater than or equal to a +static inline uint32_t lfs_npw2(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return 32 - __builtin_clz(a-1); +#else + uint32_t r = 0; + uint32_t s; + a -= 1; + s = (a > 0xffff) << 4; a >>= s; r |= s; + s = (a > 0xff ) << 3; a >>= s; r |= s; + s = (a > 0xf ) << 2; a >>= s; r |= s; + s = (a > 0x3 ) << 1; a >>= s; r |= s; + return (r | (a >> 1)) + 1; +#endif +} + +// Count the number of trailing binary zeros in a +// lfs_ctz(0) may be undefined +static inline uint32_t lfs_ctz(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && defined(__GNUC__) + return __builtin_ctz(a); +#else + return lfs_npw2((a & -a) + 1) - 1; +#endif +} + +// Count the number of binary ones in a +static inline uint32_t lfs_popc(uint32_t a) { +#if !defined(LFS_NO_INTRINSICS) && (defined(__GNUC__) || defined(__CC_ARM)) + return __builtin_popcount(a); +#else + a = a - ((a >> 1) & 0x55555555); + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); + return (((a + (a >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24; +#endif +} + +// Find the sequence comparison of a and b, this is the distance +// between a and b ignoring overflow +static inline int lfs_scmp(uint32_t a, uint32_t b) { + return (int)(unsigned)(a - b); +} + +// Convert between 32-bit little-endian and native order +static inline uint32_t lfs_fromle32(uint32_t a) { +#if (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + return a; +#elif !defined(LFS_NO_INTRINSICS) && ( \ + (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \ + (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) + return __builtin_bswap32(a); +#else + return ((uint32_t)((uint8_t*)&a)[0] << 0) | + ((uint32_t)((uint8_t*)&a)[1] << 8) | + ((uint32_t)((uint8_t*)&a)[2] << 16) | + ((uint32_t)((uint8_t*)&a)[3] << 24); +#endif +} + +static inline uint32_t lfs_tole32(uint32_t a) { + return lfs_fromle32(a); +} + +// Convert between 32-bit big-endian and 
native order
+static inline uint32_t lfs_frombe32(uint32_t a) {
+#if !defined(LFS_NO_INTRINSICS) && ( \
+    (defined( BYTE_ORDER ) && defined( ORDER_LITTLE_ENDIAN ) && BYTE_ORDER == ORDER_LITTLE_ENDIAN ) || \
+    (defined(__BYTE_ORDER ) && defined(__ORDER_LITTLE_ENDIAN ) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN ) || \
+    (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+    return __builtin_bswap32(a);
+#elif (defined( BYTE_ORDER ) && defined( ORDER_BIG_ENDIAN ) && BYTE_ORDER == ORDER_BIG_ENDIAN ) || \
+    (defined(__BYTE_ORDER ) && defined(__ORDER_BIG_ENDIAN ) && __BYTE_ORDER == __ORDER_BIG_ENDIAN ) || \
+    (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+    return a;
+#else
+    return ((uint32_t)((uint8_t*)&a)[0] << 24) |
+            ((uint32_t)((uint8_t*)&a)[1] << 16) |
+            ((uint32_t)((uint8_t*)&a)[2] <<  8) |
+            ((uint32_t)((uint8_t*)&a)[3] <<  0);
+#endif
+}
+
+static inline uint32_t lfs_tobe32(uint32_t a) {
+    return lfs_frombe32(a);
+}
+
+// Calculate CRC-32 with polynomial = 0x04c11db7
+#ifdef LFS_CRC
+static inline uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size) {
+    return LFS_CRC(crc, buffer, size);
+}
+#else
+uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
+#endif
+
+// Allocate memory, only used if buffers are not provided to littlefs
+//
+// littlefs currently has no alignment requirements, as it only allocates
+// byte-level buffers.
+static inline void *lfs_malloc(size_t size) {
+#if defined(LFS_MALLOC)
+    return LFS_MALLOC(size);
+#elif !defined(LFS_NO_MALLOC)
+    return malloc(size);
+#else
+    (void)size;
+    return NULL;
+#endif
+}
+
+// Deallocate memory, only used if buffers are not provided to littlefs
+static inline void lfs_free(void *p) {
+#if defined(LFS_FREE)
+    LFS_FREE(p);
+#elif !defined(LFS_NO_MALLOC)
+    free(p);
+#else
+    (void)p;
+#endif
+}
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
+#endif
diff --git a/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.c b/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.c
new file mode 100644
index 0000000..e27c189
--- /dev/null
+++ b/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.c
@@ -0,0 +1,2063 @@
+/*
+ * Runner for littlefs benchmarks
+ *
+ * Copyright (c) 2022, The littlefs authors.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#include "runners/bench_runner.h"
+#include "bd/lfs_emubd.h"
+
+#include <errno.h>
+#include <execinfo.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+
+// some helpers
+
+// append to an array with amortized doubling
+void *mappend(void **p,
+        size_t size,
+        size_t *count,
+        size_t *capacity) {
+    uint8_t *p_ = *p;
+    size_t count_ = *count;
+    size_t capacity_ = *capacity;
+
+    count_ += 1;
+    if (count_ > capacity_) {
+        capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_;
+
+        p_ = realloc(p_, capacity_*size);
+        if (!p_) {
+            return NULL;
+        }
+    }
+
+    *p = p_;
+    *count = count_;
+    *capacity = capacity_;
+    return &p_[(count_-1)*size];
+}
+
+// a quick self-terminating text-safe varint scheme
+static void leb16_print(uintmax_t x) {
+    // allow 'w' to indicate negative numbers
+    if ((intmax_t)x < 0) {
+        printf("w");
+        x = -x;
+    }
+
+    while (true) {
+        char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0);
+        printf("%c", (nibble < 10) ?
'0'+nibble : 'a'+nibble-10); + if (x <= 0xf) { + break; + } + x >>= 4; + } +} + +static uintmax_t leb16_parse(const char *s, char **tail) { + bool neg = false; + uintmax_t x = 0; + if (tail) { + *tail = (char*)s; + } + + if (s[0] == 'w') { + neg = true; + s = s+1; + } + + size_t i = 0; + while (true) { + uintmax_t nibble = s[i]; + if (nibble >= '0' && nibble <= '9') { + nibble = nibble - '0'; + } else if (nibble >= 'a' && nibble <= 'v') { + nibble = nibble - 'a' + 10; + } else { + // invalid? + return 0; + } + + x |= (nibble & 0xf) << (4*i); + i += 1; + if (!(nibble & 0x10)) { + s = s + i; + break; + } + } + + if (tail) { + *tail = (char*)s; + } + return neg ? -x : x; +} + + + +// bench_runner types + +typedef struct bench_geometry { + const char *name; + bench_define_t defines[BENCH_GEOMETRY_DEFINE_COUNT]; +} bench_geometry_t; + +typedef struct bench_id { + const char *name; + const bench_define_t *defines; + size_t define_count; +} bench_id_t; + + +// bench suites are linked into a custom ld section +#if defined(__APPLE__) +extern struct bench_suite __start__bench_suites __asm("section$start$__DATA$_bench_suites"); +extern struct bench_suite __stop__bench_suites __asm("section$end$__DATA$_bench_suites"); +#else +extern struct bench_suite __start__bench_suites; +extern struct bench_suite __stop__bench_suites; +#endif + +const struct bench_suite *bench_suites = &__start__bench_suites; +#define BENCH_SUITE_COUNT \ + ((size_t)(&__stop__bench_suites - &__start__bench_suites)) + + +// bench define management +typedef struct bench_define_map { + const bench_define_t *defines; + size_t count; +} bench_define_map_t; + +typedef struct bench_define_names { + const char *const *names; + size_t count; +} bench_define_names_t; + +intmax_t bench_define_lit(void *data) { + return (intptr_t)data; +} + +#define BENCH_CONST(x) {bench_define_lit, (void*)(uintptr_t)(x)} +#define BENCH_LIT(x) ((bench_define_t)BENCH_CONST(x)) + + +#define BENCH_DEF(k, v) \ + intmax_t bench_define_##k(void *data) { \ + (void)data; \ + return v; \ + } + + BENCH_IMPLICIT_DEFINES +#undef BENCH_DEF + +#define BENCH_DEFINE_MAP_OVERRIDE 0 +#define BENCH_DEFINE_MAP_EXPLICIT 1 +#define BENCH_DEFINE_MAP_PERMUTATION 2 +#define BENCH_DEFINE_MAP_GEOMETRY 3 +#define BENCH_DEFINE_MAP_IMPLICIT 4 +#define BENCH_DEFINE_MAP_COUNT 5 + +bench_define_map_t bench_define_maps[BENCH_DEFINE_MAP_COUNT] = { + [BENCH_DEFINE_MAP_IMPLICIT] = { + (const bench_define_t[BENCH_IMPLICIT_DEFINE_COUNT]) { + #define BENCH_DEF(k, v) \ + [k##_i] = {bench_define_##k, NULL}, + + BENCH_IMPLICIT_DEFINES + #undef BENCH_DEF + }, + BENCH_IMPLICIT_DEFINE_COUNT, + }, +}; + +#define BENCH_DEFINE_NAMES_SUITE 0 +#define BENCH_DEFINE_NAMES_IMPLICIT 1 +#define BENCH_DEFINE_NAMES_COUNT 2 + +bench_define_names_t bench_define_names[BENCH_DEFINE_NAMES_COUNT] = { + [BENCH_DEFINE_NAMES_IMPLICIT] = { + (const char *const[BENCH_IMPLICIT_DEFINE_COUNT]){ + #define BENCH_DEF(k, v) \ + [k##_i] = #k, + + BENCH_IMPLICIT_DEFINES + #undef BENCH_DEF + }, + BENCH_IMPLICIT_DEFINE_COUNT, + }, +}; + +intmax_t *bench_define_cache; +size_t bench_define_cache_count; +unsigned *bench_define_cache_mask; + +const char *bench_define_name(size_t define) { + // lookup in our bench names + for (size_t i = 0; i < BENCH_DEFINE_NAMES_COUNT; i++) { + if (define < bench_define_names[i].count + && bench_define_names[i].names + && bench_define_names[i].names[define]) { + return bench_define_names[i].names[define]; + } + } + + return NULL; +} + +bool bench_define_ispermutation(size_t define) { + // is this define 
specific to the permutation?
+    for (size_t i = 0; i < BENCH_DEFINE_MAP_IMPLICIT; i++) {
+        if (define < bench_define_maps[i].count
+                && bench_define_maps[i].defines[define].cb) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+intmax_t bench_define(size_t define) {
+    // is the define in our cache?
+    if (define < bench_define_cache_count
+            && (bench_define_cache_mask[define/(8*sizeof(unsigned))]
+                & (1 << (define%(8*sizeof(unsigned)))))) {
+        return bench_define_cache[define];
+    }
+
+    // lookup in our bench defines
+    for (size_t i = 0; i < BENCH_DEFINE_MAP_COUNT; i++) {
+        if (define < bench_define_maps[i].count
+                && bench_define_maps[i].defines[define].cb) {
+            intmax_t v = bench_define_maps[i].defines[define].cb(
+                    bench_define_maps[i].defines[define].data);
+
+            // insert into cache!
+            bench_define_cache[define] = v;
+            bench_define_cache_mask[define / (8*sizeof(unsigned))]
+                    |= 1 << (define%(8*sizeof(unsigned)));
+
+            return v;
+        }
+    }
+
+    // not found?
+    const char *name = bench_define_name(define);
+    fprintf(stderr, "error: undefined define %s (%zd)\n",
+            name ? name : "(unknown)",
+            define);
+    assert(false);
+    exit(-1);
+}
+
+void bench_define_flush(void) {
+    // clear cache between permutations
+    memset(bench_define_cache_mask, 0,
+            sizeof(unsigned)*(
+                (bench_define_cache_count+(8*sizeof(unsigned))-1)
+                / (8*sizeof(unsigned))));
+}
+
+// geometry updates
+const bench_geometry_t *bench_geometry = NULL;
+
+void bench_define_geometry(const bench_geometry_t *geometry) {
+    bench_define_maps[BENCH_DEFINE_MAP_GEOMETRY] = (bench_define_map_t){
+            geometry->defines, BENCH_GEOMETRY_DEFINE_COUNT};
+}
+
+// override updates
+typedef struct bench_override {
+    const char *name;
+    const intmax_t *defines;
+    size_t permutations;
+} bench_override_t;
+
+const bench_override_t *bench_overrides = NULL;
+size_t bench_override_count = 0;
+
+bench_define_t *bench_override_defines = NULL;
+size_t bench_override_define_count = 0;
+size_t bench_override_define_permutations = 1;
+size_t bench_override_define_capacity = 0;
+
+// suite/perm updates
+void bench_define_suite(const struct bench_suite *suite) {
+    bench_define_names[BENCH_DEFINE_NAMES_SUITE] = (bench_define_names_t){
+            suite->define_names, suite->define_count};
+
+    // make sure our cache is large enough
+    if (lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT)
+            > bench_define_cache_count) {
+        // align to power of two to avoid any superlinear growth
+        size_t ncount = 1 << lfs_npw2(
+                lfs_max(suite->define_count, BENCH_IMPLICIT_DEFINE_COUNT));
+        bench_define_cache = realloc(bench_define_cache, ncount*sizeof(intmax_t));
+        bench_define_cache_mask = realloc(bench_define_cache_mask,
+                sizeof(unsigned)*(
+                    (ncount+(8*sizeof(unsigned))-1)
+                    / (8*sizeof(unsigned))));
+        bench_define_cache_count = ncount;
+    }
+
+    // map any overrides
+    if (bench_override_count > 0) {
+        // first figure out the total size of override permutations
+        size_t count = 0;
+        size_t permutations = 1;
+        for (size_t i = 0; i < bench_override_count; i++) {
+            for (size_t d = 0;
+                    d < lfs_max(
+                        suite->define_count,
+                        BENCH_IMPLICIT_DEFINE_COUNT);
+                    d++) {
+                // define name match?
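+                // (overrides are matched by name only, so a single --define
+                // on the command line applies to every suite that declares
+                // a define with that name, implicit or explicit)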
+ const char *name = bench_define_name(d); + if (name && strcmp(name, bench_overrides[i].name) == 0) { + count = lfs_max(count, d+1); + permutations *= bench_overrides[i].permutations; + break; + } + } + } + bench_override_define_count = count; + bench_override_define_permutations = permutations; + + // make sure our override arrays are big enough + if (count * permutations > bench_override_define_capacity) { + // align to power of two to avoid any superlinear growth + size_t ncapacity = 1 << lfs_npw2(count * permutations); + bench_override_defines = realloc( + bench_override_defines, + sizeof(bench_define_t)*ncapacity); + bench_override_define_capacity = ncapacity; + } + + // zero unoverridden defines + memset(bench_override_defines, 0, + sizeof(bench_define_t) * count * permutations); + + // compute permutations + size_t p = 1; + for (size_t i = 0; i < bench_override_count; i++) { + for (size_t d = 0; + d < lfs_max( + suite->define_count, + BENCH_IMPLICIT_DEFINE_COUNT); + d++) { + // define name match? + const char *name = bench_define_name(d); + if (name && strcmp(name, bench_overrides[i].name) == 0) { + // scatter the define permutations based on already + // seen permutations + for (size_t j = 0; j < permutations; j++) { + bench_override_defines[j*count + d] = BENCH_LIT( + bench_overrides[i].defines[(j/p) + % bench_overrides[i].permutations]); + } + + // keep track of how many permutations we've seen so far + p *= bench_overrides[i].permutations; + break; + } + } + } + } +} + +void bench_define_perm( + const struct bench_suite *suite, + const struct bench_case *case_, + size_t perm) { + if (case_->defines) { + bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){ + case_->defines + perm*suite->define_count, + suite->define_count}; + } else { + bench_define_maps[BENCH_DEFINE_MAP_PERMUTATION] = (bench_define_map_t){ + NULL, 0}; + } +} + +void bench_define_override(size_t perm) { + bench_define_maps[BENCH_DEFINE_MAP_OVERRIDE] = (bench_define_map_t){ + bench_override_defines + perm*bench_override_define_count, + bench_override_define_count}; +} + +void bench_define_explicit( + const bench_define_t *defines, + size_t define_count) { + bench_define_maps[BENCH_DEFINE_MAP_EXPLICIT] = (bench_define_map_t){ + defines, define_count}; +} + +void bench_define_cleanup(void) { + // bench define management can allocate a few things + free(bench_define_cache); + free(bench_define_cache_mask); + free(bench_override_defines); +} + + + +// bench state +extern const bench_geometry_t *bench_geometries; +extern size_t bench_geometry_count; + +const bench_id_t *bench_ids = (const bench_id_t[]) { + {NULL, NULL, 0}, +}; +size_t bench_id_count = 1; + +size_t bench_step_start = 0; +size_t bench_step_stop = -1; +size_t bench_step_step = 1; + +const char *bench_disk_path = NULL; +const char *bench_trace_path = NULL; +bool bench_trace_backtrace = false; +uint32_t bench_trace_period = 0; +uint32_t bench_trace_freq = 0; +FILE *bench_trace_file = NULL; +uint32_t bench_trace_cycles = 0; +uint64_t bench_trace_time = 0; +uint64_t bench_trace_open_time = 0; +lfs_emubd_sleep_t bench_read_sleep = 0.0; +lfs_emubd_sleep_t bench_prog_sleep = 0.0; +lfs_emubd_sleep_t bench_erase_sleep = 0.0; + +// this determines both the backtrace buffer and the trace printf buffer, if +// trace ends up interleaved or truncated this may need to be increased +#ifndef BENCH_TRACE_BACKTRACE_BUFFER_SIZE +#define BENCH_TRACE_BACKTRACE_BUFFER_SIZE 8192 +#endif +void *bench_trace_backtrace_buffer[ + BENCH_TRACE_BACKTRACE_BUFFER_SIZE 
/ sizeof(void*)]; + +// trace printing +void bench_trace(const char *fmt, ...) { + if (bench_trace_path) { + // sample at a specific period? + if (bench_trace_period) { + if (bench_trace_cycles % bench_trace_period != 0) { + bench_trace_cycles += 1; + return; + } + bench_trace_cycles += 1; + } + + // sample at a specific frequency? + if (bench_trace_freq) { + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000 + + (uint64_t)t.tv_nsec; + if (now - bench_trace_time < (1000*1000*1000) / bench_trace_freq) { + return; + } + bench_trace_time = now; + } + + if (!bench_trace_file) { + // Tracing output is heavy and trying to open every trace + // call is slow, so we only try to open the trace file every + // so often. Note this doesn't affect successfully opened files + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000 + + (uint64_t)t.tv_nsec; + if (now - bench_trace_open_time < 100*1000*1000) { + return; + } + bench_trace_open_time = now; + + // try to open the trace file + int fd; + if (strcmp(bench_trace_path, "-") == 0) { + fd = dup(1); + if (fd < 0) { + return; + } + } else { + fd = open( + bench_trace_path, + O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK, + 0666); + if (fd < 0) { + return; + } + int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND); + assert(!err); + } + + FILE *f = fdopen(fd, "a"); + assert(f); + int err = setvbuf(f, NULL, _IOFBF, + BENCH_TRACE_BACKTRACE_BUFFER_SIZE); + assert(!err); + bench_trace_file = f; + } + + // print trace + va_list va; + va_start(va, fmt); + int res = vfprintf(bench_trace_file, fmt, va); + va_end(va); + if (res < 0) { + fclose(bench_trace_file); + bench_trace_file = NULL; + return; + } + + if (bench_trace_backtrace) { + // print backtrace + size_t count = backtrace( + bench_trace_backtrace_buffer, + BENCH_TRACE_BACKTRACE_BUFFER_SIZE); + // note we skip our own stack frame + for (size_t i = 1; i < count; i++) { + res = fprintf(bench_trace_file, "\tat %p\n", + bench_trace_backtrace_buffer[i]); + if (res < 0) { + fclose(bench_trace_file); + bench_trace_file = NULL; + return; + } + } + } + + // flush immediately + fflush(bench_trace_file); + } +} + + +// bench prng +uint32_t bench_prng(uint32_t *state) { + // A simple xorshift32 generator, easily reproducible. Keep in mind + // determinism is much more important than actual randomness here. 
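+    //
+    // a sketch of typical use (illustrative only):
+    //
+    //   uint32_t state = 42;               // any nonzero seed
+    //   uint32_t r = bench_prng(&state);   // next pseudo-random value
+    //
+    // note that 0 is a fixed point of xorshift32, so a zero seed would
+    // never advance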
+ uint32_t x = *state; + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + *state = x; + return x; +} + + +// bench recording state +static struct lfs_config *bench_cfg = NULL; +static lfs_emubd_io_t bench_last_readed = 0; +static lfs_emubd_io_t bench_last_proged = 0; +static lfs_emubd_io_t bench_last_erased = 0; +lfs_emubd_io_t bench_readed = 0; +lfs_emubd_io_t bench_proged = 0; +lfs_emubd_io_t bench_erased = 0; + +void bench_reset(void) { + bench_readed = 0; + bench_proged = 0; + bench_erased = 0; + bench_last_readed = 0; + bench_last_proged = 0; + bench_last_erased = 0; +} + +void bench_start(void) { + assert(bench_cfg); + lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg); + assert(readed >= 0); + lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg); + assert(proged >= 0); + lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg); + assert(erased >= 0); + + bench_last_readed = readed; + bench_last_proged = proged; + bench_last_erased = erased; +} + +void bench_stop(void) { + assert(bench_cfg); + lfs_emubd_sio_t readed = lfs_emubd_readed(bench_cfg); + assert(readed >= 0); + lfs_emubd_sio_t proged = lfs_emubd_proged(bench_cfg); + assert(proged >= 0); + lfs_emubd_sio_t erased = lfs_emubd_erased(bench_cfg); + assert(erased >= 0); + + bench_readed += readed - bench_last_readed; + bench_proged += proged - bench_last_proged; + bench_erased += erased - bench_last_erased; +} + + +// encode our permutation into a reusable id +static void perm_printid( + const struct bench_suite *suite, + const struct bench_case *case_) { + (void)suite; + // case[:permutation] + printf("%s:", case_->name); + for (size_t d = 0; + d < lfs_max( + suite->define_count, + BENCH_IMPLICIT_DEFINE_COUNT); + d++) { + if (bench_define_ispermutation(d)) { + leb16_print(d); + leb16_print(BENCH_DEFINE(d)); + } + } +} + +// a quick trie for keeping track of permutations we've seen +typedef struct bench_seen { + struct bench_seen_branch *branches; + size_t branch_count; + size_t branch_capacity; +} bench_seen_t; + +struct bench_seen_branch { + intmax_t define; + struct bench_seen branch; +}; + +bool bench_seen_insert( + bench_seen_t *seen, + const struct bench_suite *suite, + const struct bench_case *case_) { + (void)case_; + bool was_seen = true; + + // use the currently set defines + for (size_t d = 0; + d < lfs_max( + suite->define_count, + BENCH_IMPLICIT_DEFINE_COUNT); + d++) { + // treat unpermuted defines the same as 0 + intmax_t define = bench_define_ispermutation(d) ? BENCH_DEFINE(d) : 0; + + // already seen? 
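+        // (each trie level corresponds to one define; the permutation is
+        // new only if some level below required a fresh branch)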
+ struct bench_seen_branch *branch = NULL; + for (size_t i = 0; i < seen->branch_count; i++) { + if (seen->branches[i].define == define) { + branch = &seen->branches[i]; + break; + } + } + + // need to create a new node + if (!branch) { + was_seen = false; + branch = mappend( + (void**)&seen->branches, + sizeof(struct bench_seen_branch), + &seen->branch_count, + &seen->branch_capacity); + branch->define = define; + branch->branch = (bench_seen_t){NULL, 0, 0}; + } + + seen = &branch->branch; + } + + return was_seen; +} + +void bench_seen_cleanup(bench_seen_t *seen) { + for (size_t i = 0; i < seen->branch_count; i++) { + bench_seen_cleanup(&seen->branches[i].branch); + } + free(seen->branches); +} + +// iterate through permutations in a bench case +static void case_forperm( + const struct bench_suite *suite, + const struct bench_case *case_, + const bench_define_t *defines, + size_t define_count, + void (*cb)( + void *data, + const struct bench_suite *suite, + const struct bench_case *case_), + void *data) { + // explicit permutation? + if (defines) { + bench_define_explicit(defines, define_count); + + for (size_t v = 0; v < bench_override_define_permutations; v++) { + // define override permutation + bench_define_override(v); + bench_define_flush(); + + cb(data, suite, case_); + } + + return; + } + + bench_seen_t seen = {NULL, 0, 0}; + + for (size_t k = 0; k < case_->permutations; k++) { + // define permutation + bench_define_perm(suite, case_, k); + + for (size_t v = 0; v < bench_override_define_permutations; v++) { + // define override permutation + bench_define_override(v); + + for (size_t g = 0; g < bench_geometry_count; g++) { + // define geometry + bench_define_geometry(&bench_geometries[g]); + bench_define_flush(); + + // have we seen this permutation before? + bool was_seen = bench_seen_insert(&seen, suite, case_); + if (!(k == 0 && v == 0 && g == 0) && was_seen) { + continue; + } + + cb(data, suite, case_); + } + } + } + + bench_seen_cleanup(&seen); +} + + +// how many permutations are there actually in a bench case +struct perm_count_state { + size_t total; + size_t filtered; +}; + +void perm_count( + void *data, + const struct bench_suite *suite, + const struct bench_case *case_) { + struct perm_count_state *state = data; + (void)suite; + (void)case_; + + state->total += 1; + + if (case_->filter && !case_->filter()) { + return; + } + + state->filtered += 1; +} + + +// operations we can do +static void summary(void) { + printf("%-23s %7s %7s %7s %11s\n", + "", "flags", "suites", "cases", "perms"); + size_t suites = 0; + size_t cases = 0; + bench_flags_t flags = 0; + struct perm_count_state perms = {0, 0}; + + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (bench_ids[t].name && !( + strcmp(bench_ids[t].name, + bench_suites[i].name) == 0 + || strcmp(bench_ids[t].name, + bench_suites[i].cases[j].name) == 0)) { + continue; + } + + cases += 1; + case_forperm( + &bench_suites[i], + &bench_suites[i].cases[j], + bench_ids[t].defines, + bench_ids[t].define_count, + perm_count, + &perms); + } + + suites += 1; + flags |= bench_suites[i].flags; + } + } + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (flags & BENCH_REENTRANT) ? "r" : "", + (!flags) ? 
"-" : ""); + printf("%-23s %7s %7zu %7zu %11s\n", + "TOTAL", + flag_buf, + suites, + cases, + perm_buf); +} + +static void list_suites(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + size_t len = strlen(bench_suites[i].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %7s %7s %11s\n", + name_width, "suite", "flags", "cases", "perms"); + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + size_t cases = 0; + struct perm_count_state perms = {0, 0}; + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (bench_ids[t].name && !( + strcmp(bench_ids[t].name, + bench_suites[i].name) == 0 + || strcmp(bench_ids[t].name, + bench_suites[i].cases[j].name) == 0)) { + continue; + } + + cases += 1; + case_forperm( + &bench_suites[i], + &bench_suites[i].cases[j], + bench_ids[t].defines, + bench_ids[t].define_count, + perm_count, + &perms); + } + + // no benches found? + if (!cases) { + continue; + } + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (bench_suites[i].flags & BENCH_REENTRANT) ? "r" : "", + (!bench_suites[i].flags) ? "-" : ""); + printf("%-*s %7s %7zu %11s\n", + name_width, + bench_suites[i].name, + flag_buf, + cases, + perm_buf); + } + } +} + +static void list_cases(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + size_t len = strlen(bench_suites[i].cases[j].name); + if (len > name_width) { + name_width = len; + } + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms"); + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (bench_ids[t].name && !( + strcmp(bench_ids[t].name, + bench_suites[i].name) == 0 + || strcmp(bench_ids[t].name, + bench_suites[i].cases[j].name) == 0)) { + continue; + } + + struct perm_count_state perms = {0, 0}; + case_forperm( + &bench_suites[i], + &bench_suites[i].cases[j], + bench_ids[t].defines, + bench_ids[t].define_count, + perm_count, + &perms); + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (bench_suites[i].cases[j].flags & BENCH_REENTRANT) + ? "r" : "", + (!bench_suites[i].cases[j].flags) + ? "-" : ""); + printf("%-*s %7s %11s\n", + name_width, + bench_suites[i].cases[j].name, + flag_buf, + perm_buf); + } + } + } +} + +static void list_suite_paths(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + size_t len = strlen(bench_suites[i].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %s\n", name_width, "suite", "path"); + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + size_t cases = 0; + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? 
+                if (bench_ids[t].name && !(
+                        strcmp(bench_ids[t].name,
+                            bench_suites[i].name) == 0
+                        || strcmp(bench_ids[t].name,
+                            bench_suites[i].cases[j].name) == 0)) {
+                    continue;
+                }
+
+                cases += 1;
+            }
+
+            // no benches found?
+            if (!cases) {
+                continue;
+            }
+
+            printf("%-*s %s\n",
+                    name_width,
+                    bench_suites[i].name,
+                    bench_suites[i].path);
+        }
+    }
+}
+
+static void list_case_paths(void) {
+    // at least size so that names fit
+    unsigned name_width = 23;
+    for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+        for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+            size_t len = strlen(bench_suites[i].cases[j].name);
+            if (len > name_width) {
+                name_width = len;
+            }
+        }
+    }
+    name_width = 4*((name_width+1+4-1)/4)-1;
+
+    printf("%-*s %s\n", name_width, "case", "path");
+    for (size_t t = 0; t < bench_id_count; t++) {
+        for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) {
+            for (size_t j = 0; j < bench_suites[i].case_count; j++) {
+                // does neither suite nor case name match?
+                if (bench_ids[t].name && !(
+                        strcmp(bench_ids[t].name,
+                            bench_suites[i].name) == 0
+                        || strcmp(bench_ids[t].name,
+                            bench_suites[i].cases[j].name) == 0)) {
+                    continue;
+                }
+
+                printf("%-*s %s\n",
+                        name_width,
+                        bench_suites[i].cases[j].name,
+                        bench_suites[i].cases[j].path);
+            }
+        }
+    }
+}
+
+struct list_defines_define {
+    const char *name;
+    intmax_t *values;
+    size_t value_count;
+    size_t value_capacity;
+};
+
+struct list_defines_defines {
+    struct list_defines_define *defines;
+    size_t define_count;
+    size_t define_capacity;
+};
+
+static void list_defines_add(
+        struct list_defines_defines *defines,
+        size_t d) {
+    const char *name = bench_define_name(d);
+    intmax_t value = BENCH_DEFINE(d);
+
+    // define already in defines?
+    for (size_t i = 0; i < defines->define_count; i++) {
+        if (strcmp(defines->defines[i].name, name) == 0) {
+            // value already in values?
+            for (size_t j = 0; j < defines->defines[i].value_count; j++) {
+                if (defines->defines[i].values[j] == value) {
+                    return;
+                }
+            }
+
+            *(intmax_t*)mappend(
+                    (void**)&defines->defines[i].values,
+                    sizeof(intmax_t),
+                    &defines->defines[i].value_count,
+                    &defines->defines[i].value_capacity) = value;
+
+            return;
+        }
+    }
+
+    // new define?
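+    // (first time this name appears: append a new entry with a fresh
+    // single-value list; note mappend may reallocate the defines array)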
+ struct list_defines_define *define = mappend( + (void**)&defines->defines, + sizeof(struct list_defines_define), + &defines->define_count, + &defines->define_capacity); + define->name = name; + define->values = malloc(sizeof(intmax_t)); + define->values[0] = value; + define->value_count = 1; + define->value_capacity = 1; +} + +void perm_list_defines( + void *data, + const struct bench_suite *suite, + const struct bench_case *case_) { + struct list_defines_defines *defines = data; + (void)suite; + (void)case_; + + // collect defines + for (size_t d = 0; + d < lfs_max(suite->define_count, + BENCH_IMPLICIT_DEFINE_COUNT); + d++) { + if (d < BENCH_IMPLICIT_DEFINE_COUNT + || bench_define_ispermutation(d)) { + list_defines_add(defines, d); + } + } +} + +void perm_list_permutation_defines( + void *data, + const struct bench_suite *suite, + const struct bench_case *case_) { + struct list_defines_defines *defines = data; + (void)suite; + (void)case_; + + // collect permutation_defines + for (size_t d = 0; + d < lfs_max(suite->define_count, + BENCH_IMPLICIT_DEFINE_COUNT); + d++) { + if (bench_define_ispermutation(d)) { + list_defines_add(defines, d); + } + } +} + +extern const bench_geometry_t builtin_geometries[]; + +static void list_defines(void) { + struct list_defines_defines defines = {NULL, 0, 0}; + + // add defines + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (bench_ids[t].name && !( + strcmp(bench_ids[t].name, + bench_suites[i].name) == 0 + || strcmp(bench_ids[t].name, + bench_suites[i].cases[j].name) == 0)) { + continue; + } + + case_forperm( + &bench_suites[i], + &bench_suites[i].cases[j], + bench_ids[t].defines, + bench_ids[t].define_count, + perm_list_defines, + &defines); + } + } + } + + for (size_t i = 0; i < defines.define_count; i++) { + printf("%s=", defines.defines[i].name); + for (size_t j = 0; j < defines.defines[i].value_count; j++) { + printf("%jd", defines.defines[i].values[j]); + if (j != defines.defines[i].value_count-1) { + printf(","); + } + } + printf("\n"); + } + + for (size_t i = 0; i < defines.define_count; i++) { + free(defines.defines[i].values); + } + free(defines.defines); +} + +static void list_permutation_defines(void) { + struct list_defines_defines defines = {NULL, 0, 0}; + + // add permutation defines + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? 
+                if (bench_ids[t].name && !(
+                        strcmp(bench_ids[t].name,
+                            bench_suites[i].name) == 0
+                        || strcmp(bench_ids[t].name,
+                            bench_suites[i].cases[j].name) == 0)) {
+                    continue;
+                }
+
+                case_forperm(
+                        &bench_suites[i],
+                        &bench_suites[i].cases[j],
+                        bench_ids[t].defines,
+                        bench_ids[t].define_count,
+                        perm_list_permutation_defines,
+                        &defines);
+            }
+        }
+    }
+
+    for (size_t i = 0; i < defines.define_count; i++) {
+        printf("%s=", defines.defines[i].name);
+        for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+            printf("%jd", defines.defines[i].values[j]);
+            if (j != defines.defines[i].value_count-1) {
+                printf(",");
+            }
+        }
+        printf("\n");
+    }
+
+    for (size_t i = 0; i < defines.define_count; i++) {
+        free(defines.defines[i].values);
+    }
+    free(defines.defines);
+}
+
+static void list_implicit_defines(void) {
+    struct list_defines_defines defines = {NULL, 0, 0};
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+    // such as setting up the define cache
+    bench_define_suite(&(const struct bench_suite){0});
+
+    // make sure to include builtin geometries here
+    extern const bench_geometry_t builtin_geometries[];
+    for (size_t g = 0; builtin_geometries[g].name; g++) {
+        bench_define_geometry(&builtin_geometries[g]);
+        bench_define_flush();
+
+        // add implicit defines
+        for (size_t d = 0; d < BENCH_IMPLICIT_DEFINE_COUNT; d++) {
+            list_defines_add(&defines, d);
+        }
+    }
+
+    for (size_t i = 0; i < defines.define_count; i++) {
+        printf("%s=", defines.defines[i].name);
+        for (size_t j = 0; j < defines.defines[i].value_count; j++) {
+            printf("%jd", defines.defines[i].values[j]);
+            if (j != defines.defines[i].value_count-1) {
+                printf(",");
+            }
+        }
+        printf("\n");
+    }
+
+    for (size_t i = 0; i < defines.define_count; i++) {
+        free(defines.defines[i].values);
+    }
+    free(defines.defines);
+}
+
+
+
+// geometries to bench
+
+const bench_geometry_t builtin_geometries[] = {
+    {"default", {{0}, BENCH_CONST(16),   BENCH_CONST(512),   {0}}},
+    {"eeprom",  {{0}, BENCH_CONST(1),    BENCH_CONST(512),   {0}}},
+    {"emmc",    {{0}, {0},               BENCH_CONST(512),   {0}}},
+    {"nor",     {{0}, BENCH_CONST(1),    BENCH_CONST(4096),  {0}}},
+    {"nand",    {{0}, BENCH_CONST(4096), BENCH_CONST(32768), {0}}},
+    {NULL, {{0}, {0}, {0}, {0}}},
+};
+
+const bench_geometry_t *bench_geometries = builtin_geometries;
+size_t bench_geometry_count = 5;
+
+static void list_geometries(void) {
+    // at least size so that names fit
+    unsigned name_width = 23;
+    for (size_t g = 0; builtin_geometries[g].name; g++) {
+        size_t len = strlen(builtin_geometries[g].name);
+        if (len > name_width) {
+            name_width = len;
+        }
+    }
+    name_width = 4*((name_width+1+4-1)/4)-1;
+
+    // yes we do need to define a suite, this does a bit of bookkeeping
+    // such as setting up the define cache
+    bench_define_suite(&(const struct bench_suite){0});
+
+    printf("%-*s %7s %7s %7s %7s %11s\n",
+            name_width, "geometry", "read", "prog", "erase", "count", "size");
+    for (size_t g = 0; builtin_geometries[g].name; g++) {
+        bench_define_geometry(&builtin_geometries[g]);
+        bench_define_flush();
+        printf("%-*s %7ju %7ju %7ju %7ju %11ju\n",
+                name_width,
+                builtin_geometries[g].name,
+                READ_SIZE,
+                PROG_SIZE,
+                ERASE_SIZE,
+                ERASE_COUNT,
+                ERASE_SIZE*ERASE_COUNT);
+    }
+}
+
+
+
+// global bench step count
+size_t bench_step = 0;
+
+void perm_run(
+        void *data,
+        const struct bench_suite *suite,
+        const struct bench_case *case_) {
+    (void)data;
+
+    // skip this step?
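+    // (bench_step counts every permutation across all suites and cases, so
+    // the -s/--step option selects a start,stop,step slice of the whole run)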
+ if (!(bench_step >= bench_step_start + && bench_step < bench_step_stop + && (bench_step-bench_step_start) % bench_step_step == 0)) { + bench_step += 1; + return; + } + bench_step += 1; + + // filter? + if (case_->filter && !case_->filter()) { + printf("skipped "); + perm_printid(suite, case_); + printf("\n"); + return; + } + + // create block device and configuration + lfs_emubd_t bd; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + }; + + struct lfs_emubd_config bdcfg = { + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = bench_disk_path, + .read_sleep = bench_read_sleep, + .prog_sleep = bench_prog_sleep, + .erase_sleep = bench_erase_sleep, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the bench + bench_cfg = &cfg; + bench_reset(); + printf("running "); + perm_printid(suite, case_); + printf("\n"); + + case_->run(&cfg); + + printf("finished "); + perm_printid(suite, case_); + printf(" %"PRIu64" %"PRIu64" %"PRIu64, + bench_readed, + bench_proged, + bench_erased); + printf("\n"); + + // cleanup + err = lfs_emubd_destroy(&cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } +} + +static void run(void) { + // ignore disconnected pipes + signal(SIGPIPE, SIG_IGN); + + for (size_t t = 0; t < bench_id_count; t++) { + for (size_t i = 0; i < BENCH_SUITE_COUNT; i++) { + bench_define_suite(&bench_suites[i]); + + for (size_t j = 0; j < bench_suites[i].case_count; j++) { + // does neither suite nor case name match? 
+ if (bench_ids[t].name && !( + strcmp(bench_ids[t].name, + bench_suites[i].name) == 0 + || strcmp(bench_ids[t].name, + bench_suites[i].cases[j].name) == 0)) { + continue; + } + + case_forperm( + &bench_suites[i], + &bench_suites[i].cases[j], + bench_ids[t].defines, + bench_ids[t].define_count, + perm_run, + NULL); + } + } + } +} + + + +// option handling +enum opt_flags { + OPT_HELP = 'h', + OPT_SUMMARY = 'Y', + OPT_LIST_SUITES = 'l', + OPT_LIST_CASES = 'L', + OPT_LIST_SUITE_PATHS = 1, + OPT_LIST_CASE_PATHS = 2, + OPT_LIST_DEFINES = 3, + OPT_LIST_PERMUTATION_DEFINES = 4, + OPT_LIST_IMPLICIT_DEFINES = 5, + OPT_LIST_GEOMETRIES = 6, + OPT_DEFINE = 'D', + OPT_GEOMETRY = 'G', + OPT_STEP = 's', + OPT_DISK = 'd', + OPT_TRACE = 't', + OPT_TRACE_BACKTRACE = 7, + OPT_TRACE_PERIOD = 8, + OPT_TRACE_FREQ = 9, + OPT_READ_SLEEP = 10, + OPT_PROG_SLEEP = 11, + OPT_ERASE_SLEEP = 12, +}; + +const char *short_opts = "hYlLD:G:s:d:t:"; + +const struct option long_opts[] = { + {"help", no_argument, NULL, OPT_HELP}, + {"summary", no_argument, NULL, OPT_SUMMARY}, + {"list-suites", no_argument, NULL, OPT_LIST_SUITES}, + {"list-cases", no_argument, NULL, OPT_LIST_CASES}, + {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS}, + {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS}, + {"list-defines", no_argument, NULL, OPT_LIST_DEFINES}, + {"list-permutation-defines", + no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES}, + {"list-implicit-defines", + no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES}, + {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES}, + {"define", required_argument, NULL, OPT_DEFINE}, + {"geometry", required_argument, NULL, OPT_GEOMETRY}, + {"step", required_argument, NULL, OPT_STEP}, + {"disk", required_argument, NULL, OPT_DISK}, + {"trace", required_argument, NULL, OPT_TRACE}, + {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE}, + {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD}, + {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ}, + {"read-sleep", required_argument, NULL, OPT_READ_SLEEP}, + {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP}, + {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP}, + {NULL, 0, NULL, 0}, +}; + +const char *const help_text[] = { + "Show this help message.", + "Show quick summary.", + "List bench suites.", + "List bench cases.", + "List the path for each bench suite.", + "List the path and line number for each bench case.", + "List all defines in this bench-runner.", + "List explicit defines in this bench-runner.", + "List implicit defines in this bench-runner.", + "List the available disk geometries.", + "Override a bench define.", + "Comma-separated list of disk geometries to bench.", + "Comma-separated range of bench permutations to run (start,stop,step).", + "Direct block device operations to this file.", + "Direct trace output to this file.", + "Include a backtrace with every trace statement.", + "Sample trace output at this period in cycles.", + "Sample trace output at this frequency in hz.", + "Artificial read delay in seconds.", + "Artificial prog delay in seconds.", + "Artificial erase delay in seconds.", +}; + +int main(int argc, char **argv) { + void (*op)(void) = run; + + size_t bench_override_capacity = 0; + size_t bench_geometry_capacity = 0; + size_t bench_id_capacity = 0; + + // parse options + while (true) { + int c = getopt_long(argc, argv, short_opts, long_opts, NULL); + switch (c) { + // generate help message + case OPT_HELP: { + printf("usage: %s [options] [bench_id]\n", argv[0]); + 
printf("\n"); + + printf("options:\n"); + size_t i = 0; + while (long_opts[i].name) { + size_t indent; + if (long_opts[i].has_arg == no_argument) { + if (long_opts[i].val >= '0' && long_opts[i].val < 'z') { + indent = printf(" -%c, --%s ", + long_opts[i].val, + long_opts[i].name); + } else { + indent = printf(" --%s ", + long_opts[i].name); + } + } else { + if (long_opts[i].val >= '0' && long_opts[i].val < 'z') { + indent = printf(" -%c %s, --%s %s ", + long_opts[i].val, + long_opts[i].name, + long_opts[i].name, + long_opts[i].name); + } else { + indent = printf(" --%s %s ", + long_opts[i].name, + long_opts[i].name); + } + } + + // a quick, hacky, byte-level method for text wrapping + size_t len = strlen(help_text[i]); + size_t j = 0; + if (indent < 24) { + printf("%*s %.80s\n", + (int)(24-1-indent), + "", + &help_text[i][j]); + j += 80; + } else { + printf("\n"); + } + + while (j < len) { + printf("%24s%.80s\n", "", &help_text[i][j]); + j += 80; + } + + i += 1; + } + + printf("\n"); + exit(0); + } + // summary/list flags + case OPT_SUMMARY: + op = summary; + break; + case OPT_LIST_SUITES: + op = list_suites; + break; + case OPT_LIST_CASES: + op = list_cases; + break; + case OPT_LIST_SUITE_PATHS: + op = list_suite_paths; + break; + case OPT_LIST_CASE_PATHS: + op = list_case_paths; + break; + case OPT_LIST_DEFINES: + op = list_defines; + break; + case OPT_LIST_PERMUTATION_DEFINES: + op = list_permutation_defines; + break; + case OPT_LIST_IMPLICIT_DEFINES: + op = list_implicit_defines; + break; + case OPT_LIST_GEOMETRIES: + op = list_geometries; + break; + // configuration + case OPT_DEFINE: { + // allocate space + bench_override_t *override = mappend( + (void**)&bench_overrides, + sizeof(bench_override_t), + &bench_override_count, + &bench_override_capacity); + + // parse into string key/intmax_t value, cannibalizing the + // arg in the process + char *sep = strchr(optarg, '='); + char *parsed = NULL; + if (!sep) { + goto invalid_define; + } + *sep = '\0'; + override->name = optarg; + optarg = sep+1; + + // parse comma-separated permutations + { + override->defines = NULL; + override->permutations = 0; + size_t override_capacity = 0; + while (true) { + optarg += strspn(optarg, " "); + + if (strncmp(optarg, "range", strlen("range")) == 0) { + // range of values + optarg += strlen("range"); + optarg += strspn(optarg, " "); + if (*optarg != '(') { + goto invalid_define; + } + optarg += 1; + + intmax_t start = strtoumax(optarg, &parsed, 0); + intmax_t stop = -1; + intmax_t step = 1; + // allow empty string for start=0 + if (parsed == optarg) { + start = 0; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != ',' && *optarg != ')') { + goto invalid_define; + } + + if (*optarg == ',') { + optarg += 1; + stop = strtoumax(optarg, &parsed, 0); + // allow empty string for stop=end + if (parsed == optarg) { + stop = -1; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != ',' && *optarg != ')') { + goto invalid_define; + } + + if (*optarg == ',') { + optarg += 1; + step = strtoumax(optarg, &parsed, 0); + // allow empty string for stop=1 + if (parsed == optarg) { + step = 1; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != ')') { + goto invalid_define; + } + } + } else { + // single value = stop only + stop = start; + start = 0; + } + + if (*optarg != ')') { + goto invalid_define; + } + optarg += 1; + + // calculate the range of values + assert(step != 0); + for (intmax_t i = start; + (step < 0) + ? 
i > stop + : (uintmax_t)i < (uintmax_t)stop; + i += step) { + *(intmax_t*)mappend( + (void**)&override->defines, + sizeof(intmax_t), + &override->permutations, + &override_capacity) = i; + } + } else if (*optarg != '\0') { + // single value + intmax_t define = strtoimax(optarg, &parsed, 0); + if (parsed == optarg) { + goto invalid_define; + } + optarg = parsed + strspn(parsed, " "); + *(intmax_t*)mappend( + (void**)&override->defines, + sizeof(intmax_t), + &override->permutations, + &override_capacity) = define; + } else { + break; + } + + if (*optarg == ',') { + optarg += 1; + } + } + } + assert(override->permutations > 0); + break; + +invalid_define: + fprintf(stderr, "error: invalid define: %s\n", optarg); + exit(-1); + } + case OPT_GEOMETRY: { + // reset our geometry scenarios + if (bench_geometry_capacity > 0) { + free((bench_geometry_t*)bench_geometries); + } + bench_geometries = NULL; + bench_geometry_count = 0; + bench_geometry_capacity = 0; + + // parse the comma separated list of disk geometries + while (*optarg) { + // allocate space + bench_geometry_t *geometry = mappend( + (void**)&bench_geometries, + sizeof(bench_geometry_t), + &bench_geometry_count, + &bench_geometry_capacity); + + // parse the disk geometry + optarg += strspn(optarg, " "); + + // named disk geometry + size_t len = strcspn(optarg, " ,"); + for (size_t i = 0; builtin_geometries[i].name; i++) { + if (len == strlen(builtin_geometries[i].name) + && memcmp(optarg, + builtin_geometries[i].name, + len) == 0) { + *geometry = builtin_geometries[i]; + optarg += len; + goto geometry_next; + } + } + + // comma-separated read/prog/erase/count + if (*optarg == '{') { + lfs_size_t sizes[4]; + size_t count = 0; + + char *s = optarg + 1; + while (count < 4) { + char *parsed = NULL; + sizes[count] = strtoumax(s, &parsed, 0); + count += 1; + + s = parsed + strspn(parsed, " "); + if (*s == ',') { + s += 1; + continue; + } else if (*s == '}') { + s += 1; + break; + } else { + goto geometry_unknown; + } + } + + // allow implicit r=p and p=e for common geometries + memset(geometry, 0, sizeof(bench_geometry_t)); + if (count >= 3) { + geometry->defines[READ_SIZE_i] + = BENCH_LIT(sizes[0]); + geometry->defines[PROG_SIZE_i] + = BENCH_LIT(sizes[1]); + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[2]); + } else if (count >= 2) { + geometry->defines[PROG_SIZE_i] + = BENCH_LIT(sizes[0]); + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[1]); + } else { + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[0]); + } + if (count >= 4) { + geometry->defines[ERASE_COUNT_i] + = BENCH_LIT(sizes[3]); + } + optarg = s; + goto geometry_next; + } + + // leb16-encoded read/prog/erase/count + if (*optarg == ':') { + lfs_size_t sizes[4]; + size_t count = 0; + + char *s = optarg + 1; + while (true) { + char *parsed = NULL; + uintmax_t x = leb16_parse(s, &parsed); + if (parsed == s || count >= 4) { + break; + } + + sizes[count] = x; + count += 1; + s = parsed; + } + + // allow implicit r=p and p=e for common geometries + memset(geometry, 0, sizeof(bench_geometry_t)); + if (count >= 3) { + geometry->defines[READ_SIZE_i] + = BENCH_LIT(sizes[0]); + geometry->defines[PROG_SIZE_i] + = BENCH_LIT(sizes[1]); + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[2]); + } else if (count >= 2) { + geometry->defines[PROG_SIZE_i] + = BENCH_LIT(sizes[0]); + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[1]); + } else { + geometry->defines[ERASE_SIZE_i] + = BENCH_LIT(sizes[0]); + } + if (count >= 4) { + geometry->defines[ERASE_COUNT_i] + = 
BENCH_LIT(sizes[3]); + } + optarg = s; + goto geometry_next; + } + +geometry_unknown: + // unknown scenario? + fprintf(stderr, "error: unknown disk geometry: %s\n", + optarg); + exit(-1); + +geometry_next: + optarg += strspn(optarg, " "); + if (*optarg == ',') { + optarg += 1; + } else if (*optarg == '\0') { + break; + } else { + goto geometry_unknown; + } + } + break; + } + case OPT_STEP: { + char *parsed = NULL; + bench_step_start = strtoumax(optarg, &parsed, 0); + bench_step_stop = -1; + bench_step_step = 1; + // allow empty string for start=0 + if (parsed == optarg) { + bench_step_start = 0; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != ',' && *optarg != '\0') { + goto step_unknown; + } + + if (*optarg == ',') { + optarg += 1; + bench_step_stop = strtoumax(optarg, &parsed, 0); + // allow empty string for stop=end + if (parsed == optarg) { + bench_step_stop = -1; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != ',' && *optarg != '\0') { + goto step_unknown; + } + + if (*optarg == ',') { + optarg += 1; + bench_step_step = strtoumax(optarg, &parsed, 0); + // allow empty string for stop=1 + if (parsed == optarg) { + bench_step_step = 1; + } + optarg = parsed + strspn(parsed, " "); + + if (*optarg != '\0') { + goto step_unknown; + } + } + } else { + // single value = stop only + bench_step_stop = bench_step_start; + bench_step_start = 0; + } + + break; +step_unknown: + fprintf(stderr, "error: invalid step: %s\n", optarg); + exit(-1); + } + case OPT_DISK: + bench_disk_path = optarg; + break; + case OPT_TRACE: + bench_trace_path = optarg; + break; + case OPT_TRACE_BACKTRACE: + bench_trace_backtrace = true; + break; + case OPT_TRACE_PERIOD: { + char *parsed = NULL; + bench_trace_period = strtoumax(optarg, &parsed, 0); + if (parsed == optarg) { + fprintf(stderr, "error: invalid trace-period: %s\n", optarg); + exit(-1); + } + break; + } + case OPT_TRACE_FREQ: { + char *parsed = NULL; + bench_trace_freq = strtoumax(optarg, &parsed, 0); + if (parsed == optarg) { + fprintf(stderr, "error: invalid trace-freq: %s\n", optarg); + exit(-1); + } + break; + } + case OPT_READ_SLEEP: { + char *parsed = NULL; + double read_sleep = strtod(optarg, &parsed); + if (parsed == optarg) { + fprintf(stderr, "error: invalid read-sleep: %s\n", optarg); + exit(-1); + } + bench_read_sleep = read_sleep*1.0e9; + break; + } + case OPT_PROG_SLEEP: { + char *parsed = NULL; + double prog_sleep = strtod(optarg, &parsed); + if (parsed == optarg) { + fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg); + exit(-1); + } + bench_prog_sleep = prog_sleep*1.0e9; + break; + } + case OPT_ERASE_SLEEP: { + char *parsed = NULL; + double erase_sleep = strtod(optarg, &parsed); + if (parsed == optarg) { + fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg); + exit(-1); + } + bench_erase_sleep = erase_sleep*1.0e9; + break; + } + // done parsing + case -1: + goto getopt_done; + // unknown arg, getopt prints a message for us + default: + exit(-1); + } + } +getopt_done: ; + + if (argc > optind) { + // reset our bench identifier list + bench_ids = NULL; + bench_id_count = 0; + bench_id_capacity = 0; + } + + // parse bench identifier, if any, cannibalizing the arg in the process + for (; argc > optind; optind++) { + bench_define_t *defines = NULL; + size_t define_count = 0; + + // parse name, can be suite or case + char *name = argv[optind]; + char *defines_ = strchr(name, ':'); + if (defines_) { + *defines_ = '\0'; + defines_ += 1; + } + + // remove optional path and .toml suffix + char *slash = 
strrchr(name, '/'); + if (slash) { + name = slash+1; + } + + size_t name_len = strlen(name); + if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) { + name[name_len-5] = '\0'; + } + + if (defines_) { + // parse defines + while (true) { + char *parsed; + size_t d = leb16_parse(defines_, &parsed); + intmax_t v = leb16_parse(parsed, &parsed); + if (parsed == defines_) { + break; + } + defines_ = parsed; + + if (d >= define_count) { + // align to power of two to avoid any superlinear growth + size_t ncount = 1 << lfs_npw2(d+1); + defines = realloc(defines, + ncount*sizeof(bench_define_t)); + memset(defines+define_count, 0, + (ncount-define_count)*sizeof(bench_define_t)); + define_count = ncount; + } + defines[d] = BENCH_LIT(v); + } + } + + // append to identifier list + *(bench_id_t*)mappend( + (void**)&bench_ids, + sizeof(bench_id_t), + &bench_id_count, + &bench_id_capacity) = (bench_id_t){ + .name = name, + .defines = defines, + .define_count = define_count, + }; + } + + // do the thing + op(); + + // cleanup (need to be done for valgrind benching) + bench_define_cleanup(); + if (bench_overrides) { + for (size_t i = 0; i < bench_override_count; i++) { + free((void*)bench_overrides[i].defines); + } + free((void*)bench_overrides); + } + if (bench_geometry_capacity) { + free((void*)bench_geometries); + } + if (bench_id_capacity) { + for (size_t i = 0; i < bench_id_count; i++) { + free((void*)bench_ids[i].defines); + } + free((void*)bench_ids); + } +} diff --git a/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.h b/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.h new file mode 100644 index 0000000..848b5e8 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/runners/bench_runner.h @@ -0,0 +1,146 @@ +/* + * Runner for littlefs benchmarks + * + * Copyright (c) 2022, The littlefs authors. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef BENCH_RUNNER_H +#define BENCH_RUNNER_H + + +// override LFS_TRACE +void bench_trace(const char *fmt, ...); + +#define LFS_TRACE_(fmt, ...) \ + bench_trace("%s:%d:trace: " fmt "%s\n", \ + __FILE__, \ + __LINE__, \ + __VA_ARGS__) +#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "") +#define LFS_EMUBD_TRACE(...) 
LFS_TRACE_(__VA_ARGS__, "") + +// provide BENCH_START/BENCH_STOP macros +void bench_start(void); +void bench_stop(void); + +#define BENCH_START() bench_start() +#define BENCH_STOP() bench_stop() + + +// note these are indirectly included in any generated files +#include "bd/lfs_emubd.h" +#include + +// give source a chance to define feature macros +#undef _FEATURES_H +#undef _STDIO_H + + +// generated bench configurations +struct lfs_config; + +enum bench_flags { + BENCH_REENTRANT = 0x1, +}; +typedef uint8_t bench_flags_t; + +typedef struct bench_define { + intmax_t (*cb)(void *data); + void *data; +} bench_define_t; + +struct bench_case { + const char *name; + const char *path; + bench_flags_t flags; + size_t permutations; + + const bench_define_t *defines; + + bool (*filter)(void); + void (*run)(struct lfs_config *cfg); +}; + +struct bench_suite { + const char *name; + const char *path; + bench_flags_t flags; + + const char *const *define_names; + size_t define_count; + + const struct bench_case *cases; + size_t case_count; +}; + + +// deterministic prng for pseudo-randomness in benches +uint32_t bench_prng(uint32_t *state); + +#define BENCH_PRNG(state) bench_prng(state) + + +// access generated bench defines +intmax_t bench_define(size_t define); + +#define BENCH_DEFINE(i) bench_define(i) + +// a few preconfigured defines that control how benches run + +#define READ_SIZE_i 0 +#define PROG_SIZE_i 1 +#define ERASE_SIZE_i 2 +#define ERASE_COUNT_i 3 +#define BLOCK_SIZE_i 4 +#define BLOCK_COUNT_i 5 +#define CACHE_SIZE_i 6 +#define LOOKAHEAD_SIZE_i 7 +#define COMPACT_THRESH_i 8 +#define METADATA_MAX_i 9 +#define INLINE_MAX_i 10 +#define BLOCK_CYCLES_i 11 +#define ERASE_VALUE_i 12 +#define ERASE_CYCLES_i 13 +#define BADBLOCK_BEHAVIOR_i 14 +#define POWERLOSS_BEHAVIOR_i 15 + +#define READ_SIZE bench_define(READ_SIZE_i) +#define PROG_SIZE bench_define(PROG_SIZE_i) +#define ERASE_SIZE bench_define(ERASE_SIZE_i) +#define ERASE_COUNT bench_define(ERASE_COUNT_i) +#define BLOCK_SIZE bench_define(BLOCK_SIZE_i) +#define BLOCK_COUNT bench_define(BLOCK_COUNT_i) +#define CACHE_SIZE bench_define(CACHE_SIZE_i) +#define LOOKAHEAD_SIZE bench_define(LOOKAHEAD_SIZE_i) +#define COMPACT_THRESH bench_define(COMPACT_THRESH_i) +#define METADATA_MAX bench_define(METADATA_MAX_i) +#define INLINE_MAX bench_define(INLINE_MAX_i) +#define BLOCK_CYCLES bench_define(BLOCK_CYCLES_i) +#define ERASE_VALUE bench_define(ERASE_VALUE_i) +#define ERASE_CYCLES bench_define(ERASE_CYCLES_i) +#define BADBLOCK_BEHAVIOR bench_define(BADBLOCK_BEHAVIOR_i) +#define POWERLOSS_BEHAVIOR bench_define(POWERLOSS_BEHAVIOR_i) + +#define BENCH_IMPLICIT_DEFINES \ + BENCH_DEF(READ_SIZE, PROG_SIZE) \ + BENCH_DEF(PROG_SIZE, ERASE_SIZE) \ + BENCH_DEF(ERASE_SIZE, 0) \ + BENCH_DEF(ERASE_COUNT, (1024*1024)/BLOCK_SIZE) \ + BENCH_DEF(BLOCK_SIZE, ERASE_SIZE) \ + BENCH_DEF(BLOCK_COUNT, ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1))\ + BENCH_DEF(CACHE_SIZE, lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \ + BENCH_DEF(LOOKAHEAD_SIZE, 16) \ + BENCH_DEF(COMPACT_THRESH, 0) \ + BENCH_DEF(METADATA_MAX, 0) \ + BENCH_DEF(INLINE_MAX, 0) \ + BENCH_DEF(BLOCK_CYCLES, -1) \ + BENCH_DEF(ERASE_VALUE, 0xff) \ + BENCH_DEF(ERASE_CYCLES, 0) \ + BENCH_DEF(BADBLOCK_BEHAVIOR, LFS_EMUBD_BADBLOCK_PROGERROR) \ + BENCH_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP) + +#define BENCH_GEOMETRY_DEFINE_COUNT 4 +#define BENCH_IMPLICIT_DEFINE_COUNT 16 + + +#endif diff --git a/components/joltwallet__littlefs/src/littlefs/runners/test_runner.c 
b/components/joltwallet__littlefs/src/littlefs/runners/test_runner.c new file mode 100644 index 0000000..76cb149 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/runners/test_runner.c @@ -0,0 +1,2818 @@ +/* + * Runner for littlefs tests + * + * Copyright (c) 2022, The littlefs authors. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 199309L +#endif + +#include "runners/test_runner.h" +#include "bd/lfs_emubd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// some helpers + +// append to an array with amortized doubling +void *mappend(void **p, + size_t size, + size_t *count, + size_t *capacity) { + uint8_t *p_ = *p; + size_t count_ = *count; + size_t capacity_ = *capacity; + + count_ += 1; + if (count_ > capacity_) { + capacity_ = (2*capacity_ < 4) ? 4 : 2*capacity_; + + p_ = realloc(p_, capacity_*size); + if (!p_) { + return NULL; + } + } + + *p = p_; + *count = count_; + *capacity = capacity_; + return &p_[(count_-1)*size]; +} + +// a quick self-terminating text-safe varint scheme +static void leb16_print(uintmax_t x) { + // allow 'w' to indicate negative numbers + if ((intmax_t)x < 0) { + printf("w"); + x = -x; + } + + while (true) { + char nibble = (x & 0xf) | (x > 0xf ? 0x10 : 0); + printf("%c", (nibble < 10) ? '0'+nibble : 'a'+nibble-10); + if (x <= 0xf) { + break; + } + x >>= 4; + } +} + +static uintmax_t leb16_parse(const char *s, char **tail) { + bool neg = false; + uintmax_t x = 0; + if (tail) { + *tail = (char*)s; + } + + if (s[0] == 'w') { + neg = true; + s = s+1; + } + + size_t i = 0; + while (true) { + uintmax_t nibble = s[i]; + if (nibble >= '0' && nibble <= '9') { + nibble = nibble - '0'; + } else if (nibble >= 'a' && nibble <= 'v') { + nibble = nibble - 'a' + 10; + } else { + // invalid? + return 0; + } + + x |= (nibble & 0xf) << (4*i); + i += 1; + if (!(nibble & 0x10)) { + s = s + i; + break; + } + } + + if (tail) { + *tail = (char*)s; + } + return neg ? 
-x : x; +} + + + +// test_runner types + +typedef struct test_geometry { + const char *name; + test_define_t defines[TEST_GEOMETRY_DEFINE_COUNT]; +} test_geometry_t; + +typedef struct test_powerloss { + const char *name; + void (*run)( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_); + const lfs_emubd_powercycles_t *cycles; + size_t cycle_count; +} test_powerloss_t; + +typedef struct test_id { + const char *name; + const test_define_t *defines; + size_t define_count; + const lfs_emubd_powercycles_t *cycles; + size_t cycle_count; +} test_id_t; + + +// test suites are linked into a custom ld section +#if defined(__APPLE__) +extern struct test_suite __start__test_suites __asm("section$start$__DATA$_test_suites"); +extern struct test_suite __stop__test_suites __asm("section$end$__DATA$_test_suites"); +#else +extern struct test_suite __start__test_suites; +extern struct test_suite __stop__test_suites; +#endif + +const struct test_suite *test_suites = &__start__test_suites; +#define TEST_SUITE_COUNT \ + ((size_t)(&__stop__test_suites - &__start__test_suites)) + + +// test define management +typedef struct test_define_map { + const test_define_t *defines; + size_t count; +} test_define_map_t; + +typedef struct test_define_names { + const char *const *names; + size_t count; +} test_define_names_t; + +intmax_t test_define_lit(void *data) { + return (intptr_t)data; +} + +#define TEST_CONST(x) {test_define_lit, (void*)(uintptr_t)(x)} +#define TEST_LIT(x) ((test_define_t)TEST_CONST(x)) + + +#define TEST_DEF(k, v) \ + intmax_t test_define_##k(void *data) { \ + (void)data; \ + return v; \ + } + + TEST_IMPLICIT_DEFINES +#undef TEST_DEF + +#define TEST_DEFINE_MAP_OVERRIDE 0 +#define TEST_DEFINE_MAP_EXPLICIT 1 +#define TEST_DEFINE_MAP_PERMUTATION 2 +#define TEST_DEFINE_MAP_GEOMETRY 3 +#define TEST_DEFINE_MAP_IMPLICIT 4 +#define TEST_DEFINE_MAP_COUNT 5 + +test_define_map_t test_define_maps[TEST_DEFINE_MAP_COUNT] = { + [TEST_DEFINE_MAP_IMPLICIT] = { + (const test_define_t[TEST_IMPLICIT_DEFINE_COUNT]) { + #define TEST_DEF(k, v) \ + [k##_i] = {test_define_##k, NULL}, + + TEST_IMPLICIT_DEFINES + #undef TEST_DEF + }, + TEST_IMPLICIT_DEFINE_COUNT, + }, +}; + +#define TEST_DEFINE_NAMES_SUITE 0 +#define TEST_DEFINE_NAMES_IMPLICIT 1 +#define TEST_DEFINE_NAMES_COUNT 2 + +test_define_names_t test_define_names[TEST_DEFINE_NAMES_COUNT] = { + [TEST_DEFINE_NAMES_IMPLICIT] = { + (const char *const[TEST_IMPLICIT_DEFINE_COUNT]){ + #define TEST_DEF(k, v) \ + [k##_i] = #k, + + TEST_IMPLICIT_DEFINES + #undef TEST_DEF + }, + TEST_IMPLICIT_DEFINE_COUNT, + }, +}; + +intmax_t *test_define_cache; +size_t test_define_cache_count; +unsigned *test_define_cache_mask; + +const char *test_define_name(size_t define) { + // lookup in our test names + for (size_t i = 0; i < TEST_DEFINE_NAMES_COUNT; i++) { + if (define < test_define_names[i].count + && test_define_names[i].names + && test_define_names[i].names[define]) { + return test_define_names[i].names[define]; + } + } + + return NULL; +} + +bool test_define_ispermutation(size_t define) { + // is this define specific to the permutation? + for (size_t i = 0; i < TEST_DEFINE_MAP_IMPLICIT; i++) { + if (define < test_define_maps[i].count + && test_define_maps[i].defines[define].cb) { + return true; + } + } + + return false; +} + +intmax_t test_define(size_t define) { + // is the define in our cache? 
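// (A sketch of the bitset math behind this cache, using only what's above:
// each define index owns one bit of test_define_cache_mask, at word
// define/(8*sizeof(unsigned)) and bit define%(8*sizeof(unsigned)). With a
// 32-bit unsigned, define 37 lives in word 1, bit 5:
//
//     bool cached = 37 < test_define_cache_count
//             && (test_define_cache_mask[37/32] & (1 << (37%32)));
//
// Note test_define_flush() only clears this mask; stale values stay in
// test_define_cache and are simply ignored.)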
+ if (define < test_define_cache_count + && (test_define_cache_mask[define/(8*sizeof(unsigned))] + & (1 << (define%(8*sizeof(unsigned)))))) { + return test_define_cache[define]; + } + + // lookup in our test defines + for (size_t i = 0; i < TEST_DEFINE_MAP_COUNT; i++) { + if (define < test_define_maps[i].count + && test_define_maps[i].defines[define].cb) { + intmax_t v = test_define_maps[i].defines[define].cb( + test_define_maps[i].defines[define].data); + + // insert into cache! + test_define_cache[define] = v; + test_define_cache_mask[define / (8*sizeof(unsigned))] + |= 1 << (define%(8*sizeof(unsigned))); + + return v; + } + } + + return 0; + + // not found? + const char *name = test_define_name(define); + fprintf(stderr, "error: undefined define %s (%zd)\n", + name ? name : "(unknown)", + define); + assert(false); + exit(-1); +} + +void test_define_flush(void) { + // clear cache between permutations + memset(test_define_cache_mask, 0, + sizeof(unsigned)*( + (test_define_cache_count+(8*sizeof(unsigned))-1) + / (8*sizeof(unsigned)))); +} + +// geometry updates +const test_geometry_t *test_geometry = NULL; + +void test_define_geometry(const test_geometry_t *geometry) { + test_define_maps[TEST_DEFINE_MAP_GEOMETRY] = (test_define_map_t){ + geometry->defines, TEST_GEOMETRY_DEFINE_COUNT}; +} + +// override updates +typedef struct test_override { + const char *name; + const intmax_t *defines; + size_t permutations; +} test_override_t; + +const test_override_t *test_overrides = NULL; +size_t test_override_count = 0; + +test_define_t *test_override_defines = NULL; +size_t test_override_define_count = 0; +size_t test_override_define_permutations = 1; +size_t test_override_define_capacity = 0; + +// suite/perm updates +void test_define_suite(const struct test_suite *suite) { + test_define_names[TEST_DEFINE_NAMES_SUITE] = (test_define_names_t){ + suite->define_names, suite->define_count}; + + // make sure our cache is large enough + if (lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT) + > test_define_cache_count) { + // align to power of two to avoid any superlinear growth + size_t ncount = 1 << lfs_npw2( + lfs_max(suite->define_count, TEST_IMPLICIT_DEFINE_COUNT)); + test_define_cache = realloc(test_define_cache, ncount*sizeof(intmax_t)); + test_define_cache_mask = realloc(test_define_cache_mask, + sizeof(unsigned)*( + (ncount+(8*sizeof(unsigned))-1) + / (8*sizeof(unsigned)))); + test_define_cache_count = ncount; + } + + // map any overrides + if (test_override_count > 0) { + // first figure out the total size of override permutations + size_t count = 0; + size_t permutations = 1; + for (size_t i = 0; i < test_override_count; i++) { + for (size_t d = 0; + d < lfs_max( + suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + // define name match? 
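// (A sketch of how the scatter loop below fills test_override_defines:
// given the hypothetical overrides "-DCACHE_SIZE=64,128
// -DBLOCK_CYCLES=range(100,400,100)", the value lists are A={64,128} and
// B={100,200,300}, so permutations is 2*3 = 6 and, with p stepping
// 1 -> 2 as each override is processed, row j picks A[(j/1)%2], B[(j/2)%3]:
//
//     j:  0    1    2    3    4    5
//     A:  64   128  64   128  64   128
//     B:  100  100  200  200  300  300
//
// i.e. a mixed-radix counter over every combination of override values.)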
+ const char *name = test_define_name(d); + if (name && strcmp(name, test_overrides[i].name) == 0) { + count = lfs_max(count, d+1); + permutations *= test_overrides[i].permutations; + break; + } + } + } + test_override_define_count = count; + test_override_define_permutations = permutations; + + // make sure our override arrays are big enough + if (count * permutations > test_override_define_capacity) { + // align to power of two to avoid any superlinear growth + size_t ncapacity = 1 << lfs_npw2(count * permutations); + test_override_defines = realloc( + test_override_defines, + sizeof(test_define_t)*ncapacity); + test_override_define_capacity = ncapacity; + } + + // zero unoverridden defines + memset(test_override_defines, 0, + sizeof(test_define_t) * count * permutations); + + // compute permutations + size_t p = 1; + for (size_t i = 0; i < test_override_count; i++) { + for (size_t d = 0; + d < lfs_max( + suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + // define name match? + const char *name = test_define_name(d); + if (name && strcmp(name, test_overrides[i].name) == 0) { + // scatter the define permutations based on already + // seen permutations + for (size_t j = 0; j < permutations; j++) { + test_override_defines[j*count + d] = TEST_LIT( + test_overrides[i].defines[(j/p) + % test_overrides[i].permutations]); + } + + // keep track of how many permutations we've seen so far + p *= test_overrides[i].permutations; + break; + } + } + } + } +} + +void test_define_perm( + const struct test_suite *suite, + const struct test_case *case_, + size_t perm) { + if (case_->defines) { + test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){ + case_->defines + perm*suite->define_count, + suite->define_count}; + } else { + test_define_maps[TEST_DEFINE_MAP_PERMUTATION] = (test_define_map_t){ + NULL, 0}; + } +} + +void test_define_override(size_t perm) { + test_define_maps[TEST_DEFINE_MAP_OVERRIDE] = (test_define_map_t){ + test_override_defines + perm*test_override_define_count, + test_override_define_count}; +} + +void test_define_explicit( + const test_define_t *defines, + size_t define_count) { + test_define_maps[TEST_DEFINE_MAP_EXPLICIT] = (test_define_map_t){ + defines, define_count}; +} + +void test_define_cleanup(void) { + // test define management can allocate a few things + free(test_define_cache); + free(test_define_cache_mask); + free(test_override_defines); +} + + + +// test state +extern const test_geometry_t *test_geometries; +extern size_t test_geometry_count; + +extern const test_powerloss_t *test_powerlosses; +extern size_t test_powerloss_count; + +const test_id_t *test_ids = (const test_id_t[]) { + {NULL, NULL, 0, NULL, 0}, +}; +size_t test_id_count = 1; + +size_t test_step_start = 0; +size_t test_step_stop = -1; +size_t test_step_step = 1; + +const char *test_disk_path = NULL; +const char *test_trace_path = NULL; +bool test_trace_backtrace = false; +uint32_t test_trace_period = 0; +uint32_t test_trace_freq = 0; +FILE *test_trace_file = NULL; +uint32_t test_trace_cycles = 0; +uint64_t test_trace_time = 0; +uint64_t test_trace_open_time = 0; +lfs_emubd_sleep_t test_read_sleep = 0.0; +lfs_emubd_sleep_t test_prog_sleep = 0.0; +lfs_emubd_sleep_t test_erase_sleep = 0.0; + +// this determines both the backtrace buffer and the trace printf buffer, if +// trace ends up interleaved or truncated this may need to be increased +#ifndef TEST_TRACE_BACKTRACE_BUFFER_SIZE +#define TEST_TRACE_BACKTRACE_BUFFER_SIZE 8192 +#endif +void *test_trace_backtrace_buffer[ + 
TEST_TRACE_BACKTRACE_BUFFER_SIZE / sizeof(void*)]; + +// trace printing +void test_trace(const char *fmt, ...) { + if (test_trace_path) { + // sample at a specific period? + if (test_trace_period) { + if (test_trace_cycles % test_trace_period != 0) { + test_trace_cycles += 1; + return; + } + test_trace_cycles += 1; + } + + // sample at a specific frequency? + if (test_trace_freq) { + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000 + + (uint64_t)t.tv_nsec; + if (now - test_trace_time < (1000*1000*1000) / test_trace_freq) { + return; + } + test_trace_time = now; + } + + if (!test_trace_file) { + // Tracing output is heavy and trying to open every trace + // call is slow, so we only try to open the trace file every + // so often. Note this doesn't affect successfully opened files + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + uint64_t now = (uint64_t)t.tv_sec*1000*1000*1000 + + (uint64_t)t.tv_nsec; + if (now - test_trace_open_time < 100*1000*1000) { + return; + } + test_trace_open_time = now; + + // try to open the trace file + int fd; + if (strcmp(test_trace_path, "-") == 0) { + fd = dup(1); + if (fd < 0) { + return; + } + } else { + fd = open( + test_trace_path, + O_WRONLY | O_CREAT | O_APPEND | O_NONBLOCK, + 0666); + if (fd < 0) { + return; + } + int err = fcntl(fd, F_SETFL, O_WRONLY | O_CREAT | O_APPEND); + assert(!err); + } + + FILE *f = fdopen(fd, "a"); + assert(f); + int err = setvbuf(f, NULL, _IOFBF, + TEST_TRACE_BACKTRACE_BUFFER_SIZE); + assert(!err); + test_trace_file = f; + } + + // print trace + va_list va; + va_start(va, fmt); + int res = vfprintf(test_trace_file, fmt, va); + va_end(va); + if (res < 0) { + fclose(test_trace_file); + test_trace_file = NULL; + return; + } + + if (test_trace_backtrace) { + // print backtrace + size_t count = backtrace( + test_trace_backtrace_buffer, + TEST_TRACE_BACKTRACE_BUFFER_SIZE); + // note we skip our own stack frame + for (size_t i = 1; i < count; i++) { + res = fprintf(test_trace_file, "\tat %p\n", + test_trace_backtrace_buffer[i]); + if (res < 0) { + fclose(test_trace_file); + test_trace_file = NULL; + return; + } + } + } + + // flush immediately + fflush(test_trace_file); + } +} + + +// test prng +uint32_t test_prng(uint32_t *state) { + // A simple xorshift32 generator, easily reproducible. Keep in mind + // determinism is much more important than actual randomness here. 
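// (This is Marsaglia's xorshift32 with the (13, 17, 5) triple: it walks
// all 2^32 - 1 nonzero states in a single cycle, and 0 is a fixed point,
// so callers must seed with a nonzero value, e.g.
//
//     uint32_t prng = 42;                  // any nonzero seed works
//     uint32_t a = test_prng(&prng);       // same value on every run
//     uint32_t b = test_prng(&prng);       // next value in the cycle
// )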
+ uint32_t x = *state; + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + *state = x; + return x; +} + + +// encode our permutation into a reusable id +static void perm_printid( + const struct test_suite *suite, + const struct test_case *case_, + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count) { + (void)suite; + // case[:permutation[:powercycles]] + printf("%s:", case_->name); + for (size_t d = 0; + d < lfs_max( + suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + if (test_define_ispermutation(d)) { + leb16_print(d); + leb16_print(TEST_DEFINE(d)); + } + } + + // only print power-cycles if any occurred + if (cycles) { + printf(":"); + for (size_t i = 0; i < cycle_count; i++) { + leb16_print(cycles[i]); + } + } +} + + +// a quick trie for keeping track of permutations we've seen +typedef struct test_seen { + struct test_seen_branch *branches; + size_t branch_count; + size_t branch_capacity; +} test_seen_t; + +struct test_seen_branch { + intmax_t define; + struct test_seen branch; +}; + +bool test_seen_insert( + test_seen_t *seen, + const struct test_suite *suite, + const struct test_case *case_) { + (void)case_; + bool was_seen = true; + + // use the currently set defines + for (size_t d = 0; + d < lfs_max( + suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + // treat unpermuted defines the same as 0 + intmax_t define = test_define_ispermutation(d) ? TEST_DEFINE(d) : 0; + + // already seen? + struct test_seen_branch *branch = NULL; + for (size_t i = 0; i < seen->branch_count; i++) { + if (seen->branches[i].define == define) { + branch = &seen->branches[i]; + break; + } + } + + // need to create a new node + if (!branch) { + was_seen = false; + branch = mappend( + (void**)&seen->branches, + sizeof(struct test_seen_branch), + &seen->branch_count, + &seen->branch_capacity); + branch->define = define; + branch->branch = (test_seen_t){NULL, 0, 0}; + } + + seen = &branch->branch; + } + + return was_seen; +} + +void test_seen_cleanup(test_seen_t *seen) { + for (size_t i = 0; i < seen->branch_count; i++) { + test_seen_cleanup(&seen->branches[i].branch); + } + free(seen->branches); +} + +static void run_powerloss_none( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_); +static void run_powerloss_cycles( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_); + +// iterate through permutations in a test case +static void case_forperm( + const struct test_suite *suite, + const struct test_case *case_, + const test_define_t *defines, + size_t define_count, + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + void (*cb)( + void *data, + const struct test_suite *suite, + const struct test_case *case_, + const test_powerloss_t *powerloss), + void *data) { + // explicit permutation? + if (defines) { + test_define_explicit(defines, define_count); + + for (size_t v = 0; v < test_override_define_permutations; v++) { + // define override permutation + test_define_override(v); + test_define_flush(); + + // explicit powerloss cycles?
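// (A sketch of how the test_seen trie above dedupes permutations: each
// trie level is one define index, each branch one concrete value.
// Inserting the value tuples (16,512) and then (16,4096) builds
//
//     16 -+- 512     first insert,  was_seen == false
//         +- 4096    second insert, was_seen == false
//
// and re-inserting (16,4096) only walks existing branches, so was_seen
// comes back true and the permutation loop below skips the duplicate.)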
+ if (cycles) { + cb(data, suite, case_, &(test_powerloss_t){ + .run=run_powerloss_cycles, + .cycles=cycles, + .cycle_count=cycle_count}); + } else { + for (size_t p = 0; p < test_powerloss_count; p++) { + // skip non-reentrant tests when powerloss testing + if (test_powerlosses[p].run != run_powerloss_none + && !(case_->flags & TEST_REENTRANT)) { + continue; + } + + cb(data, suite, case_, &test_powerlosses[p]); + } + } + } + + return; + } + + test_seen_t seen = {NULL, 0, 0}; + + for (size_t k = 0; k < case_->permutations; k++) { + // define permutation + test_define_perm(suite, case_, k); + + for (size_t v = 0; v < test_override_define_permutations; v++) { + // define override permutation + test_define_override(v); + + for (size_t g = 0; g < test_geometry_count; g++) { + // define geometry + test_define_geometry(&test_geometries[g]); + test_define_flush(); + + // have we seen this permutation before? + bool was_seen = test_seen_insert(&seen, suite, case_); + if (!(k == 0 && v == 0 && g == 0) && was_seen) { + continue; + } + + if (cycles) { + cb(data, suite, case_, &(test_powerloss_t){ + .run=run_powerloss_cycles, + .cycles=cycles, + .cycle_count=cycle_count}); + } else { + for (size_t p = 0; p < test_powerloss_count; p++) { + // skip non-reentrant tests when powerloss testing + if (test_powerlosses[p].run != run_powerloss_none + && !(case_->flags & TEST_REENTRANT)) { + continue; + } + + cb(data, suite, case_, &test_powerlosses[p]); + } + } + } + } + } + + test_seen_cleanup(&seen); +} + + +// how many permutations are there actually in a test case +struct perm_count_state { + size_t total; + size_t filtered; +}; + +void perm_count( + void *data, + const struct test_suite *suite, + const struct test_case *case_, + const test_powerloss_t *powerloss) { + struct perm_count_state *state = data; + (void)suite; + (void)case_; + (void)powerloss; + + state->total += 1; + + if (case_->filter && !case_->filter()) { + return; + } + + state->filtered += 1; +} + + +// operations we can do +static void summary(void) { + printf("%-23s %7s %7s %7s %11s\n", + "", "flags", "suites", "cases", "perms"); + size_t suites = 0; + size_t cases = 0; + test_flags_t flags = 0; + struct perm_count_state perms = {0, 0}; + + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + cases += 1; + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_count, + &perms); + } + + suites += 1; + flags |= test_suites[i].flags; + } + } + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (flags & TEST_REENTRANT) ? "r" : "", + (!flags) ? 
"-" : ""); + printf("%-23s %7s %7zu %7zu %11s\n", + "TOTAL", + flag_buf, + suites, + cases, + perm_buf); +} + +static void list_suites(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + size_t len = strlen(test_suites[i].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %7s %7s %11s\n", + name_width, "suite", "flags", "cases", "perms"); + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + size_t cases = 0; + struct perm_count_state perms = {0, 0}; + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + cases += 1; + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_count, + &perms); + } + + // no tests found? + if (!cases) { + continue; + } + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (test_suites[i].flags & TEST_REENTRANT) ? "r" : "", + (!test_suites[i].flags) ? "-" : ""); + printf("%-*s %7s %7zu %11s\n", + name_width, + test_suites[i].name, + flag_buf, + cases, + perm_buf); + } + } +} + +static void list_cases(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + for (size_t j = 0; j < test_suites[i].case_count; j++) { + size_t len = strlen(test_suites[i].cases[j].name); + if (len > name_width) { + name_width = len; + } + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %7s %11s\n", name_width, "case", "flags", "perms"); + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + struct perm_count_state perms = {0, 0}; + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_count, + &perms); + + char perm_buf[64]; + sprintf(perm_buf, "%zu/%zu", perms.filtered, perms.total); + char flag_buf[64]; + sprintf(flag_buf, "%s%s", + (test_suites[i].cases[j].flags & TEST_REENTRANT) + ? "r" : "", + (!test_suites[i].cases[j].flags) + ? 
"-" : ""); + printf("%-*s %7s %11s\n", + name_width, + test_suites[i].cases[j].name, + flag_buf, + perm_buf); + } + } + } +} + +static void list_suite_paths(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + size_t len = strlen(test_suites[i].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %s\n", name_width, "suite", "path"); + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + size_t cases = 0; + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + cases += 1; + } + + // no tests found? + if (!cases) { + continue; + } + + printf("%-*s %s\n", + name_width, + test_suites[i].name, + test_suites[i].path); + } + } +} + +static void list_case_paths(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + for (size_t j = 0; j < test_suites[i].case_count; j++) { + size_t len = strlen(test_suites[i].cases[j].name); + if (len > name_width) { + name_width = len; + } + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %s\n", name_width, "case", "path"); + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + printf("%-*s %s\n", + name_width, + test_suites[i].cases[j].name, + test_suites[i].cases[j].path); + } + } + } +} + +struct list_defines_define { + const char *name; + intmax_t *values; + size_t value_count; + size_t value_capacity; +}; + +struct list_defines_defines { + struct list_defines_define *defines; + size_t define_count; + size_t define_capacity; +}; + +static void list_defines_add( + struct list_defines_defines *defines, + size_t d) { + const char *name = test_define_name(d); + intmax_t value = TEST_DEFINE(d); + + // define already in defines? + for (size_t i = 0; i < defines->define_count; i++) { + if (strcmp(defines->defines[i].name, name) == 0) { + // value already in values? + for (size_t j = 0; j < defines->defines[i].value_count; j++) { + if (defines->defines[i].values[j] == value) { + return; + } + } + + *(intmax_t*)mappend( + (void**)&defines->defines[i].values, + sizeof(intmax_t), + &defines->defines[i].value_count, + &defines->defines[i].value_capacity) = value; + + return; + } + } + + // new define? 
+ struct list_defines_define *define = mappend( + (void**)&defines->defines, + sizeof(struct list_defines_define), + &defines->define_count, + &defines->define_capacity); + define->name = name; + define->values = malloc(sizeof(intmax_t)); + define->values[0] = value; + define->value_count = 1; + define->value_capacity = 1; +} + +void perm_list_defines( + void *data, + const struct test_suite *suite, + const struct test_case *case_, + const test_powerloss_t *powerloss) { + struct list_defines_defines *defines = data; + (void)suite; + (void)case_; + (void)powerloss; + + // collect defines + for (size_t d = 0; + d < lfs_max(suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + if (d < TEST_IMPLICIT_DEFINE_COUNT + || test_define_ispermutation(d)) { + list_defines_add(defines, d); + } + } +} + +void perm_list_permutation_defines( + void *data, + const struct test_suite *suite, + const struct test_case *case_, + const test_powerloss_t *powerloss) { + struct list_defines_defines *defines = data; + (void)suite; + (void)case_; + (void)powerloss; + + // collect permutation_defines + for (size_t d = 0; + d < lfs_max(suite->define_count, + TEST_IMPLICIT_DEFINE_COUNT); + d++) { + if (test_define_ispermutation(d)) { + list_defines_add(defines, d); + } + } +} + +extern const test_geometry_t builtin_geometries[]; + +static void list_defines(void) { + struct list_defines_defines defines = {NULL, 0, 0}; + + // add defines + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_list_defines, + &defines); + } + } + } + + for (size_t i = 0; i < defines.define_count; i++) { + printf("%s=", defines.defines[i].name); + for (size_t j = 0; j < defines.defines[i].value_count; j++) { + printf("%jd", defines.defines[i].values[j]); + if (j != defines.defines[i].value_count-1) { + printf(","); + } + } + printf("\n"); + } + + for (size_t i = 0; i < defines.define_count; i++) { + free(defines.defines[i].values); + } + free(defines.defines); +} + +static void list_permutation_defines(void) { + struct list_defines_defines defines = {NULL, 0, 0}; + + // add permutation defines + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? 
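// (This filter recurs in every list/run loop: a test id matches when it
// names either the whole suite or a single case, so a hypothetical
// "test_dirs" selects every case in that suite while "test_dirs_mkdir"
// selects just one; a NULL name matches everything.)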
+ if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_list_permutation_defines, + &defines); + } + } + } + + for (size_t i = 0; i < defines.define_count; i++) { + printf("%s=", defines.defines[i].name); + for (size_t j = 0; j < defines.defines[i].value_count; j++) { + printf("%jd", defines.defines[i].values[j]); + if (j != defines.defines[i].value_count-1) { + printf(","); + } + } + printf("\n"); + } + + for (size_t i = 0; i < defines.define_count; i++) { + free(defines.defines[i].values); + } + free(defines.defines); +} + +static void list_implicit_defines(void) { + struct list_defines_defines defines = {NULL, 0, 0}; + + // yes we do need to define a suite; this does a bit of bookkeeping + // such as setting up the define cache + test_define_suite(&(const struct test_suite){0}); + + // make sure to include builtin geometries here + extern const test_geometry_t builtin_geometries[]; + for (size_t g = 0; builtin_geometries[g].name; g++) { + test_define_geometry(&builtin_geometries[g]); + test_define_flush(); + + // add implicit defines + for (size_t d = 0; d < TEST_IMPLICIT_DEFINE_COUNT; d++) { + list_defines_add(&defines, d); + } + } + + for (size_t i = 0; i < defines.define_count; i++) { + printf("%s=", defines.defines[i].name); + for (size_t j = 0; j < defines.defines[i].value_count; j++) { + printf("%jd", defines.defines[i].values[j]); + if (j != defines.defines[i].value_count-1) { + printf(","); + } + } + printf("\n"); + } + + for (size_t i = 0; i < defines.define_count; i++) { + free(defines.defines[i].values); + } + free(defines.defines); +} + + + +// geometries to test + +const test_geometry_t builtin_geometries[] = { + {"default", {{0}, TEST_CONST(16), TEST_CONST(512), {0}}}, + {"eeprom", {{0}, TEST_CONST(1), TEST_CONST(512), {0}}}, + {"emmc", {{0}, {0}, TEST_CONST(512), {0}}}, + {"nor", {{0}, TEST_CONST(1), TEST_CONST(4096), {0}}}, + {"nand", {{0}, TEST_CONST(4096), TEST_CONST(32768), {0}}}, + {NULL, {{0}, {0}, {0}, {0}}}, +}; + +const test_geometry_t *test_geometries = builtin_geometries; +size_t test_geometry_count = 5; + +static void list_geometries(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t g = 0; builtin_geometries[g].name; g++) { + size_t len = strlen(builtin_geometries[g].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + // yes we do need to define a suite; this does a bit of bookkeeping + // such as setting up the define cache + test_define_suite(&(const struct test_suite){0}); + + printf("%-*s %7s %7s %7s %7s %11s\n", + name_width, "geometry", "read", "prog", "erase", "count", "size"); + for (size_t g = 0; builtin_geometries[g].name; g++) { + test_define_geometry(&builtin_geometries[g]); + test_define_flush(); + printf("%-*s %7ju %7ju %7ju %7ju %11ju\n", + name_width, + builtin_geometries[g].name, + READ_SIZE, + PROG_SIZE, + ERASE_SIZE, + ERASE_COUNT, + ERASE_SIZE*ERASE_COUNT); + } +} + + +// scenarios to run tests under power-loss + +static void run_powerloss_none( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_) { + (void)cycles; + (void)cycle_count; + (void)suite; + + // create block device and configuration
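// (The READ_SIZE/PROG_SIZE/... values plugged in below come from the
// geometry plus the implicit defaults -- READ_SIZE falls back to
// PROG_SIZE, BLOCK_SIZE to ERASE_SIZE, ERASE_COUNT to 1MiB/BLOCK_SIZE --
// as listed in the bench header earlier in this commit, assuming the test
// header mirrors it. The builtin "nor" row above thus expands to read=1,
// prog=1, erase=4096, count=256: a 1MiB NOR flash.)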
+ lfs_emubd_t bd; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + #ifdef LFS_MULTIVERSION + .disk_version = DISK_VERSION, + #endif + }; + + struct lfs_emubd_config bdcfg = { + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = test_disk_path, + .read_sleep = test_read_sleep, + .prog_sleep = test_prog_sleep, + .erase_sleep = test_erase_sleep, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the test + printf("running "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + case_->run(&cfg); + + printf("finished "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + // cleanup + err = lfs_emubd_destroy(&cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } +} + +static void powerloss_longjmp(void *c) { + jmp_buf *powerloss_jmp = c; + longjmp(*powerloss_jmp, 1); +} + +static void run_powerloss_linear( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_) { + (void)cycles; + (void)cycle_count; + (void)suite; + + // create block device and configuration + lfs_emubd_t bd; + jmp_buf powerloss_jmp; + volatile lfs_emubd_powercycles_t i = 1; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + #ifdef LFS_MULTIVERSION + .disk_version = DISK_VERSION, + #endif + }; + + struct lfs_emubd_config bdcfg = { + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = test_disk_path, + .read_sleep = test_read_sleep, + .prog_sleep = test_prog_sleep, + .erase_sleep = test_erase_sleep, + .power_cycles = i, + .powerloss_behavior = POWERLOSS_BEHAVIOR, + .powerloss_cb = powerloss_longjmp, + .powerloss_data = &powerloss_jmp, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the test, increasing power-cycles as power-loss events occur + printf("running "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + while (true) { + if (!setjmp(powerloss_jmp)) { + // run the test + case_->run(&cfg); + break; + } + + // power-loss! 
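// (Control-flow sketch: lfs_emubd counts down power_cycles as the test
// progs/erases and then calls powerloss_cb, which longjmps back to the
// setjmp above. The test restarts from scratch against the surviving disk
// image -- emulating a reboot after power-loss -- with the budget bumped
// to i+1, so this scenario loses power after 1, 2, 3, ... operations.)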
+ printf("powerloss "); + perm_printid(suite, case_, NULL, 0); + printf(":"); + for (lfs_emubd_powercycles_t j = 1; j <= i; j++) { + leb16_print(j); + } + printf("\n"); + + i += 1; + lfs_emubd_setpowercycles(&cfg, i); + } + + printf("finished "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + // cleanup + err = lfs_emubd_destroy(&cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } +} + +static void run_powerloss_log( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_) { + (void)cycles; + (void)cycle_count; + (void)suite; + + // create block device and configuration + lfs_emubd_t bd; + jmp_buf powerloss_jmp; + volatile lfs_emubd_powercycles_t i = 1; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + #ifdef LFS_MULTIVERSION + .disk_version = DISK_VERSION, + #endif + }; + + struct lfs_emubd_config bdcfg = { + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = test_disk_path, + .read_sleep = test_read_sleep, + .prog_sleep = test_prog_sleep, + .erase_sleep = test_erase_sleep, + .power_cycles = i, + .powerloss_behavior = POWERLOSS_BEHAVIOR, + .powerloss_cb = powerloss_longjmp, + .powerloss_data = &powerloss_jmp, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the test, increasing power-cycles as power-loss events occur + printf("running "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + while (true) { + if (!setjmp(powerloss_jmp)) { + // run the test + case_->run(&cfg); + break; + } + + // power-loss! 
+ printf("powerloss "); + perm_printid(suite, case_, NULL, 0); + printf(":"); + for (lfs_emubd_powercycles_t j = 1; j <= i; j *= 2) { + leb16_print(j); + } + printf("\n"); + + i *= 2; + lfs_emubd_setpowercycles(&cfg, i); + } + + printf("finished "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + // cleanup + err = lfs_emubd_destroy(&cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } +} + +static void run_powerloss_cycles( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_) { + (void)suite; + + // create block device and configuration + lfs_emubd_t bd; + jmp_buf powerloss_jmp; + volatile size_t i = 0; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + #ifdef LFS_MULTIVERSION + .disk_version = DISK_VERSION, + #endif + }; + + struct lfs_emubd_config bdcfg = { + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = test_disk_path, + .read_sleep = test_read_sleep, + .prog_sleep = test_prog_sleep, + .erase_sleep = test_erase_sleep, + .power_cycles = (i < cycle_count) ? cycles[i] : 0, + .powerloss_behavior = POWERLOSS_BEHAVIOR, + .powerloss_cb = powerloss_longjmp, + .powerloss_data = &powerloss_jmp, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the test, increasing power-cycles as power-loss events occur + printf("running "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + while (true) { + if (!setjmp(powerloss_jmp)) { + // run the test + case_->run(&cfg); + break; + } + + // power-loss! + assert(i <= cycle_count); + printf("powerloss "); + perm_printid(suite, case_, cycles, i+1); + printf("\n"); + + i += 1; + lfs_emubd_setpowercycles(&cfg, + (i < cycle_count) ? 
cycles[i] : 0); + } + + printf("finished "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + // cleanup + err = lfs_emubd_destroy(&cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } +} + +struct powerloss_exhaustive_state { + struct lfs_config *cfg; + + lfs_emubd_t *branches; + size_t branch_count; + size_t branch_capacity; +}; + +struct powerloss_exhaustive_cycles { + lfs_emubd_powercycles_t *cycles; + size_t cycle_count; + size_t cycle_capacity; +}; + +static void powerloss_exhaustive_branch(void *c) { + struct powerloss_exhaustive_state *state = c; + // append to branches + lfs_emubd_t *branch = mappend( + (void**)&state->branches, + sizeof(lfs_emubd_t), + &state->branch_count, + &state->branch_capacity); + if (!branch) { + fprintf(stderr, "error: exhaustive: out of memory\n"); + exit(-1); + } + + // create copy-on-write copy + int err = lfs_emubd_copy(state->cfg, branch); + if (err) { + fprintf(stderr, "error: exhaustive: could not create bd copy\n"); + exit(-1); + } + + // also trigger on next power cycle + lfs_emubd_setpowercycles(state->cfg, 1); +} + +static void run_powerloss_exhaustive_layer( + struct powerloss_exhaustive_cycles *cycles, + const struct test_suite *suite, + const struct test_case *case_, + struct lfs_config *cfg, + struct lfs_emubd_config *bdcfg, + size_t depth) { + (void)suite; + + struct powerloss_exhaustive_state state = { + .cfg = cfg, + .branches = NULL, + .branch_count = 0, + .branch_capacity = 0, + }; + + // run through the test without additional powerlosses, collecting possible + // branches as we do so + lfs_emubd_setpowercycles(state.cfg, depth > 0 ? 1 : 0); + bdcfg->powerloss_data = &state; + + // run the tests + case_->run(cfg); + + // aggressively clean up memory here to try to keep our memory usage low + int err = lfs_emubd_destroy(cfg); + if (err) { + fprintf(stderr, "error: could not destroy block device: %d\n", err); + exit(-1); + } + + // recurse into each branch + for (size_t i = 0; i < state.branch_count; i++) { + // first push and print the branch + lfs_emubd_powercycles_t *cycle = mappend( + (void**)&cycles->cycles, + sizeof(lfs_emubd_powercycles_t), + &cycles->cycle_count, + &cycles->cycle_capacity); + if (!cycle) { + fprintf(stderr, "error: exhaustive: out of memory\n"); + exit(-1); + } + *cycle = i+1; + + printf("powerloss "); + perm_printid(suite, case_, cycles->cycles, cycles->cycle_count); + printf("\n"); + + // now recurse + cfg->context = &state.branches[i]; + run_powerloss_exhaustive_layer(cycles, + suite, case_, + cfg, bdcfg, depth-1); + + // pop the cycle + cycles->cycle_count -= 1; + } + + // clean up memory + free(state.branches); +} + +static void run_powerloss_exhaustive( + const lfs_emubd_powercycles_t *cycles, + size_t cycle_count, + const struct test_suite *suite, + const struct test_case *case_) { + (void)cycles; + (void)suite; + + // create block device and configuration + lfs_emubd_t bd; + + struct lfs_config cfg = { + .context = &bd, + .read = lfs_emubd_read, + .prog = lfs_emubd_prog, + .erase = lfs_emubd_erase, + .sync = lfs_emubd_sync, + .read_size = READ_SIZE, + .prog_size = PROG_SIZE, + .block_size = BLOCK_SIZE, + .block_count = BLOCK_COUNT, + .block_cycles = BLOCK_CYCLES, + .cache_size = CACHE_SIZE, + .lookahead_size = LOOKAHEAD_SIZE, + .compact_thresh = COMPACT_THRESH, + .metadata_max = METADATA_MAX, + .inline_max = INLINE_MAX, + #ifdef LFS_MULTIVERSION + .disk_version = DISK_VERSION, + #endif + }; + + struct lfs_emubd_config bdcfg = { + .read_size = 
READ_SIZE, + .prog_size = PROG_SIZE, + .erase_size = ERASE_SIZE, + .erase_count = ERASE_COUNT, + .erase_value = ERASE_VALUE, + .erase_cycles = ERASE_CYCLES, + .badblock_behavior = BADBLOCK_BEHAVIOR, + .disk_path = test_disk_path, + .read_sleep = test_read_sleep, + .prog_sleep = test_prog_sleep, + .erase_sleep = test_erase_sleep, + .powerloss_behavior = POWERLOSS_BEHAVIOR, + .powerloss_cb = powerloss_exhaustive_branch, + .powerloss_data = NULL, + }; + + int err = lfs_emubd_create(&cfg, &bdcfg); + if (err) { + fprintf(stderr, "error: could not create block device: %d\n", err); + exit(-1); + } + + // run the test, increasing power-cycles as power-loss events occur + printf("running "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + + // recursively exhaust each layer of powerlosses + run_powerloss_exhaustive_layer( + &(struct powerloss_exhaustive_cycles){NULL, 0, 0}, + suite, case_, + &cfg, &bdcfg, cycle_count); + + printf("finished "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); +} + + +const test_powerloss_t builtin_powerlosses[] = { + {"none", run_powerloss_none, NULL, 0}, + {"log", run_powerloss_log, NULL, 0}, + {"linear", run_powerloss_linear, NULL, 0}, + {"exhaustive", run_powerloss_exhaustive, NULL, SIZE_MAX}, + {NULL, NULL, NULL, 0}, +}; + +const char *const builtin_powerlosses_help[] = { + "Run with no power-losses.", + "Run with exponentially-decreasing power-losses.", + "Run with linearly-decreasing power-losses.", + "Run all permutations of power-losses; this may take a while.", + "Run all permutations of n power-losses.", + "Run a custom comma-separated set of power-losses.", + "Run a custom leb16-encoded set of power-losses.", +}; + +// default to -Pnone,linear, which provides a good heuristic while still +// running quickly +const test_powerloss_t *test_powerlosses = (const test_powerloss_t[]){ + {"none", run_powerloss_none, NULL, 0}, + {"linear", run_powerloss_linear, NULL, 0}, +}; +size_t test_powerloss_count = 2; + +static void list_powerlosses(void) { + // at least size so that names fit + unsigned name_width = 23; + for (size_t i = 0; builtin_powerlosses[i].name; i++) { + size_t len = strlen(builtin_powerlosses[i].name); + if (len > name_width) { + name_width = len; + } + } + name_width = 4*((name_width+1+4-1)/4)-1; + + printf("%-*s %s\n", name_width, "scenario", "description"); + size_t i = 0; + for (; builtin_powerlosses[i].name; i++) { + printf("%-*s %s\n", + name_width, + builtin_powerlosses[i].name, + builtin_powerlosses_help[i]); + } + + // a couple more options with special parsing + printf("%-*s %s\n", name_width, "1,2,3", builtin_powerlosses_help[i+0]); + printf("%-*s %s\n", name_width, "{1,2,3}", builtin_powerlosses_help[i+1]); + printf("%-*s %s\n", name_width, ":1248g1", builtin_powerlosses_help[i+2]); +} + + +// global test step count +size_t test_step = 0; + +void perm_run( + void *data, + const struct test_suite *suite, + const struct test_case *case_, + const test_powerloss_t *powerloss) { + (void)data; + + // skip this step? + if (!(test_step >= test_step_start + && test_step < test_step_stop + && (test_step-test_step_start) % test_step_step == 0)) { + test_step += 1; + return; + } + test_step += 1; + + // filter?
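// (The window above implements -s/--step over this global permutation
// counter: "-s0,100,2" runs every other permutation in [0,100), and a bare
// "-s10" parses as stop=10 with start=0 -- mirroring the bench runner's
// step parsing earlier in this commit -- which is handy for splitting a
// long run across parallel jobs.)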
+ if (case_->filter && !case_->filter()) { + printf("skipped "); + perm_printid(suite, case_, NULL, 0); + printf("\n"); + return; + } + + powerloss->run( + powerloss->cycles, powerloss->cycle_count, + suite, case_); +} + +static void run(void) { + // ignore disconnected pipes + signal(SIGPIPE, SIG_IGN); + + for (size_t t = 0; t < test_id_count; t++) { + for (size_t i = 0; i < TEST_SUITE_COUNT; i++) { + test_define_suite(&test_suites[i]); + + for (size_t j = 0; j < test_suites[i].case_count; j++) { + // does neither suite nor case name match? + if (test_ids[t].name && !( + strcmp(test_ids[t].name, + test_suites[i].name) == 0 + || strcmp(test_ids[t].name, + test_suites[i].cases[j].name) == 0)) { + continue; + } + + case_forperm( + &test_suites[i], + &test_suites[i].cases[j], + test_ids[t].defines, + test_ids[t].define_count, + test_ids[t].cycles, + test_ids[t].cycle_count, + perm_run, + NULL); + } + } + } +} + + + +// option handling +enum opt_flags { + OPT_HELP = 'h', + OPT_SUMMARY = 'Y', + OPT_LIST_SUITES = 'l', + OPT_LIST_CASES = 'L', + OPT_LIST_SUITE_PATHS = 1, + OPT_LIST_CASE_PATHS = 2, + OPT_LIST_DEFINES = 3, + OPT_LIST_PERMUTATION_DEFINES = 4, + OPT_LIST_IMPLICIT_DEFINES = 5, + OPT_LIST_GEOMETRIES = 6, + OPT_LIST_POWERLOSSES = 7, + OPT_DEFINE = 'D', + OPT_GEOMETRY = 'G', + OPT_POWERLOSS = 'P', + OPT_STEP = 's', + OPT_DISK = 'd', + OPT_TRACE = 't', + OPT_TRACE_BACKTRACE = 8, + OPT_TRACE_PERIOD = 9, + OPT_TRACE_FREQ = 10, + OPT_READ_SLEEP = 11, + OPT_PROG_SLEEP = 12, + OPT_ERASE_SLEEP = 13, +}; + +const char *short_opts = "hYlLD:G:P:s:d:t:"; + +const struct option long_opts[] = { + {"help", no_argument, NULL, OPT_HELP}, + {"summary", no_argument, NULL, OPT_SUMMARY}, + {"list-suites", no_argument, NULL, OPT_LIST_SUITES}, + {"list-cases", no_argument, NULL, OPT_LIST_CASES}, + {"list-suite-paths", no_argument, NULL, OPT_LIST_SUITE_PATHS}, + {"list-case-paths", no_argument, NULL, OPT_LIST_CASE_PATHS}, + {"list-defines", no_argument, NULL, OPT_LIST_DEFINES}, + {"list-permutation-defines", + no_argument, NULL, OPT_LIST_PERMUTATION_DEFINES}, + {"list-implicit-defines", + no_argument, NULL, OPT_LIST_IMPLICIT_DEFINES}, + {"list-geometries", no_argument, NULL, OPT_LIST_GEOMETRIES}, + {"list-powerlosses", no_argument, NULL, OPT_LIST_POWERLOSSES}, + {"define", required_argument, NULL, OPT_DEFINE}, + {"geometry", required_argument, NULL, OPT_GEOMETRY}, + {"powerloss", required_argument, NULL, OPT_POWERLOSS}, + {"step", required_argument, NULL, OPT_STEP}, + {"disk", required_argument, NULL, OPT_DISK}, + {"trace", required_argument, NULL, OPT_TRACE}, + {"trace-backtrace", no_argument, NULL, OPT_TRACE_BACKTRACE}, + {"trace-period", required_argument, NULL, OPT_TRACE_PERIOD}, + {"trace-freq", required_argument, NULL, OPT_TRACE_FREQ}, + {"read-sleep", required_argument, NULL, OPT_READ_SLEEP}, + {"prog-sleep", required_argument, NULL, OPT_PROG_SLEEP}, + {"erase-sleep", required_argument, NULL, OPT_ERASE_SLEEP}, + {NULL, 0, NULL, 0}, +}; + +const char *const help_text[] = { + "Show this help message.", + "Show quick summary.", + "List test suites.", + "List test cases.", + "List the path for each test suite.", + "List the path and line number for each test case.", + "List all defines in this test-runner.", + "List explicit defines in this test-runner.", + "List implicit defines in this test-runner.", + "List the available disk geometries.", + "List the available power-loss scenarios.", + "Override a test define.", + "Comma-separated list of disk geometries to test.", + "Comma-separated list of power-loss 
scenarios to test.", + "Comma-separated range of test permutations to run (start,stop,step).", + "Direct block device operations to this file.", + "Direct trace output to this file.", + "Include a backtrace with every trace statement.", + "Sample trace output at this period in cycles.", + "Sample trace output at this frequency in hz.", + "Artificial read delay in seconds.", + "Artificial prog delay in seconds.", + "Artificial erase delay in seconds.", +}; + +int main(int argc, char **argv) { + void (*op)(void) = run; + + size_t test_override_capacity = 0; + size_t test_geometry_capacity = 0; + size_t test_powerloss_capacity = 0; + size_t test_id_capacity = 0; + + // parse options + while (true) { + int c = getopt_long(argc, argv, short_opts, long_opts, NULL); + switch (c) { + // generate help message + case OPT_HELP: { + printf("usage: %s [options] [test_id]\n", argv[0]); + printf("\n"); + + printf("options:\n"); + size_t i = 0; + while (long_opts[i].name) { + size_t indent; + if (long_opts[i].has_arg == no_argument) { + if (long_opts[i].val >= '0' && long_opts[i].val < 'z') { + indent = printf(" -%c, --%s ", + long_opts[i].val, + long_opts[i].name); + } else { + indent = printf(" --%s ", + long_opts[i].name); + } + } else { + if (long_opts[i].val >= '0' && long_opts[i].val < 'z') { + indent = printf(" -%c %s, --%s %s ", + long_opts[i].val, + long_opts[i].name, + long_opts[i].name, + long_opts[i].name); + } else { + indent = printf(" --%s %s ", + long_opts[i].name, + long_opts[i].name); + } + } + + // a quick, hacky, byte-level method for text wrapping + size_t len = strlen(help_text[i]); + size_t j = 0; + if (indent < 24) { + printf("%*s %.80s\n", + (int)(24-1-indent), + "", + &help_text[i][j]); + j += 80; + } else { + printf("\n"); + } + + while (j < len) { + printf("%24s%.80s\n", "", &help_text[i][j]); + j += 80; + } + + i += 1; + } + + printf("\n"); + exit(0); + } + // summary/list flags + case OPT_SUMMARY: + op = summary; + break; + case OPT_LIST_SUITES: + op = list_suites; + break; + case OPT_LIST_CASES: + op = list_cases; + break; + case OPT_LIST_SUITE_PATHS: + op = list_suite_paths; + break; + case OPT_LIST_CASE_PATHS: + op = list_case_paths; + break; + case OPT_LIST_DEFINES: + op = list_defines; + break; + case OPT_LIST_PERMUTATION_DEFINES: + op = list_permutation_defines; + break; + case OPT_LIST_IMPLICIT_DEFINES: + op = list_implicit_defines; + break; + case OPT_LIST_GEOMETRIES: + op = list_geometries; + break; + case OPT_LIST_POWERLOSSES: + op = list_powerlosses; + break; + // configuration + case OPT_DEFINE: { + // allocate space + test_override_t *override = mappend( + (void**)&test_overrides, + sizeof(test_override_t), + &test_override_count, + &test_override_capacity); + + // parse into string key/intmax_t value, cannibalizing the + // arg in the process + char *sep = strchr(optarg, '='); + char *parsed = NULL; + if (!sep) { + goto invalid_define; + } + *sep = '\0'; + override->name = optarg; + optarg = sep+1; + + // parse comma-separated permutations + { + override->defines = NULL; + override->permutations = 0; + size_t override_capacity = 0; + while (true) { + optarg += strspn(optarg, " "); + + if (strncmp(optarg, "range", strlen("range")) == 0) { + // range of values + optarg += strlen("range"); + optarg += strspn(optarg, " "); + if (*optarg != '(') { + goto invalid_define; + } + optarg += 1; + + intmax_t start = strtoumax(optarg, &parsed, 0); + intmax_t stop = -1; + intmax_t step = 1; + // allow empty string for start=0 + if (parsed == optarg) { + start = 0; + } + 
optarg = parsed + strspn(parsed, " ");
+
+                            if (*optarg != ',' && *optarg != ')') {
+                                goto invalid_define;
+                            }
+
+                            if (*optarg == ',') {
+                                optarg += 1;
+                                stop = strtoumax(optarg, &parsed, 0);
+                                // allow empty string for stop=end
+                                if (parsed == optarg) {
+                                    stop = -1;
+                                }
+                                optarg = parsed + strspn(parsed, " ");
+
+                                if (*optarg != ',' && *optarg != ')') {
+                                    goto invalid_define;
+                                }
+
+                                if (*optarg == ',') {
+                                    optarg += 1;
+                                    step = strtoumax(optarg, &parsed, 0);
+                                    // allow empty string for step=1
+                                    if (parsed == optarg) {
+                                        step = 1;
+                                    }
+                                    optarg = parsed + strspn(parsed, " ");
+
+                                    if (*optarg != ')') {
+                                        goto invalid_define;
+                                    }
+                                }
+                            } else {
+                                // single value = stop only
+                                stop = start;
+                                start = 0;
+                            }
+
+                            if (*optarg != ')') {
+                                goto invalid_define;
+                            }
+                            optarg += 1;
+
+                            // calculate the range of values
+                            assert(step != 0);
+                            for (intmax_t i = start;
+                                    (step < 0)
+                                        ? i > stop
+                                        : (uintmax_t)i < (uintmax_t)stop;
+                                    i += step) {
+                                *(intmax_t*)mappend(
+                                        (void**)&override->defines,
+                                        sizeof(intmax_t),
+                                        &override->permutations,
+                                        &override_capacity) = i;
+                            }
+                        } else if (*optarg != '\0') {
+                            // single value
+                            intmax_t define = strtoimax(optarg, &parsed, 0);
+                            if (parsed == optarg) {
+                                goto invalid_define;
+                            }
+                            optarg = parsed + strspn(parsed, " ");
+                            *(intmax_t*)mappend(
+                                    (void**)&override->defines,
+                                    sizeof(intmax_t),
+                                    &override->permutations,
+                                    &override_capacity) = define;
+                        } else {
+                            break;
+                        }
+
+                        if (*optarg == ',') {
+                            optarg += 1;
+                        }
+                    }
+                }
+                assert(override->permutations > 0);
+                break;
+
+invalid_define:
+                fprintf(stderr, "error: invalid define: %s\n", optarg);
+                exit(-1);
+            }
+            case OPT_GEOMETRY: {
+                // reset our geometry scenarios
+                if (test_geometry_capacity > 0) {
+                    free((test_geometry_t*)test_geometries);
+                }
+                test_geometries = NULL;
+                test_geometry_count = 0;
+                test_geometry_capacity = 0;
+
+                // parse the comma separated list of disk geometries
+                while (*optarg) {
+                    // allocate space
+                    test_geometry_t *geometry = mappend(
+                            (void**)&test_geometries,
+                            sizeof(test_geometry_t),
+                            &test_geometry_count,
+                            &test_geometry_capacity);
+
+                    // parse the disk geometry
+                    optarg += strspn(optarg, " ");
+
+                    // named disk geometry
+                    size_t len = strcspn(optarg, " ,");
+                    for (size_t i = 0; builtin_geometries[i].name; i++) {
+                        if (len == strlen(builtin_geometries[i].name)
+                                && memcmp(optarg,
+                                    builtin_geometries[i].name,
+                                    len) == 0) {
+                            *geometry = builtin_geometries[i];
+                            optarg += len;
+                            goto geometry_next;
+                        }
+                    }
+
+                    // comma-separated read/prog/erase/count
+                    if (*optarg == '{') {
+                        lfs_size_t sizes[4];
+                        size_t count = 0;
+
+                        char *s = optarg + 1;
+                        while (count < 4) {
+                            char *parsed = NULL;
+                            sizes[count] = strtoumax(s, &parsed, 0);
+                            count += 1;
+
+                            s = parsed + strspn(parsed, " ");
+                            if (*s == ',') {
+                                s += 1;
+                                continue;
+                            } else if (*s == '}') {
+                                s += 1;
+                                break;
+                            } else {
+                                goto geometry_unknown;
+                            }
+                        }
+
+                        // allow implicit r=p and p=e for common geometries
+                        memset(geometry, 0, sizeof(test_geometry_t));
+                        if (count >= 3) {
+                            geometry->defines[READ_SIZE_i]
+                                    = TEST_LIT(sizes[0]);
+                            geometry->defines[PROG_SIZE_i]
+                                    = TEST_LIT(sizes[1]);
+                            geometry->defines[ERASE_SIZE_i]
+                                    = TEST_LIT(sizes[2]);
+                        } else if (count >= 2) {
+                            geometry->defines[PROG_SIZE_i]
+                                    = TEST_LIT(sizes[0]);
+                            geometry->defines[ERASE_SIZE_i]
+                                    = TEST_LIT(sizes[1]);
+                        } else {
+                            geometry->defines[ERASE_SIZE_i]
+                                    = TEST_LIT(sizes[0]);
+                        }
+                        if (count >= 4) {
+                            geometry->defines[ERASE_COUNT_i]
+                                    = TEST_LIT(sizes[3]);
+                        }
+                        optarg = s;
+                        goto geometry_next;
+                    }
+
+                    // leb16-encoded read/prog/erase/count
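+                    // (note: the ':' form below takes up to four
+                    // leb16-encoded values, assigned in the same
+                    // read/prog/erase/count order as the '{...}' form
+                    // above, with the same implicit r=p and p=e defaults
+                    // when fewer values are given)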
if (*optarg == ':') { + lfs_size_t sizes[4]; + size_t count = 0; + + char *s = optarg + 1; + while (true) { + char *parsed = NULL; + uintmax_t x = leb16_parse(s, &parsed); + if (parsed == s || count >= 4) { + break; + } + + sizes[count] = x; + count += 1; + s = parsed; + } + + // allow implicit r=p and p=e for common geometries + memset(geometry, 0, sizeof(test_geometry_t)); + if (count >= 3) { + geometry->defines[READ_SIZE_i] + = TEST_LIT(sizes[0]); + geometry->defines[PROG_SIZE_i] + = TEST_LIT(sizes[1]); + geometry->defines[ERASE_SIZE_i] + = TEST_LIT(sizes[2]); + } else if (count >= 2) { + geometry->defines[PROG_SIZE_i] + = TEST_LIT(sizes[0]); + geometry->defines[ERASE_SIZE_i] + = TEST_LIT(sizes[1]); + } else { + geometry->defines[ERASE_SIZE_i] + = TEST_LIT(sizes[0]); + } + if (count >= 4) { + geometry->defines[ERASE_COUNT_i] + = TEST_LIT(sizes[3]); + } + optarg = s; + goto geometry_next; + } + +geometry_unknown: + // unknown scenario? + fprintf(stderr, "error: unknown disk geometry: %s\n", + optarg); + exit(-1); + +geometry_next: + optarg += strspn(optarg, " "); + if (*optarg == ',') { + optarg += 1; + } else if (*optarg == '\0') { + break; + } else { + goto geometry_unknown; + } + } + break; + } + case OPT_POWERLOSS: { + // reset our powerloss scenarios + if (test_powerloss_capacity > 0) { + free((test_powerloss_t*)test_powerlosses); + } + test_powerlosses = NULL; + test_powerloss_count = 0; + test_powerloss_capacity = 0; + + // parse the comma separated list of power-loss scenarios + while (*optarg) { + // allocate space + test_powerloss_t *powerloss = mappend( + (void**)&test_powerlosses, + sizeof(test_powerloss_t), + &test_powerloss_count, + &test_powerloss_capacity); + + // parse the power-loss scenario + optarg += strspn(optarg, " "); + + // named power-loss scenario + size_t len = strcspn(optarg, " ,"); + for (size_t i = 0; builtin_powerlosses[i].name; i++) { + if (len == strlen(builtin_powerlosses[i].name) + && memcmp(optarg, + builtin_powerlosses[i].name, + len) == 0) { + *powerloss = builtin_powerlosses[i]; + optarg += len; + goto powerloss_next; + } + } + + // comma-separated permutation + if (*optarg == '{') { + lfs_emubd_powercycles_t *cycles = NULL; + size_t cycle_count = 0; + size_t cycle_capacity = 0; + + char *s = optarg + 1; + while (true) { + char *parsed = NULL; + *(lfs_emubd_powercycles_t*)mappend( + (void**)&cycles, + sizeof(lfs_emubd_powercycles_t), + &cycle_count, + &cycle_capacity) + = strtoumax(s, &parsed, 0); + + s = parsed + strspn(parsed, " "); + if (*s == ',') { + s += 1; + continue; + } else if (*s == '}') { + s += 1; + break; + } else { + goto powerloss_unknown; + } + } + + *powerloss = (test_powerloss_t){ + .run = run_powerloss_cycles, + .cycles = cycles, + .cycle_count = cycle_count, + }; + optarg = s; + goto powerloss_next; + } + + // leb16-encoded permutation + if (*optarg == ':') { + lfs_emubd_powercycles_t *cycles = NULL; + size_t cycle_count = 0; + size_t cycle_capacity = 0; + + char *s = optarg + 1; + while (true) { + char *parsed = NULL; + uintmax_t x = leb16_parse(s, &parsed); + if (parsed == s) { + break; + } + + *(lfs_emubd_powercycles_t*)mappend( + (void**)&cycles, + sizeof(lfs_emubd_powercycles_t), + &cycle_count, + &cycle_capacity) = x; + s = parsed; + } + + *powerloss = (test_powerloss_t){ + .run = run_powerloss_cycles, + .cycles = cycles, + .cycle_count = cycle_count, + }; + optarg = s; + goto powerloss_next; + } + + // exhaustive permutations + { + char *parsed = NULL; + size_t count = strtoumax(optarg, &parsed, 0); + if (parsed == 
optarg) {
+                        goto powerloss_unknown;
+                    }
+                    *powerloss = (test_powerloss_t){
+                        .run = run_powerloss_exhaustive,
+                        .cycles = NULL,
+                        .cycle_count = count,
+                    };
+                    optarg = (char*)parsed;
+                    goto powerloss_next;
+                }
+
+powerloss_unknown:
+                // unknown scenario?
+                fprintf(stderr, "error: unknown power-loss scenario: %s\n",
+                        optarg);
+                exit(-1);
+
+powerloss_next:
+                optarg += strspn(optarg, " ");
+                if (*optarg == ',') {
+                    optarg += 1;
+                } else if (*optarg == '\0') {
+                    break;
+                } else {
+                    goto powerloss_unknown;
+                }
+            }
+            break;
+        }
+        case OPT_STEP: {
+            char *parsed = NULL;
+            test_step_start = strtoumax(optarg, &parsed, 0);
+            test_step_stop = -1;
+            test_step_step = 1;
+            // allow empty string for start=0
+            if (parsed == optarg) {
+                test_step_start = 0;
+            }
+            optarg = parsed + strspn(parsed, " ");
+
+            if (*optarg != ',' && *optarg != '\0') {
+                goto step_unknown;
+            }
+
+            if (*optarg == ',') {
+                optarg += 1;
+                test_step_stop = strtoumax(optarg, &parsed, 0);
+                // allow empty string for stop=end
+                if (parsed == optarg) {
+                    test_step_stop = -1;
+                }
+                optarg = parsed + strspn(parsed, " ");
+
+                if (*optarg != ',' && *optarg != '\0') {
+                    goto step_unknown;
+                }
+
+                if (*optarg == ',') {
+                    optarg += 1;
+                    test_step_step = strtoumax(optarg, &parsed, 0);
+                    // allow empty string for step=1
+                    if (parsed == optarg) {
+                        test_step_step = 1;
+                    }
+                    optarg = parsed + strspn(parsed, " ");
+
+                    if (*optarg != '\0') {
+                        goto step_unknown;
+                    }
+                }
+            } else {
+                // single value = stop only
+                test_step_stop = test_step_start;
+                test_step_start = 0;
+            }
+
+            break;
+step_unknown:
+            fprintf(stderr, "error: invalid step: %s\n", optarg);
+            exit(-1);
+        }
+        case OPT_DISK:
+            test_disk_path = optarg;
+            break;
+        case OPT_TRACE:
+            test_trace_path = optarg;
+            break;
+        case OPT_TRACE_BACKTRACE:
+            test_trace_backtrace = true;
+            break;
+        case OPT_TRACE_PERIOD: {
+            char *parsed = NULL;
+            test_trace_period = strtoumax(optarg, &parsed, 0);
+            if (parsed == optarg) {
+                fprintf(stderr, "error: invalid trace-period: %s\n", optarg);
+                exit(-1);
+            }
+            break;
+        }
+        case OPT_TRACE_FREQ: {
+            char *parsed = NULL;
+            test_trace_freq = strtoumax(optarg, &parsed, 0);
+            if (parsed == optarg) {
+                fprintf(stderr, "error: invalid trace-freq: %s\n", optarg);
+                exit(-1);
+            }
+            break;
+        }
+        case OPT_READ_SLEEP: {
+            char *parsed = NULL;
+            double read_sleep = strtod(optarg, &parsed);
+            if (parsed == optarg) {
+                fprintf(stderr, "error: invalid read-sleep: %s\n", optarg);
+                exit(-1);
+            }
+            test_read_sleep = read_sleep*1.0e9;
+            break;
+        }
+        case OPT_PROG_SLEEP: {
+            char *parsed = NULL;
+            double prog_sleep = strtod(optarg, &parsed);
+            if (parsed == optarg) {
+                fprintf(stderr, "error: invalid prog-sleep: %s\n", optarg);
+                exit(-1);
+            }
+            test_prog_sleep = prog_sleep*1.0e9;
+            break;
+        }
+        case OPT_ERASE_SLEEP: {
+            char *parsed = NULL;
+            double erase_sleep = strtod(optarg, &parsed);
+            if (parsed == optarg) {
+                fprintf(stderr, "error: invalid erase-sleep: %s\n", optarg);
+                exit(-1);
+            }
+            test_erase_sleep = erase_sleep*1.0e9;
+            break;
+        }
+        // done parsing
+        case -1:
+            goto getopt_done;
+        // unknown arg, getopt prints a message for us
+        default:
+            exit(-1);
+        }
+    }
+getopt_done: ;
+
+    if (argc > optind) {
+        // reset our test identifier list
+        test_ids = NULL;
+        test_id_count = 0;
+        test_id_capacity = 0;
+    }
+
+    // parse test identifier, if any, cannibalizing the arg in the process
+    for (; argc > optind; optind++) {
+        test_define_t *defines = NULL;
+        size_t define_count = 0;
+        lfs_emubd_powercycles_t *cycles = NULL;
+        size_t cycle_count = 0;
+
+        // parse name, can be suite or
case + char *name = argv[optind]; + char *defines_ = strchr(name, ':'); + if (defines_) { + *defines_ = '\0'; + defines_ += 1; + } + + // remove optional path and .toml suffix + char *slash = strrchr(name, '/'); + if (slash) { + name = slash+1; + } + + size_t name_len = strlen(name); + if (name_len > 5 && strcmp(&name[name_len-5], ".toml") == 0) { + name[name_len-5] = '\0'; + } + + if (defines_) { + // parse defines + char *cycles_ = strchr(defines_, ':'); + if (cycles_) { + *cycles_ = '\0'; + cycles_ += 1; + } + + while (true) { + char *parsed; + size_t d = leb16_parse(defines_, &parsed); + intmax_t v = leb16_parse(parsed, &parsed); + if (parsed == defines_) { + break; + } + defines_ = parsed; + + if (d >= define_count) { + // align to power of two to avoid any superlinear growth + size_t ncount = 1 << lfs_npw2(d+1); + defines = realloc(defines, + ncount*sizeof(test_define_t)); + memset(defines+define_count, 0, + (ncount-define_count)*sizeof(test_define_t)); + define_count = ncount; + } + defines[d] = TEST_LIT(v); + } + + if (cycles_) { + // parse power cycles + size_t cycle_capacity = 0; + while (*cycles_ != '\0') { + char *parsed = NULL; + *(lfs_emubd_powercycles_t*)mappend( + (void**)&cycles, + sizeof(lfs_emubd_powercycles_t), + &cycle_count, + &cycle_capacity) + = leb16_parse(cycles_, &parsed); + if (parsed == cycles_) { + fprintf(stderr, "error: " + "could not parse test cycles: %s\n", + cycles_); + exit(-1); + } + cycles_ = parsed; + } + } + } + + // append to identifier list + *(test_id_t*)mappend( + (void**)&test_ids, + sizeof(test_id_t), + &test_id_count, + &test_id_capacity) = (test_id_t){ + .name = name, + .defines = defines, + .define_count = define_count, + .cycles = cycles, + .cycle_count = cycle_count, + }; + } + + // do the thing + op(); + + // cleanup (need to be done for valgrind testing) + test_define_cleanup(); + if (test_overrides) { + for (size_t i = 0; i < test_override_count; i++) { + free((void*)test_overrides[i].defines); + } + free((void*)test_overrides); + } + if (test_geometry_capacity) { + free((void*)test_geometries); + } + if (test_powerloss_capacity) { + for (size_t i = 0; i < test_powerloss_count; i++) { + free((void*)test_powerlosses[i].cycles); + } + free((void*)test_powerlosses); + } + if (test_id_capacity) { + for (size_t i = 0; i < test_id_count; i++) { + free((void*)test_ids[i].defines); + free((void*)test_ids[i].cycles); + } + free((void*)test_ids); + } +} diff --git a/components/joltwallet__littlefs/src/littlefs/runners/test_runner.h b/components/joltwallet__littlefs/src/littlefs/runners/test_runner.h new file mode 100644 index 0000000..ecdf9c1 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/runners/test_runner.h @@ -0,0 +1,142 @@ +/* + * Runner for littlefs tests + * + * Copyright (c) 2022, The littlefs authors. + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef TEST_RUNNER_H +#define TEST_RUNNER_H + + +// override LFS_TRACE +void test_trace(const char *fmt, ...); + +#define LFS_TRACE_(fmt, ...) \ + test_trace("%s:%d:trace: " fmt "%s\n", \ + __FILE__, \ + __LINE__, \ + __VA_ARGS__) +#define LFS_TRACE(...) LFS_TRACE_(__VA_ARGS__, "") +#define LFS_EMUBD_TRACE(...) 
LFS_TRACE_(__VA_ARGS__, "")
+
+
+// note these are indirectly included in any generated files
+#include "bd/lfs_emubd.h"
+#include <stdio.h>
+
+// give source a chance to define feature macros
+#undef _FEATURES_H
+#undef _STDIO_H
+
+
+// generated test configurations
+struct lfs_config;
+
+enum test_flags {
+    TEST_REENTRANT = 0x1,
+};
+typedef uint8_t test_flags_t;
+
+typedef struct test_define {
+    intmax_t (*cb)(void *data);
+    void *data;
+} test_define_t;
+
+struct test_case {
+    const char *name;
+    const char *path;
+    test_flags_t flags;
+    size_t permutations;
+
+    const test_define_t *defines;
+
+    bool (*filter)(void);
+    void (*run)(struct lfs_config *cfg);
+};
+
+struct test_suite {
+    const char *name;
+    const char *path;
+    test_flags_t flags;
+
+    const char *const *define_names;
+    size_t define_count;
+
+    const struct test_case *cases;
+    size_t case_count;
+};
+
+
+// deterministic prng for pseudo-randomness in tests
+uint32_t test_prng(uint32_t *state);
+
+#define TEST_PRNG(state) test_prng(state)
+
+
+// access generated test defines
+intmax_t test_define(size_t define);
+
+#define TEST_DEFINE(i) test_define(i)
+
+// a few preconfigured defines that control how tests run
+
+#define READ_SIZE_i          0
+#define PROG_SIZE_i          1
+#define ERASE_SIZE_i         2
+#define ERASE_COUNT_i        3
+#define BLOCK_SIZE_i         4
+#define BLOCK_COUNT_i        5
+#define CACHE_SIZE_i         6
+#define LOOKAHEAD_SIZE_i     7
+#define COMPACT_THRESH_i     8
+#define METADATA_MAX_i       9
+#define INLINE_MAX_i         10
+#define BLOCK_CYCLES_i       11
+#define ERASE_VALUE_i        12
+#define ERASE_CYCLES_i       13
+#define BADBLOCK_BEHAVIOR_i  14
+#define POWERLOSS_BEHAVIOR_i 15
+#define DISK_VERSION_i       16
+
+#define READ_SIZE          TEST_DEFINE(READ_SIZE_i)
+#define PROG_SIZE          TEST_DEFINE(PROG_SIZE_i)
+#define ERASE_SIZE         TEST_DEFINE(ERASE_SIZE_i)
+#define ERASE_COUNT        TEST_DEFINE(ERASE_COUNT_i)
+#define BLOCK_SIZE         TEST_DEFINE(BLOCK_SIZE_i)
+#define BLOCK_COUNT        TEST_DEFINE(BLOCK_COUNT_i)
+#define CACHE_SIZE         TEST_DEFINE(CACHE_SIZE_i)
+#define LOOKAHEAD_SIZE     TEST_DEFINE(LOOKAHEAD_SIZE_i)
+#define COMPACT_THRESH     TEST_DEFINE(COMPACT_THRESH_i)
+#define METADATA_MAX       TEST_DEFINE(METADATA_MAX_i)
+#define INLINE_MAX         TEST_DEFINE(INLINE_MAX_i)
+#define BLOCK_CYCLES       TEST_DEFINE(BLOCK_CYCLES_i)
+#define ERASE_VALUE        TEST_DEFINE(ERASE_VALUE_i)
+#define ERASE_CYCLES       TEST_DEFINE(ERASE_CYCLES_i)
+#define BADBLOCK_BEHAVIOR  TEST_DEFINE(BADBLOCK_BEHAVIOR_i)
+#define POWERLOSS_BEHAVIOR TEST_DEFINE(POWERLOSS_BEHAVIOR_i)
+#define DISK_VERSION       TEST_DEFINE(DISK_VERSION_i)
+
+#define TEST_IMPLICIT_DEFINES \
+    TEST_DEF(READ_SIZE,          PROG_SIZE) \
+    TEST_DEF(PROG_SIZE,          ERASE_SIZE) \
+    TEST_DEF(ERASE_SIZE,         0) \
+    TEST_DEF(ERASE_COUNT,        (1024*1024)/ERASE_SIZE) \
+    TEST_DEF(BLOCK_SIZE,         ERASE_SIZE) \
+    TEST_DEF(BLOCK_COUNT,        ERASE_COUNT/lfs_max(BLOCK_SIZE/ERASE_SIZE,1)) \
+    TEST_DEF(CACHE_SIZE,         lfs_max(64,lfs_max(READ_SIZE,PROG_SIZE))) \
+    TEST_DEF(LOOKAHEAD_SIZE,     16) \
+    TEST_DEF(COMPACT_THRESH,     0) \
+    TEST_DEF(METADATA_MAX,       0) \
+    TEST_DEF(INLINE_MAX,         0) \
+    TEST_DEF(BLOCK_CYCLES,       -1) \
+    TEST_DEF(ERASE_VALUE,        0xff) \
+    TEST_DEF(ERASE_CYCLES,       0) \
+    TEST_DEF(BADBLOCK_BEHAVIOR,  LFS_EMUBD_BADBLOCK_PROGERROR) \
+    TEST_DEF(POWERLOSS_BEHAVIOR, LFS_EMUBD_POWERLOSS_NOOP) \
+    TEST_DEF(DISK_VERSION,       0)
+
+#define TEST_GEOMETRY_DEFINE_COUNT 4
+#define TEST_IMPLICIT_DEFINE_COUNT 17
+
+
+#endif
diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/bench.py b/components/joltwallet__littlefs/src/littlefs/scripts/bench.py
new file mode 100644
index 0000000..0ed2482
--- /dev/null
+++ b/components/joltwallet__littlefs/src/littlefs/scripts/bench.py
@@ -0,0 +1,1433 @@
+#!/usr/bin/env python3
+#
+# Script to compile and run benches.
+#
+# Example:
+# ./scripts/bench.py runners/bench_runner -b
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import collections as co
+import csv
+import errno
+import glob
+import itertools as it
+import math as m
+import os
+import pty
+import re
+import shlex
+import shutil
+import signal
+import subprocess as sp
+import threading as th
+import time
+import toml
+
+
+RUNNER_PATH = './runners/bench_runner'
+HEADER_PATH = 'runners/bench_runner.h'
+
+GDB_PATH = ['gdb']
+VALGRIND_PATH = ['valgrind']
+PERF_SCRIPT = ['./scripts/perf.py']
+
+
+def openio(path, mode='r', buffering=-1):
+    # allow '-' for stdin/stdout
+    if path == '-':
+        if mode == 'r':
+            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+        else:
+            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+    else:
+        return open(path, mode, buffering)
+
+class BenchCase:
+    # create a BenchCase object from a config
+    def __init__(self, config, args={}):
+        self.name = config.pop('name')
+        self.path = config.pop('path')
+        self.suite = config.pop('suite')
+        self.lineno = config.pop('lineno', None)
+        self.if_ = config.pop('if', None)
+        if isinstance(self.if_, bool):
+            self.if_ = 'true' if self.if_ else 'false'
+        self.code = config.pop('code')
+        self.code_lineno = config.pop('code_lineno', None)
+        self.in_ = config.pop('in',
+            config.pop('suite_in', None))
+
+        # figure out defines and build possible permutations
+        self.defines = set()
+        self.permutations = []
+
+        # defines can be a dict or a list of dicts
+        suite_defines = config.pop('suite_defines', {})
+        if not isinstance(suite_defines, list):
+            suite_defines = [suite_defines]
+        defines = config.pop('defines', {})
+        if not isinstance(defines, list):
+            defines = [defines]
+
+        def csplit(v):
+            # split commas but only outside of parens
+            parens = 0
+            i_ = 0
+            for i in range(len(v)):
+                if v[i] == ',' and parens == 0:
+                    yield v[i_:i]
+                    i_ = i+1
+                elif v[i] in '([{':
+                    parens += 1
+                elif v[i] in '}])':
+                    parens -= 1
+            if v[i_:].strip():
+                yield v[i_:]
+
+        def parse_define(v):
+            # a define entry can be a list
+            if isinstance(v, list):
+                for v_ in v:
+                    yield from parse_define(v_)
+            # or a string
+            elif isinstance(v, str):
+                # which can be comma-separated values, with optional
+                # range statements. This matches the runtime define parser in
+                # the runner itself.
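+                # (note, a hedged example of the accepted syntax: a defines
+                # string like 'range(0,64,16)' expands to the permutations
+                # 0,16,32,48, while '1,2,4' yields the three literal
+                # permutations 1, 2, and 4)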
+                for v_ in csplit(v):
+                    m = re.search(r'\brange\b\s*\('
+                        '(?P<start>[^,\s]*)'
+                        '\s*(?:,\s*(?P<stop>[^,\s]*)'
+                        '\s*(?:,\s*(?P<step>[^,\s]*)\s*)?)?\)',
+                        v_)
+                    if m:
+                        start = (int(m.group('start'), 0)
+                            if m.group('start') else 0)
+                        stop = (int(m.group('stop'), 0)
+                            if m.group('stop') else None)
+                        step = (int(m.group('step'), 0)
+                            if m.group('step') else 1)
+                        if m.lastindex <= 1:
+                            start, stop = 0, start
+                        for x in range(start, stop, step):
+                            yield from parse_define('%s(%d)%s' % (
+                                v_[:m.start()], x, v_[m.end():]))
+                    else:
+                        yield v_
+            # or a literal value
+            elif isinstance(v, bool):
+                yield 'true' if v else 'false'
+            else:
+                yield v
+
+        # build possible permutations
+        for suite_defines_ in suite_defines:
+            self.defines |= suite_defines_.keys()
+            for defines_ in defines:
+                self.defines |= defines_.keys()
+                self.permutations.extend(dict(perm) for perm in it.product(*(
+                    [(k, v) for v in parse_define(vs)]
+                    for k, vs in sorted((suite_defines_ | defines_).items()))))
+
+        for k in config.keys():
+            print('%swarning:%s in %s, found unused key %r' % (
+                '\x1b[01;33m' if args['color'] else '',
+                '\x1b[m' if args['color'] else '',
+                self.name,
+                k),
+                file=sys.stderr)
+
+
+class BenchSuite:
+    # create a BenchSuite object from a toml file
+    def __init__(self, path, args={}):
+        self.path = path
+        self.name = os.path.basename(path)
+        if self.name.endswith('.toml'):
+            self.name = self.name[:-len('.toml')]
+
+        # load toml file and parse bench cases
+        with open(self.path) as f:
+            # load benches
+            config = toml.load(f)
+
+            # find line numbers
+            f.seek(0)
+            case_linenos = []
+            code_linenos = []
+            for i, line in enumerate(f):
+                match = re.match(
+                    '(?P<case>\[\s*cases\s*\.\s*(?P<name>\w+)\s*\])'
+                    '|' '(?P<code>code\s*=)',
+                    line)
+                if match and match.group('case'):
+                    case_linenos.append((i+1, match.group('name')))
+                elif match and match.group('code'):
+                    code_linenos.append(i+2)
+
+            # sort in case toml parsing did not retain order
+            case_linenos.sort()
+
+            cases = config.pop('cases')
+            for (lineno, name), (nlineno, _) in it.zip_longest(
+                    case_linenos, case_linenos[1:],
+                    fillvalue=(float('inf'), None)):
+                code_lineno = min(
+                    (l for l in code_linenos if l >= lineno and l < nlineno),
+                    default=None)
+                cases[name]['lineno'] = lineno
+                cases[name]['code_lineno'] = code_lineno
+
+            self.if_ = config.pop('if', None)
+            if isinstance(self.if_, bool):
+                self.if_ = 'true' if self.if_ else 'false'
+
+            self.code = config.pop('code', None)
+            self.code_lineno = min(
+                (l for l in code_linenos
+                    if not case_linenos or l < case_linenos[0][0]),
+                default=None)
+
+            # a couple of these we just forward to all cases
+            defines = config.pop('defines', {})
+            in_ = config.pop('in', None)
+
+            self.cases = []
+            for name, case in sorted(cases.items(),
+                    key=lambda c: c[1].get('lineno')):
+                self.cases.append(BenchCase(config={
+                    'name': name,
+                    'path': path + (':%d' % case['lineno']
+                        if 'lineno' in case else ''),
+                    'suite': self.name,
+                    'suite_defines': defines,
+                    'suite_in': in_,
+                    **case},
+                    args=args))
+
+            # combine per-case defines
+            self.defines = set.union(*(
+                set(case.defines) for case in self.cases))
+
+            for k in config.keys():
+                print('%swarning:%s in %s, found unused key %r' % (
+                    '\x1b[01;33m' if args['color'] else '',
+                    '\x1b[m' if args['color'] else '',
+                    self.name,
+                    k),
+                    file=sys.stderr)
+
+
+
+def compile(bench_paths, **args):
+    # find .toml files
+    paths = []
+    for path in bench_paths:
+        if os.path.isdir(path):
+            path = path + '/*.toml'
+
+        for path in glob.glob(path):
+            paths.append(path)
+
+    if not paths:
+        print('no bench suites found in %r?'
% bench_paths) + sys.exit(-1) + + # load the suites + suites = [BenchSuite(path, args) for path in paths] + suites.sort(key=lambda s: s.name) + + # check for name conflicts, these will cause ambiguity problems later + # when running benches + seen = {} + for suite in suites: + if suite.name in seen: + print('%swarning:%s conflicting suite %r, %s and %s' % ( + '\x1b[01;33m' if args['color'] else '', + '\x1b[m' if args['color'] else '', + suite.name, + suite.path, + seen[suite.name].path), + file=sys.stderr) + seen[suite.name] = suite + + for case in suite.cases: + # only allow conflicts if a case and its suite share a name + if case.name in seen and not ( + isinstance(seen[case.name], BenchSuite) + and seen[case.name].cases == [case]): + print('%swarning:%s conflicting case %r, %s and %s' % ( + '\x1b[01;33m' if args['color'] else '', + '\x1b[m' if args['color'] else '', + case.name, + case.path, + seen[case.name].path), + file=sys.stderr) + seen[case.name] = case + + # we can only compile one bench suite at a time + if not args.get('source'): + if len(suites) > 1: + print('more than one bench suite for compilation? (%r)' % bench_paths) + sys.exit(-1) + + suite = suites[0] + + # write generated bench source + if 'output' in args: + with openio(args['output'], 'w') as f: + _write = f.write + def write(s): + f.lineno += s.count('\n') + _write(s) + def writeln(s=''): + f.lineno += s.count('\n') + 1 + _write(s) + _write('\n') + f.lineno = 1 + f.write = write + f.writeln = writeln + + f.writeln("// Generated by %s:" % sys.argv[0]) + f.writeln("//") + f.writeln("// %s" % ' '.join(sys.argv)) + f.writeln("//") + f.writeln() + + # include bench_runner.h in every generated file + f.writeln("#include \"%s\"" % args['include']) + f.writeln() + + # write out generated functions, this can end up in different + # files depending on the "in" attribute + # + # note it's up to the specific generated file to declare + # the bench defines + def write_case_functions(f, suite, case): + # create case define functions + if case.defines: + # deduplicate defines by value to try to reduce the + # number of functions we generate + define_cbs = {} + for i, defines in enumerate(case.permutations): + for k, v in sorted(defines.items()): + if v not in define_cbs: + name = ('__bench__%s__%s__%d' + % (case.name, k, i)) + define_cbs[v] = name + f.writeln('intmax_t %s(' + '__attribute__((unused)) ' + 'void *data) {' % name) + f.writeln(4*' '+'return %s;' % v) + f.writeln('}') + f.writeln() + f.writeln('const bench_define_t ' + '__bench__%s__defines[][' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d] = {' + % (case.name, len(suite.defines))) + for defines in case.permutations: + f.writeln(4*' '+'{') + for k, v in sorted(defines.items()): + f.writeln(8*' '+'[%-24s] = {%s, NULL},' % ( + k+'_i', define_cbs[v])) + f.writeln(4*' '+'},') + f.writeln('};') + f.writeln() + + # create case filter function + if suite.if_ is not None or case.if_ is not None: + f.writeln('bool __bench__%s__filter(void) {' + % (case.name)) + f.writeln(4*' '+'return %s;' + % ' && '.join('(%s)' % if_ + for if_ in [suite.if_, case.if_] + if if_ is not None)) + f.writeln('}') + f.writeln() + + # create case run function + f.writeln('void __bench__%s__run(' + '__attribute__((unused)) struct lfs_config *cfg) {' + % (case.name)) + f.writeln(4*' '+'// bench case %s' % case.name) + if case.code_lineno is not None: + f.writeln(4*' '+'#line %d "%s"' + % (case.code_lineno, suite.path)) + f.write(case.code) + if case.code_lineno is not None: + f.writeln(4*' '+'#line %d "%s"' + % 
(f.lineno+1, args['output'])) + f.writeln('}') + f.writeln() + + if not args.get('source'): + if suite.code is not None: + if suite.code_lineno is not None: + f.writeln('#line %d "%s"' + % (suite.code_lineno, suite.path)) + f.write(suite.code) + if suite.code_lineno is not None: + f.writeln('#line %d "%s"' + % (f.lineno+1, args['output'])) + f.writeln() + + if suite.defines: + for i, define in enumerate(sorted(suite.defines)): + f.writeln('#ifndef %s' % define) + f.writeln('#define %-24s ' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % (define+'_i', i)) + f.writeln('#define %-24s ' + 'BENCH_DEFINE(%s)' % (define, define+'_i')) + f.writeln('#endif') + f.writeln() + + # create case functions + for case in suite.cases: + if case.in_ is None: + write_case_functions(f, suite, case) + else: + if case.defines: + f.writeln('extern const bench_define_t ' + '__bench__%s__defines[][' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d];' + % (case.name, len(suite.defines))) + if suite.if_ is not None or case.if_ is not None: + f.writeln('extern bool __bench__%s__filter(' + 'void);' + % (case.name)) + f.writeln('extern void __bench__%s__run(' + 'struct lfs_config *cfg);' + % (case.name)) + f.writeln() + + # create suite struct + f.writeln('#if defined(__APPLE__)') + f.writeln('__attribute__((section("__DATA,_bench_suites")))') + f.writeln('#else') + # note we place this in the custom bench_suites section with + # minimum alignment, otherwise GCC ups the alignment to + # 32-bytes for some reason + f.writeln('__attribute__((section("_bench_suites"), ' + 'aligned(1)))') + f.writeln('#endif') + f.writeln('const struct bench_suite __bench__%s__suite = {' + % suite.name) + f.writeln(4*' '+'.name = "%s",' % suite.name) + f.writeln(4*' '+'.path = "%s",' % suite.path) + f.writeln(4*' '+'.flags = 0,') + if suite.defines: + # create suite define names + f.writeln(4*' '+'.define_names = (const char *const[' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d]){' % ( + len(suite.defines))) + for k in sorted(suite.defines): + f.writeln(8*' '+'[%-24s] = "%s",' % (k+'_i', k)) + f.writeln(4*' '+'},') + f.writeln(4*' '+'.define_count = ' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d,' % len(suite.defines)) + f.writeln(4*' '+'.cases = (const struct bench_case[]){') + for case in suite.cases: + # create case structs + f.writeln(8*' '+'{') + f.writeln(12*' '+'.name = "%s",' % case.name) + f.writeln(12*' '+'.path = "%s",' % case.path) + f.writeln(12*' '+'.flags = 0,') + f.writeln(12*' '+'.permutations = %d,' + % len(case.permutations)) + if case.defines: + f.writeln(12*' '+'.defines ' + '= (const bench_define_t*)__bench__%s__defines,' + % (case.name)) + if suite.if_ is not None or case.if_ is not None: + f.writeln(12*' '+'.filter = __bench__%s__filter,' + % (case.name)) + f.writeln(12*' '+'.run = __bench__%s__run,' + % (case.name)) + f.writeln(8*' '+'},') + f.writeln(4*' '+'},') + f.writeln(4*' '+'.case_count = %d,' % len(suite.cases)) + f.writeln('};') + f.writeln() + + else: + # copy source + f.writeln('#line 1 "%s"' % args['source']) + with open(args['source']) as sf: + shutil.copyfileobj(sf, f) + f.writeln() + + # write any internal benches + for suite in suites: + for case in suite.cases: + if (case.in_ is not None + and os.path.normpath(case.in_) + == os.path.normpath(args['source'])): + # write defines, but note we need to undef any + # new defines since we're in someone else's file + if suite.defines: + for i, define in enumerate( + sorted(suite.defines)): + f.writeln('#ifndef %s' % define) + f.writeln('#define %-24s ' + 'BENCH_IMPLICIT_DEFINE_COUNT+%d' % ( + define+'_i', 
i)) + f.writeln('#define %-24s ' + 'BENCH_DEFINE(%s)' % ( + define, define+'_i')) + f.writeln('#define ' + '__BENCH__%s__NEEDS_UNDEF' % ( + define)) + f.writeln('#endif') + f.writeln() + + write_case_functions(f, suite, case) + + if suite.defines: + for define in sorted(suite.defines): + f.writeln('#ifdef __BENCH__%s__NEEDS_UNDEF' + % define) + f.writeln('#undef __BENCH__%s__NEEDS_UNDEF' + % define) + f.writeln('#undef %s' % define) + f.writeln('#undef %s' % (define+'_i')) + f.writeln('#endif') + f.writeln() + +def find_runner(runner, **args): + cmd = runner.copy() + + # run under some external command? + if args.get('exec'): + cmd[:0] = args['exec'] + + # run under valgrind? + if args.get('valgrind'): + cmd[:0] = args['valgrind_path'] + [ + '--leak-check=full', + '--track-origins=yes', + '--error-exitcode=4', + '-q'] + + # run under perf? + if args.get('perf'): + cmd[:0] = args['perf_script'] + list(filter(None, [ + '-R', + '--perf-freq=%s' % args['perf_freq'] + if args.get('perf_freq') else None, + '--perf-period=%s' % args['perf_period'] + if args.get('perf_period') else None, + '--perf-events=%s' % args['perf_events'] + if args.get('perf_events') else None, + '--perf-path=%s' % args['perf_path'] + if args.get('perf_path') else None, + '-o%s' % args['perf']])) + + # other context + if args.get('geometry'): + cmd.append('-G%s' % args['geometry']) + if args.get('disk'): + cmd.append('-d%s' % args['disk']) + if args.get('trace'): + cmd.append('-t%s' % args['trace']) + if args.get('trace_backtrace'): + cmd.append('--trace-backtrace') + if args.get('trace_period'): + cmd.append('--trace-period=%s' % args['trace_period']) + if args.get('trace_freq'): + cmd.append('--trace-freq=%s' % args['trace_freq']) + if args.get('read_sleep'): + cmd.append('--read-sleep=%s' % args['read_sleep']) + if args.get('prog_sleep'): + cmd.append('--prog-sleep=%s' % args['prog_sleep']) + if args.get('erase_sleep'): + cmd.append('--erase-sleep=%s' % args['erase_sleep']) + + # defines? 
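+    # (note: each --define is forwarded verbatim as -DNAME=VALUE below; the
+    # runner, not this script, parses VALUE, so comma lists and range(...)
+    # expressions pass through unchanged)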
+    if args.get('define'):
+        for define in args.get('define'):
+            cmd.append('-D%s' % define)
+
+    return cmd
+
+def list_(runner, bench_ids=[], **args):
+    cmd = find_runner(runner, **args) + bench_ids
+    if args.get('summary'): cmd.append('--summary')
+    if args.get('list_suites'): cmd.append('--list-suites')
+    if args.get('list_cases'): cmd.append('--list-cases')
+    if args.get('list_suite_paths'): cmd.append('--list-suite-paths')
+    if args.get('list_case_paths'): cmd.append('--list-case-paths')
+    if args.get('list_defines'): cmd.append('--list-defines')
+    if args.get('list_permutation_defines'):
+        cmd.append('--list-permutation-defines')
+    if args.get('list_implicit_defines'):
+        cmd.append('--list-implicit-defines')
+    if args.get('list_geometries'): cmd.append('--list-geometries')
+
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    return sp.call(cmd)
+
+
+def find_perms(runner_, ids=[], **args):
+    case_suites = {}
+    expected_case_perms = co.defaultdict(lambda: 0)
+    expected_perms = 0
+    total_perms = 0
+
+    # query cases from the runner
+    cmd = runner_ + ['--list-cases'] + ids
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    pattern = re.compile(
+        '^(?P<case>[^\s]+)'
+        '\s+(?P<flags>[^\s]+)'
+        '\s+(?P<filtered>\d+)/(?P<perms>\d+)')
+    # skip the first line
+    for line in it.islice(proc.stdout, 1, None):
+        m = pattern.match(line)
+        if m:
+            filtered = int(m.group('filtered'))
+            perms = int(m.group('perms'))
+            expected_case_perms[m.group('case')] += filtered
+            expected_perms += filtered
+            total_perms += perms
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        sys.exit(-1)
+
+    # get which suite each case belongs to via paths
+    cmd = runner_ + ['--list-case-paths'] + ids
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    pattern = re.compile(
+        '^(?P<case>[^\s]+)'
+        '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+    # skip the first line
+    for line in it.islice(proc.stdout, 1, None):
+        m = pattern.match(line)
+        if m:
+            path = m.group('path')
+            # strip path/suffix here
+            suite = os.path.basename(path)
+            if suite.endswith('.toml'):
+                suite = suite[:-len('.toml')]
+            case_suites[m.group('case')] = suite
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        sys.exit(-1)
+
+    # figure out expected suite perms
+    expected_suite_perms = co.defaultdict(lambda: 0)
+    for case, suite in case_suites.items():
+        expected_suite_perms[suite] += expected_case_perms[case]
+
+    return (
+        case_suites,
+        expected_suite_perms,
+        expected_case_perms,
+        expected_perms,
+        total_perms)
+
+def find_path(runner_, id, **args):
+    path = None
+    # query from runner
+    cmd = runner_ + ['--list-case-paths', id]
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    pattern = re.compile(
+        '^(?P<case>[^\s]+)'
+        '\s+(?P<path>[^:]+):(?P<lineno>\d+)')
+    # skip the first line
+    for line in it.islice(proc.stdout, 1, None):
+        m = pattern.match(line)
+        if m and path is None:
+            path_ = m.group('path')
+            lineno = int(m.group('lineno'))
+            path = (path_, lineno)
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        sys.exit(-1)
+
+    return path
+
+def find_defines(runner_, id, **args):
+    # query permutation defines from runner
+    cmd = runner_ + ['--list-permutation-defines', id]
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    defines = co.OrderedDict()
+    pattern = re.compile('^(?P<define>\w+)=(?P<value>.+)')
+    for line in proc.stdout:
+        m = pattern.match(line)
+        if m:
+            define = m.group('define')
+            value = m.group('value')
+            defines[define] = value
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        sys.exit(-1)
+
+    return defines
+
+
+# Thread-safe CSV writer
+class BenchOutput:
+    def __init__(self, path, head=None, tail=None):
+        self.f = openio(path, 'w+', 1)
+        self.lock = th.Lock()
+        self.head = head or []
+        self.tail = tail or []
+        self.writer = csv.DictWriter(self.f, self.head + self.tail)
+        self.rows = []
+
+    def close(self):
+        self.f.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *_):
+        self.f.close()
+
+    def writerow(self, row):
+        with self.lock:
+            self.rows.append(row)
+            if all(k in self.head or k in self.tail for k in row.keys()):
+                # can simply append
+                self.writer.writerow(row)
+            else:
+                # need to rewrite the file
+                self.head.extend(row.keys() - (self.head + self.tail))
+                self.f.seek(0)
+                self.f.truncate()
+                self.writer = csv.DictWriter(self.f, self.head + self.tail)
+                self.writer.writeheader()
+                for row in self.rows:
+                    self.writer.writerow(row)
+
+# A bench failure
+class BenchFailure(Exception):
+    def __init__(self, id, returncode, stdout, assert_=None):
+        self.id = id
+        self.returncode = returncode
+        self.stdout = stdout
+        self.assert_ = assert_
+
+def run_stage(name, runner_, ids, stdout_, trace_, output_, **args):
+    # get expected suite/case/perm counts
+    (case_suites,
+        expected_suite_perms,
+        expected_case_perms,
+        expected_perms,
+        total_perms) = find_perms(runner_, ids, **args)
+
+    passed_suite_perms = co.defaultdict(lambda: 0)
+    passed_case_perms = co.defaultdict(lambda: 0)
+    passed_perms = 0
+    readed = 0
+    proged = 0
+    erased = 0
+    failures = []
+    killed = False
+
+    pattern = re.compile('^(?:'
+        '(?P<op>running|finished|skipped|powerloss)'
+        ' (?P<id>(?P<case>[^:]+)[^\s]*)'
+        '(?: (?P<readed>\d+))?'
+        '(?: (?P<proged>\d+))?'
+        '(?: (?P<erased>\d+))?'
+        '|' '(?P<path>[^:]+):(?P<lineno>\d+):(?P<op_>assert):'
+        ' *(?P<message>.*)'
+        ')$')
+    locals = th.local()
+    children = set()
+
+    def run_runner(runner_, ids=[]):
+        nonlocal passed_suite_perms
+        nonlocal passed_case_perms
+        nonlocal passed_perms
+        nonlocal readed
+        nonlocal proged
+        nonlocal erased
+        nonlocal locals
+
+        # run the benches!
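+        # (note: the pattern above encodes the runner's line protocol as
+        # consumed here: 'running <id>', 'finished <id> <readed> <proged>
+        # <erased>', 'skipped <id>', and '<path>:<lineno>:assert: <message>'
+        # for failed asserts; the handlers below assume one event per line)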
+ cmd = runner_ + ids + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + + mpty, spty = pty.openpty() + proc = sp.Popen(cmd, stdout=spty, stderr=spty, close_fds=False) + os.close(spty) + children.add(proc) + mpty = os.fdopen(mpty, 'r', 1) + + last_id = None + last_stdout = co.deque(maxlen=args.get('context', 5) + 1) + last_assert = None + try: + while True: + # parse a line for state changes + try: + line = mpty.readline() + except OSError as e: + if e.errno != errno.EIO: + raise + break + if not line: + break + last_stdout.append(line) + if stdout_: + try: + stdout_.write(line) + stdout_.flush() + except BrokenPipeError: + pass + + m = pattern.match(line) + if m: + op = m.group('op') or m.group('op_') + if op == 'running': + locals.seen_perms += 1 + last_id = m.group('id') + last_stdout.clear() + last_assert = None + elif op == 'finished': + case = m.group('case') + suite = case_suites[case] + readed_ = int(m.group('readed')) + proged_ = int(m.group('proged')) + erased_ = int(m.group('erased')) + passed_suite_perms[suite] += 1 + passed_case_perms[case] += 1 + passed_perms += 1 + readed += readed_ + proged += proged_ + erased += erased_ + if output_: + # get defines and write to csv + defines = find_defines( + runner_, m.group('id'), **args) + output_.writerow({ + 'suite': suite, + 'case': case, + 'bench_readed': readed_, + 'bench_proged': proged_, + 'bench_erased': erased_, + **defines}) + elif op == 'skipped': + locals.seen_perms += 1 + elif op == 'assert': + last_assert = ( + m.group('path'), + int(m.group('lineno')), + m.group('message')) + # go ahead and kill the process, aborting takes a while + if args.get('keep_going'): + proc.kill() + except KeyboardInterrupt: + raise BenchFailure(last_id, 1, list(last_stdout)) + finally: + children.remove(proc) + mpty.close() + + proc.wait() + if proc.returncode != 0: + raise BenchFailure( + last_id, + proc.returncode, + list(last_stdout), + last_assert) + + def run_job(runner_, ids=[], start=None, step=None): + nonlocal failures + nonlocal killed + nonlocal locals + + start = start or 0 + step = step or 1 + while start < total_perms: + job_runner = runner_.copy() + if args.get('isolate') or args.get('valgrind'): + job_runner.append('-s%s,%s,%s' % (start, start+step, step)) + else: + job_runner.append('-s%s,,%s' % (start, step)) + + try: + # run the benches + locals.seen_perms = 0 + run_runner(job_runner, ids) + assert locals.seen_perms > 0 + start += locals.seen_perms*step + + except BenchFailure as failure: + # keep track of failures + if output_: + case, _ = failure.id.split(':', 1) + suite = case_suites[case] + # get defines and write to csv + defines = find_defines(runner_, failure.id, **args) + output_.writerow({ + 'suite': suite, + 'case': case, + **defines}) + + # race condition for multiple failures? + if failures and not args.get('keep_going'): + break + + failures.append(failure) + + if args.get('keep_going') and not killed: + # resume after failed bench + assert locals.seen_perms > 0 + start += locals.seen_perms*step + continue + else: + # stop other benches + killed = True + for child in children.copy(): + child.kill() + break + + + # parallel jobs? 
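+    # (note: a sketch of the striping scheme, assuming the runner's
+    # -s start,stop,step option behaves as run_job uses it above: with -j4,
+    # job k runs permutations k, k+4, k+8, ..., so the jobs partition the
+    # permutation space with no coordination beyond the shared counters)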
+ runners = [] + if 'jobs' in args: + for job in range(args['jobs']): + runners.append(th.Thread( + target=run_job, args=(runner_, ids, job, args['jobs']), + daemon=True)) + else: + runners.append(th.Thread( + target=run_job, args=(runner_, ids, None, None), + daemon=True)) + + def print_update(done): + if not args.get('verbose') and (args['color'] or done): + sys.stdout.write('%s%srunning %s%s:%s %s%s' % ( + '\r\x1b[K' if args['color'] else '', + '\x1b[?7l' if not done else '', + ('\x1b[34m' if not failures else '\x1b[31m') + if args['color'] else '', + name, + '\x1b[m' if args['color'] else '', + ', '.join(filter(None, [ + '%d/%d suites' % ( + sum(passed_suite_perms[k] == v + for k, v in expected_suite_perms.items()), + len(expected_suite_perms)) + if (not args.get('by_suites') + and not args.get('by_cases')) else None, + '%d/%d cases' % ( + sum(passed_case_perms[k] == v + for k, v in expected_case_perms.items()), + len(expected_case_perms)) + if not args.get('by_cases') else None, + '%d/%d perms' % (passed_perms, expected_perms), + '%s%d/%d failures%s' % ( + '\x1b[31m' if args['color'] else '', + len(failures), + expected_perms, + '\x1b[m' if args['color'] else '') + if failures else None])), + '\x1b[?7h' if not done else '\n')) + sys.stdout.flush() + + for r in runners: + r.start() + + try: + while any(r.is_alive() for r in runners): + time.sleep(0.01) + print_update(False) + except KeyboardInterrupt: + # this is handled by the runner threads, we just + # need to not abort here + killed = True + finally: + print_update(True) + + for r in runners: + r.join() + + return ( + expected_perms, + passed_perms, + readed, + proged, + erased, + failures, + killed) + + +def run(runner, bench_ids=[], **args): + # query runner for benches + runner_ = find_runner(runner, **args) + print('using runner: %s' % ' '.join(shlex.quote(c) for c in runner_)) + (_, + expected_suite_perms, + expected_case_perms, + expected_perms, + total_perms) = find_perms(runner_, bench_ids, **args) + print('found %d suites, %d cases, %d/%d permutations' % ( + len(expected_suite_perms), + len(expected_case_perms), + expected_perms, + total_perms)) + print() + + # automatic job detection? 
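+    # (note: -j with no value parses as jobs=0 via argparse's const=0, which
+    # is expanded here to one runner thread per available core;
+    # os.sched_getaffinity is Linux-specific)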
+ if args.get('jobs') == 0: + args['jobs'] = len(os.sched_getaffinity(0)) + + # truncate and open logs here so they aren't disconnected between benches + stdout = None + if args.get('stdout'): + stdout = openio(args['stdout'], 'w', 1) + trace = None + if args.get('trace'): + trace = openio(args['trace'], 'w', 1) + output = None + if args.get('output'): + output = BenchOutput(args['output'], + ['suite', 'case'], + ['bench_readed', 'bench_proged', 'bench_erased']) + + # measure runtime + start = time.time() + + # spawn runners + expected = 0 + passed = 0 + readed = 0 + proged = 0 + erased = 0 + failures = [] + for by in (bench_ids if bench_ids + else expected_case_perms.keys() if args.get('by_cases') + else expected_suite_perms.keys() if args.get('by_suites') + else [None]): + # spawn jobs for stage + (expected_, + passed_, + readed_, + proged_, + erased_, + failures_, + killed) = run_stage( + by or 'benches', + runner_, + [by] if by is not None else [], + stdout, + trace, + output, + **args) + # collect passes/failures + expected += expected_ + passed += passed_ + readed += readed_ + proged += proged_ + erased += erased_ + failures.extend(failures_) + if (failures and not args.get('keep_going')) or killed: + break + + stop = time.time() + + if stdout: + try: + stdout.close() + except BrokenPipeError: + pass + if trace: + try: + trace.close() + except BrokenPipeError: + pass + if output: + output.close() + + # show summary + print() + print('%sdone:%s %s' % ( + ('\x1b[34m' if not failures else '\x1b[31m') + if args['color'] else '', + '\x1b[m' if args['color'] else '', + ', '.join(filter(None, [ + '%d readed' % readed, + '%d proged' % proged, + '%d erased' % erased, + 'in %.2fs' % (stop-start)])))) + print() + + # print each failure + for failure in failures: + assert failure.id is not None, '%s broken? %r' % ( + ' '.join(shlex.quote(c) for c in runner_), + failure) + + # get some extra info from runner + path, lineno = find_path(runner_, failure.id, **args) + defines = find_defines(runner_, failure.id, **args) + + # show summary of failure + print('%s%s:%d:%sfailure:%s %s%s failed' % ( + '\x1b[01m' if args['color'] else '', + path, lineno, + '\x1b[01;31m' if args['color'] else '', + '\x1b[m' if args['color'] else '', + failure.id, + ' (%s)' % ', '.join('%s=%s' % (k,v) for k,v in defines.items()) + if defines else '')) + + if failure.stdout: + stdout = failure.stdout + if failure.assert_ is not None: + stdout = stdout[:-1] + for line in stdout[-args.get('context', 5):]: + sys.stdout.write(line) + + if failure.assert_ is not None: + path, lineno, message = failure.assert_ + print('%s%s:%d:%sassert:%s %s' % ( + '\x1b[01m' if args['color'] else '', + path, lineno, + '\x1b[01;31m' if args['color'] else '', + '\x1b[m' if args['color'] else '', + message)) + with open(path) as f: + line = next(it.islice(f, lineno-1, None)).strip('\n') + print(line) + print() + + # drop into gdb? 
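+    # (note: three escalating entry points below: --gdb-main breaks at main,
+    # --gdb-case breaks at the failing case's path:lineno, and plain --gdb
+    # just reruns under gdb, walking up from raise when the failure was an
+    # assert)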
+ if failures and (args.get('gdb') + or args.get('gdb_case') + or args.get('gdb_main')): + failure = failures[0] + cmd = runner_ + [failure.id] + + if args.get('gdb_main'): + # we don't really need the case breakpoint here, but it + # can be helpful + path, lineno = find_path(runner_, failure.id, **args) + cmd[:0] = args['gdb_path'] + [ + '-ex', 'break main', + '-ex', 'break %s:%d' % (path, lineno), + '-ex', 'run', + '--args'] + elif args.get('gdb_case'): + path, lineno = find_path(runner_, failure.id, **args) + cmd[:0] = args['gdb_path'] + [ + '-ex', 'break %s:%d' % (path, lineno), + '-ex', 'run', + '--args'] + elif failure.assert_ is not None: + cmd[:0] = args['gdb_path'] + [ + '-ex', 'run', + '-ex', 'frame function raise', + '-ex', 'up 2', + '--args'] + else: + cmd[:0] = args['gdb_path'] + [ + '-ex', 'run', + '--args'] + + # exec gdb interactively + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + os.execvp(cmd[0], cmd) + + return 1 if failures else 0 + + +def main(**args): + # figure out what color should be + if args.get('color') == 'auto': + args['color'] = sys.stdout.isatty() + elif args.get('color') == 'always': + args['color'] = True + else: + args['color'] = False + + if args.get('compile'): + return compile(**args) + elif (args.get('summary') + or args.get('list_suites') + or args.get('list_cases') + or args.get('list_suite_paths') + or args.get('list_case_paths') + or args.get('list_defines') + or args.get('list_permutation_defines') + or args.get('list_implicit_defines') + or args.get('list_geometries')): + return list_(**args) + else: + return run(**args) + + +if __name__ == "__main__": + import argparse + import sys + argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None + argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None + parser = argparse.ArgumentParser( + description="Build and run benches.", + allow_abbrev=False, + conflict_handler='ignore') + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument( + '--color', + choices=['never', 'always', 'auto'], + default='auto', + help="When to use terminal colors. Defaults to 'auto'.") + + # bench flags + bench_parser = parser.add_argument_group('bench options') + bench_parser.add_argument( + 'runner', + nargs='?', + type=lambda x: x.split(), + help="Bench runner to use for benching. Defaults to %r." 
% RUNNER_PATH) + bench_parser.add_argument( + 'bench_ids', + nargs='*', + help="Description of benches to run.") + bench_parser.add_argument( + '-Y', '--summary', + action='store_true', + help="Show quick summary.") + bench_parser.add_argument( + '-l', '--list-suites', + action='store_true', + help="List bench suites.") + bench_parser.add_argument( + '-L', '--list-cases', + action='store_true', + help="List bench cases.") + bench_parser.add_argument( + '--list-suite-paths', + action='store_true', + help="List the path for each bench suite.") + bench_parser.add_argument( + '--list-case-paths', + action='store_true', + help="List the path and line number for each bench case.") + bench_parser.add_argument( + '--list-defines', + action='store_true', + help="List all defines in this bench-runner.") + bench_parser.add_argument( + '--list-permutation-defines', + action='store_true', + help="List explicit defines in this bench-runner.") + bench_parser.add_argument( + '--list-implicit-defines', + action='store_true', + help="List implicit defines in this bench-runner.") + bench_parser.add_argument( + '--list-geometries', + action='store_true', + help="List the available disk geometries.") + bench_parser.add_argument( + '-D', '--define', + action='append', + help="Override a bench define.") + bench_parser.add_argument( + '-G', '--geometry', + help="Comma-separated list of disk geometries to bench.") + bench_parser.add_argument( + '-d', '--disk', + help="Direct block device operations to this file.") + bench_parser.add_argument( + '-t', '--trace', + help="Direct trace output to this file.") + bench_parser.add_argument( + '--trace-backtrace', + action='store_true', + help="Include a backtrace with every trace statement.") + bench_parser.add_argument( + '--trace-period', + help="Sample trace output at this period in cycles.") + bench_parser.add_argument( + '--trace-freq', + help="Sample trace output at this frequency in hz.") + bench_parser.add_argument( + '-O', '--stdout', + help="Direct stdout to this file. Note stderr is already merged here.") + bench_parser.add_argument( + '-o', '--output', + help="CSV file to store results.") + bench_parser.add_argument( + '--read-sleep', + help="Artificial read delay in seconds.") + bench_parser.add_argument( + '--prog-sleep', + help="Artificial prog delay in seconds.") + bench_parser.add_argument( + '--erase-sleep', + help="Artificial erase delay in seconds.") + bench_parser.add_argument( + '-j', '--jobs', + nargs='?', + type=lambda x: int(x, 0), + const=0, + help="Number of parallel runners to run. 0 runs one runner per core.") + bench_parser.add_argument( + '-k', '--keep-going', + action='store_true', + help="Don't stop on first error.") + bench_parser.add_argument( + '-i', '--isolate', + action='store_true', + help="Run each bench permutation in a separate process.") + bench_parser.add_argument( + '-b', '--by-suites', + action='store_true', + help="Step through benches by suite.") + bench_parser.add_argument( + '-B', '--by-cases', + action='store_true', + help="Step through benches by case.") + bench_parser.add_argument( + '--context', + type=lambda x: int(x, 0), + default=5, + help="Show this many lines of stdout on bench failure. 
" + "Defaults to 5.") + bench_parser.add_argument( + '--gdb', + action='store_true', + help="Drop into gdb on bench failure.") + bench_parser.add_argument( + '--gdb-case', + action='store_true', + help="Drop into gdb on bench failure but stop at the beginning " + "of the failing bench case.") + bench_parser.add_argument( + '--gdb-main', + action='store_true', + help="Drop into gdb on bench failure but stop at the beginning " + "of main.") + bench_parser.add_argument( + '--gdb-path', + type=lambda x: x.split(), + default=GDB_PATH, + help="Path to the gdb executable, may include flags. " + "Defaults to %r." % GDB_PATH) + bench_parser.add_argument( + '--exec', + type=lambda e: e.split(), + help="Run under another executable.") + bench_parser.add_argument( + '--valgrind', + action='store_true', + help="Run under Valgrind to find memory errors. Implicitly sets " + "--isolate.") + bench_parser.add_argument( + '--valgrind-path', + type=lambda x: x.split(), + default=VALGRIND_PATH, + help="Path to the Valgrind executable, may include flags. " + "Defaults to %r." % VALGRIND_PATH) + bench_parser.add_argument( + '-p', '--perf', + help="Run under Linux's perf to sample performance counters, writing " + "samples to this file.") + bench_parser.add_argument( + '--perf-freq', + help="perf sampling frequency. This is passed directly to the perf " + "script.") + bench_parser.add_argument( + '--perf-period', + help="perf sampling period. This is passed directly to the perf " + "script.") + bench_parser.add_argument( + '--perf-events', + help="perf events to record. This is passed directly to the perf " + "script.") + bench_parser.add_argument( + '--perf-script', + type=lambda x: x.split(), + default=PERF_SCRIPT, + help="Path to the perf script to use. Defaults to %r." % PERF_SCRIPT) + bench_parser.add_argument( + '--perf-path', + type=lambda x: x.split(), + help="Path to the perf executable, may include flags. This is passed " + "directly to the perf script") + + # compilation flags + comp_parser = parser.add_argument_group('compilation options') + comp_parser.add_argument( + 'bench_paths', + nargs='*', + help="Description of *.toml files to compile. May be a directory " + "or a list of paths.") + comp_parser.add_argument( + '-c', '--compile', + action='store_true', + help="Compile a bench suite or source file.") + comp_parser.add_argument( + '-s', '--source', + help="Source file to compile, possibly injecting internal benches.") + comp_parser.add_argument( + '--include', + default=HEADER_PATH, + help="Inject this header file into every compiled bench file. " + "Defaults to %r." % HEADER_PATH) + comp_parser.add_argument( + '-o', '--output', + help="Output file.") + + # runner/bench_paths overlap, so need to do some munging here + args = parser.parse_intermixed_args() + args.bench_paths = [' '.join(args.runner or [])] + args.bench_ids + args.runner = args.runner or [RUNNER_PATH] + + sys.exit(main(**{k: v + for k, v in vars(args).items() + if v is not None})) diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/changeprefix.py b/components/joltwallet__littlefs/src/littlefs/scripts/changeprefix.py new file mode 100644 index 0000000..1ecc2e4 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/scripts/changeprefix.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 +# +# Change prefixes in files/filenames. Useful for creating different versions +# of a codebase that don't conflict at compile time. 
+# +# Example: +# $ ./scripts/changeprefix.py lfs lfs3 +# +# Copyright (c) 2022, The littlefs authors. +# Copyright (c) 2019, Arm Limited. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# + +import glob +import itertools +import os +import os.path +import re +import shlex +import shutil +import subprocess +import tempfile + +GIT_PATH = ['git'] + + +def openio(path, mode='r', buffering=-1): + # allow '-' for stdin/stdout + if path == '-': + if mode == 'r': + return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering) + else: + return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering) + else: + return open(path, mode, buffering) + +def changeprefix(from_prefix, to_prefix, line): + line, count1 = re.subn( + '\\b'+from_prefix, + to_prefix, + line) + line, count2 = re.subn( + '\\b'+from_prefix.upper(), + to_prefix.upper(), + line) + line, count3 = re.subn( + '\\B-D'+from_prefix.upper(), + '-D'+to_prefix.upper(), + line) + return line, count1+count2+count3 + +def changefile(from_prefix, to_prefix, from_path, to_path, *, + no_replacements=False): + # rename any prefixes in file + count = 0 + + # create a temporary file to avoid overwriting ourself + if from_path == to_path and to_path != '-': + to_path_temp = tempfile.NamedTemporaryFile('w', delete=False) + to_path = to_path_temp.name + else: + to_path_temp = None + + with openio(from_path) as from_f: + with openio(to_path, 'w') as to_f: + for line in from_f: + if not no_replacements: + line, n = changeprefix(from_prefix, to_prefix, line) + count += n + to_f.write(line) + + if from_path != '-' and to_path != '-': + shutil.copystat(from_path, to_path) + + if to_path_temp: + shutil.move(to_path, from_path) + elif from_path != '-': + os.remove(from_path) + + # Summary + print('%s: %d replacements' % ( + '%s -> %s' % (from_path, to_path) if not to_path_temp else from_path, + count)) + +def main(from_prefix, to_prefix, paths=[], *, + verbose=False, + output=None, + no_replacements=False, + no_renames=False, + git=False, + no_stage=False, + git_path=GIT_PATH): + if not paths: + if git: + cmd = git_path + ['ls-tree', '-r', '--name-only', 'HEAD'] + if verbose: + print(' '.join(shlex.quote(c) for c in cmd)) + paths = subprocess.check_output(cmd, encoding='utf8').split() + else: + print('no paths?', file=sys.stderr) + sys.exit(1) + + for from_path in paths: + # rename filename? + if output: + to_path = output + elif no_renames: + to_path = from_path + else: + to_path = os.path.join( + os.path.dirname(from_path), + changeprefix(from_prefix, to_prefix, + os.path.basename(from_path))[0]) + + # rename contents + changefile(from_prefix, to_prefix, from_path, to_path, + no_replacements=no_replacements) + + # stage? + if git and not no_stage: + if from_path != to_path: + cmd = git_path + ['rm', '-q', from_path] + if verbose: + print(' '.join(shlex.quote(c) for c in cmd)) + subprocess.check_call(cmd) + cmd = git_path + ['add', to_path] + if verbose: + print(' '.join(shlex.quote(c) for c in cmd)) + subprocess.check_call(cmd) + + +if __name__ == "__main__": + import argparse + import sys + parser = argparse.ArgumentParser( + description="Change prefixes in files/filenames. 
Useful for creating " + "different versions of a codebase that don't conflict at compile " + "time.", + allow_abbrev=False) + parser.add_argument( + 'from_prefix', + help="Prefix to replace.") + parser.add_argument( + 'to_prefix', + help="Prefix to replace with.") + parser.add_argument( + 'paths', + nargs='*', + help="Files to operate on.") + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument( + '-o', '--output', + help="Output file.") + parser.add_argument( + '-N', '--no-replacements', + action='store_true', + help="Don't change prefixes in files") + parser.add_argument( + '-R', '--no-renames', + action='store_true', + help="Don't rename files") + parser.add_argument( + '--git', + action='store_true', + help="Use git to find/update files.") + parser.add_argument( + '--no-stage', + action='store_true', + help="Don't stage changes with git.") + parser.add_argument( + '--git-path', + type=lambda x: x.split(), + default=GIT_PATH, + help="Path to git executable, may include flags. " + "Defaults to %r." % GIT_PATH) + sys.exit(main(**{k: v + for k, v in vars(parser.parse_intermixed_args()).items() + if v is not None})) diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/code.py b/components/joltwallet__littlefs/src/littlefs/scripts/code.py new file mode 100644 index 0000000..ba8bd1e --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/scripts/code.py @@ -0,0 +1,707 @@ +#!/usr/bin/env python3 +# +# Script to find code size at the function level. Basically just a big wrapper +# around nm with some extra conveniences for comparing builds. Heavily inspired +# by Linux's Bloat-O-Meter. +# +# Example: +# ./scripts/code.py lfs.o lfs_util.o -Ssize +# +# Copyright (c) 2022, The littlefs authors. +# Copyright (c) 2020, Arm Limited. All rights reserved. 
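+#
+# Symbol sizes come from nm --size-sort, filtered to text/read-only
+# symbol types (tTrRdD by default, see --nm-types), and objdump's DWARF
+# output is used, when available, to attribute each symbol back to its
+# defining source file.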
+# SPDX-License-Identifier: BSD-3-Clause +# + +import collections as co +import csv +import difflib +import itertools as it +import math as m +import os +import re +import shlex +import subprocess as sp + + +NM_PATH = ['nm'] +NM_TYPES = 'tTrRdD' +OBJDUMP_PATH = ['objdump'] + + +# integer fields +class Int(co.namedtuple('Int', 'x')): + __slots__ = () + def __new__(cls, x=0): + if isinstance(x, Int): + return x + if isinstance(x, str): + try: + x = int(x, 0) + except ValueError: + # also accept +-∞ and +-inf + if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x): + x = m.inf + elif re.match('^\s*-\s*(?:∞|inf)\s*$', x): + x = -m.inf + else: + raise + assert isinstance(x, int) or m.isinf(x), x + return super().__new__(cls, x) + + def __str__(self): + if self.x == m.inf: + return '∞' + elif self.x == -m.inf: + return '-∞' + else: + return str(self.x) + + def __int__(self): + assert not m.isinf(self.x) + return self.x + + def __float__(self): + return float(self.x) + + none = '%7s' % '-' + def table(self): + return '%7s' % (self,) + + diff_none = '%7s' % '-' + diff_table = table + + def diff_diff(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + diff = new - old + if diff == +m.inf: + return '%7s' % '+∞' + elif diff == -m.inf: + return '%7s' % '-∞' + else: + return '%+7d' % diff + + def ratio(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + if m.isinf(new) and m.isinf(old): + return 0.0 + elif m.isinf(new): + return +m.inf + elif m.isinf(old): + return -m.inf + elif not old and not new: + return 0.0 + elif not old: + return 1.0 + else: + return (new-old) / old + + def __add__(self, other): + return self.__class__(self.x + other.x) + + def __sub__(self, other): + return self.__class__(self.x - other.x) + + def __mul__(self, other): + return self.__class__(self.x * other.x) + +# code size results +class CodeResult(co.namedtuple('CodeResult', [ + 'file', 'function', + 'size'])): + _by = ['file', 'function'] + _fields = ['size'] + _sort = ['size'] + _types = {'size': Int} + + __slots__ = () + def __new__(cls, file='', function='', size=0): + return super().__new__(cls, file, function, + Int(size)) + + def __add__(self, other): + return CodeResult(self.file, self.function, + self.size + other.size) + + +def openio(path, mode='r', buffering=-1): + # allow '-' for stdin/stdout + if path == '-': + if mode == 'r': + return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering) + else: + return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering) + else: + return open(path, mode, buffering) + +def collect(obj_paths, *, + nm_path=NM_PATH, + nm_types=NM_TYPES, + objdump_path=OBJDUMP_PATH, + sources=None, + everything=False, + **args): + size_pattern = re.compile( + '^(?P[0-9a-fA-F]+)' + + ' (?P[%s])' % re.escape(nm_types) + + ' (?P.+?)$') + line_pattern = re.compile( + '^\s+(?P[0-9]+)' + '(?:\s+(?P[0-9]+))?' 
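+        # rows of objdump's --dwarf=rawline dir/file tables: an entry
+        # number, an optional directory index (only file-table rows have
+        # one), and a path, consumed below as m.group('no'),
+        # m.group('dir'), and m.group('path')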
+ '\s+.*' + '\s+(?P[^\s]+)$') + info_pattern = re.compile( + '^(?:.*(?PDW_TAG_[a-z_]+).*' + '|.*DW_AT_name.*:\s*(?P[^:\s]+)\s*' + '|.*DW_AT_decl_file.*:\s*(?P[0-9]+)\s*)$') + + results = [] + for path in obj_paths: + # guess the source, if we have debug-info we'll replace this later + file = re.sub('(\.o)?$', '.c', path, 1) + + # find symbol sizes + results_ = [] + # note nm-path may contain extra args + cmd = nm_path + ['--size-sort', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + m = size_pattern.match(line) + if m: + func = m.group('func') + # discard internal functions + if not everything and func.startswith('__'): + continue + results_.append(CodeResult( + file, func, + int(m.group('size'), 16))) + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + sys.exit(-1) + + + # try to figure out the source file if we have debug-info + dirs = {} + files = {} + # note objdump-path may contain extra args + cmd = objdump_path + ['--dwarf=rawline', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + # note that files contain references to dirs, which we + # dereference as soon as we see them as each file table follows a + # dir table + m = line_pattern.match(line) + if m: + if not m.group('dir'): + # found a directory entry + dirs[int(m.group('no'))] = m.group('path') + else: + # found a file entry + dir = int(m.group('dir')) + if dir in dirs: + files[int(m.group('no'))] = os.path.join( + dirs[dir], + m.group('path')) + else: + files[int(m.group('no'))] = m.group('path') + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + # do nothing on error, we don't need objdump to work, source files + # may just be inaccurate + pass + + defs = {} + is_func = False + f_name = None + f_file = None + # note objdump-path may contain extra args + cmd = objdump_path + ['--dwarf=info', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + # state machine here to find definitions + m = info_pattern.match(line) + if m: + if m.group('tag'): + if is_func: + defs[f_name] = files.get(f_file, '?') + is_func = (m.group('tag') == 'DW_TAG_subprogram') + elif m.group('name'): + f_name = m.group('name') + elif m.group('file'): + f_file = int(m.group('file')) + if is_func: + defs[f_name] = files.get(f_file, '?') + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + # do nothing on error, we don't need objdump to work, source files + # may just be inaccurate + pass + + for r in results_: + # find best matching debug symbol, this may be slightly different + # due to optimizations + if defs: + # exact match? 
avoid difflib if we can for speed + if r.function in defs: + file = defs[r.function] + else: + _, file = max( + defs.items(), + key=lambda d: difflib.SequenceMatcher(None, + d[0], + r.function, False).ratio()) + else: + file = r.file + + # ignore filtered sources + if sources is not None: + if not any( + os.path.abspath(file) == os.path.abspath(s) + for s in sources): + continue + else: + # default to only cwd + if not everything and not os.path.commonpath([ + os.getcwd(), + os.path.abspath(file)]) == os.getcwd(): + continue + + # simplify path + if os.path.commonpath([ + os.getcwd(), + os.path.abspath(file)]) == os.getcwd(): + file = os.path.relpath(file) + else: + file = os.path.abspath(file) + + results.append(r._replace(file=file)) + + return results + + +def fold(Result, results, *, + by=None, + defines=None, + **_): + if by is None: + by = Result._by + + for k in it.chain(by or [], (k for k, _ in defines or [])): + if k not in Result._by and k not in Result._fields: + print("error: could not find field %r?" % k) + sys.exit(-1) + + # filter by matching defines + if defines is not None: + results_ = [] + for r in results: + if all(getattr(r, k) in vs for k, vs in defines): + results_.append(r) + results = results_ + + # organize results into conflicts + folding = co.OrderedDict() + for r in results: + name = tuple(getattr(r, k) for k in by) + if name not in folding: + folding[name] = [] + folding[name].append(r) + + # merge conflicts + folded = [] + for name, rs in folding.items(): + folded.append(sum(rs[1:], start=rs[0])) + + return folded + +def table(Result, results, diff_results=None, *, + by=None, + fields=None, + sort=None, + summary=False, + all=False, + percent=False, + **_): + all_, all = all, __builtins__.all + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + types = Result._types + + # fold again + results = fold(Result, results, by=by) + if diff_results is not None: + diff_results = fold(Result, diff_results, by=by) + + # organize by name + table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in results} + diff_table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in diff_results or []} + names = list(table.keys() | diff_table.keys()) + + # sort again, now with diff info, note that python's sort is stable + names.sort() + if diff_results is not None: + names.sort(key=lambda n: tuple( + types[k].ratio( + getattr(table.get(n), k, None), + getattr(diff_table.get(n), k, None)) + for k in fields), + reverse=True) + if sort: + for k, reverse in reversed(sort): + names.sort( + key=lambda n: tuple( + (getattr(table[n], k),) + if getattr(table.get(n), k, None) is not None else () + for k in ([k] if k else [ + k for k in Result._sort if k in fields])), + reverse=reverse ^ (not k or k in Result._fields)) + + + # build up our lines + lines = [] + + # header + header = [] + header.append('%s%s' % ( + ','.join(by), + ' (%d added, %d removed)' % ( + sum(1 for n in table if n not in diff_table), + sum(1 for n in diff_table if n not in table)) + if diff_results is not None and not percent else '') + if not summary else '') + if diff_results is None: + for k in fields: + header.append(k) + elif percent: + for k in fields: + header.append(k) + else: + for k in fields: + header.append('o'+k) + for k in fields: + header.append('n'+k) + for k in fields: + header.append('d'+k) + header.append('') + lines.append(header) + + def table_entry(name, r, diff_r=None, ratios=[]): + entry = [] + entry.append(name) + if diff_results is 
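+                        # group-by fields keep their own names, while the
+                        # size fields get a code_ prefix; -u/--use and
+                        # -d/--diff only accept rows carrying such
+                        # code_-prefixed fields when reading results back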
None: + for k in fields: + entry.append(getattr(r, k).table() + if getattr(r, k, None) is not None + else types[k].none) + elif percent: + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + else: + for k in fields: + entry.append(getattr(diff_r, k).diff_table() + if getattr(diff_r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(types[k].diff_diff( + getattr(r, k, None), + getattr(diff_r, k, None))) + if diff_results is None: + entry.append('') + elif percent: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios)) + else: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios + if t) + if any(ratios) else '') + return entry + + # entries + if not summary: + for name in names: + r = table.get(name) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = diff_table.get(name) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + if not all_ and not any(ratios): + continue + lines.append(table_entry(name, r, diff_r, ratios)) + + # total + r = next(iter(fold(Result, results, by=[])), None) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = next(iter(fold(Result, diff_results, by=[])), None) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + lines.append(table_entry('TOTAL', r, diff_r, ratios)) + + # find the best widths, note that column 0 contains the names and column -1 + # the ratios, so those are handled a bit differently + widths = [ + ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1 + for w, i in zip( + it.chain([23], it.repeat(7)), + range(len(lines[0])-1))] + + # print our table + for line in lines: + print('%-*s %s%s' % ( + widths[0], line[0], + ' '.join('%*s' % (w, x) + for w, x in zip(widths[1:], line[1:-1])), + line[-1])) + + +def main(obj_paths, *, + by=None, + fields=None, + defines=None, + sort=None, + **args): + # find sizes + if not args.get('use', None): + results = collect(obj_paths, **args) + else: + results = [] + with openio(args['use']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + if not any('code_'+k in r and r['code_'+k].strip() + for k in CodeResult._fields): + continue + try: + results.append(CodeResult( + **{k: r[k] for k in CodeResult._by + if k in r and r[k].strip()}, + **{k: r['code_'+k] for k in CodeResult._fields + if 'code_'+k in r and r['code_'+k].strip()})) + except TypeError: + pass + + # fold + results = fold(CodeResult, results, by=by, defines=defines) + + # sort, note that python's sort is stable + results.sort() + if sort: + for k, reverse in reversed(sort): + results.sort( + key=lambda r: tuple( + (getattr(r, k),) if getattr(r, k) is not None else () + for k in ([k] if k else CodeResult._sort)), + reverse=reverse ^ (not k or k in CodeResult._fields)) + + # write results to CSV + if args.get('output'): + with openio(args['output'], 'w') as f: + writer = csv.DictWriter(f, + (by if by is not None else CodeResult._by) + + ['code_'+k for k in ( + fields if fields is not None else CodeResult._fields)]) + writer.writeheader() + for r in results: + writer.writerow( + {k: getattr(r, k) for k in ( 
+ by if by is not None else CodeResult._by)} + | {'code_'+k: getattr(r, k) for k in ( + fields if fields is not None else CodeResult._fields)}) + + # find previous results? + if args.get('diff'): + diff_results = [] + try: + with openio(args['diff']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + if not any('code_'+k in r and r['code_'+k].strip() + for k in CodeResult._fields): + continue + try: + diff_results.append(CodeResult( + **{k: r[k] for k in CodeResult._by + if k in r and r[k].strip()}, + **{k: r['code_'+k] for k in CodeResult._fields + if 'code_'+k in r and r['code_'+k].strip()})) + except TypeError: + pass + except FileNotFoundError: + pass + + # fold + diff_results = fold(CodeResult, diff_results, by=by, defines=defines) + + # print table + if not args.get('quiet'): + table(CodeResult, results, + diff_results if args.get('diff') else None, + by=by if by is not None else ['function'], + fields=fields, + sort=sort, + **args) + + +if __name__ == "__main__": + import argparse + import sys + parser = argparse.ArgumentParser( + description="Find code size at the function level.", + allow_abbrev=False) + parser.add_argument( + 'obj_paths', + nargs='*', + help="Input *.o files.") + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument( + '-q', '--quiet', + action='store_true', + help="Don't show anything, useful with -o.") + parser.add_argument( + '-o', '--output', + help="Specify CSV file to store results.") + parser.add_argument( + '-u', '--use', + help="Don't parse anything, use this CSV file.") + parser.add_argument( + '-d', '--diff', + help="Specify CSV file to diff against.") + parser.add_argument( + '-a', '--all', + action='store_true', + help="Show all, not just the ones that changed.") + parser.add_argument( + '-p', '--percent', + action='store_true', + help="Only show percentage change, not a full diff.") + parser.add_argument( + '-b', '--by', + action='append', + choices=CodeResult._by, + help="Group by this field.") + parser.add_argument( + '-f', '--field', + dest='fields', + action='append', + choices=CodeResult._fields, + help="Show this field.") + parser.add_argument( + '-D', '--define', + dest='defines', + action='append', + type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)), + help="Only include results where this field is this value.") + class AppendSort(argparse.Action): + def __call__(self, parser, namespace, value, option): + if namespace.sort is None: + namespace.sort = [] + namespace.sort.append((value, True if option == '-S' else False)) + parser.add_argument( + '-s', '--sort', + nargs='?', + action=AppendSort, + help="Sort by this field.") + parser.add_argument( + '-S', '--reverse-sort', + nargs='?', + action=AppendSort, + help="Sort by this field, but backwards.") + parser.add_argument( + '-Y', '--summary', + action='store_true', + help="Only show the total.") + parser.add_argument( + '-F', '--source', + dest='sources', + action='append', + help="Only consider definitions in this file. Defaults to anything " + "in the current directory.") + parser.add_argument( + '--everything', + action='store_true', + help="Include builtin and libc specific symbols.") + parser.add_argument( + '--nm-types', + default=NM_TYPES, + help="Type of symbols to report, this uses the same single-character " + "type-names emitted by nm. Defaults to %r." 
% NM_TYPES) + parser.add_argument( + '--nm-path', + type=lambda x: x.split(), + default=NM_PATH, + help="Path to the nm executable, may include flags. " + "Defaults to %r." % NM_PATH) + parser.add_argument( + '--objdump-path', + type=lambda x: x.split(), + default=OBJDUMP_PATH, + help="Path to the objdump executable, may include flags. " + "Defaults to %r." % OBJDUMP_PATH) + sys.exit(main(**{k: v + for k, v in vars(parser.parse_intermixed_args()).items() + if v is not None})) diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/cov.py b/components/joltwallet__littlefs/src/littlefs/scripts/cov.py new file mode 100644 index 0000000..b61b2e5 --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/scripts/cov.py @@ -0,0 +1,828 @@ +#!/usr/bin/env python3 +# +# Script to find coverage info after running tests. +# +# Example: +# ./scripts/cov.py \ +# lfs.t.a.gcda lfs_util.t.a.gcda \ +# -Flfs.c -Flfs_util.c -slines +# +# Copyright (c) 2022, The littlefs authors. +# Copyright (c) 2020, Arm Limited. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# + +import collections as co +import csv +import itertools as it +import json +import math as m +import os +import re +import shlex +import subprocess as sp + +# TODO use explode_asserts to avoid counting assert branches? +# TODO use dwarf=info to find functions for inline functions? + +GCOV_PATH = ['gcov'] + + +# integer fields +class Int(co.namedtuple('Int', 'x')): + __slots__ = () + def __new__(cls, x=0): + if isinstance(x, Int): + return x + if isinstance(x, str): + try: + x = int(x, 0) + except ValueError: + # also accept +-∞ and +-inf + if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x): + x = m.inf + elif re.match('^\s*-\s*(?:∞|inf)\s*$', x): + x = -m.inf + else: + raise + assert isinstance(x, int) or m.isinf(x), x + return super().__new__(cls, x) + + def __str__(self): + if self.x == m.inf: + return '∞' + elif self.x == -m.inf: + return '-∞' + else: + return str(self.x) + + def __int__(self): + assert not m.isinf(self.x) + return self.x + + def __float__(self): + return float(self.x) + + none = '%7s' % '-' + def table(self): + return '%7s' % (self,) + + diff_none = '%7s' % '-' + diff_table = table + + def diff_diff(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + diff = new - old + if diff == +m.inf: + return '%7s' % '+∞' + elif diff == -m.inf: + return '%7s' % '-∞' + else: + return '%+7d' % diff + + def ratio(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + if m.isinf(new) and m.isinf(old): + return 0.0 + elif m.isinf(new): + return +m.inf + elif m.isinf(old): + return -m.inf + elif not old and not new: + return 0.0 + elif not old: + return 1.0 + else: + return (new-old) / old + + def __add__(self, other): + return self.__class__(self.x + other.x) + + def __sub__(self, other): + return self.__class__(self.x - other.x) + + def __mul__(self, other): + return self.__class__(self.x * other.x) + +# fractional fields, a/b +class Frac(co.namedtuple('Frac', 'a,b')): + __slots__ = () + def __new__(cls, a=0, b=None): + if isinstance(a, Frac) and b is None: + return a + if isinstance(a, str) and b is None: + a, b = a.split('/', 1) + if b is None: + b = a + return super().__new__(cls, Int(a), Int(b)) + + def __str__(self): + return '%s/%s' % (self.a, self.b) + + def __float__(self): + return float(self.a) + + none = '%11s %7s' % ('-', '-') + def table(self): + t = self.a.x/self.b.x if self.b.x else 1.0 + return '%11s %7s' % ( + self, + '∞%' if t == +m.inf + else '-∞%' if t 
== -m.inf + else '%.1f%%' % (100*t)) + + diff_none = '%11s' % '-' + def diff_table(self): + return '%11s' % (self,) + + def diff_diff(self, other): + new_a, new_b = self if self else (Int(0), Int(0)) + old_a, old_b = other if other else (Int(0), Int(0)) + return '%11s' % ('%s/%s' % ( + new_a.diff_diff(old_a).strip(), + new_b.diff_diff(old_b).strip())) + + def ratio(self, other): + new_a, new_b = self if self else (Int(0), Int(0)) + old_a, old_b = other if other else (Int(0), Int(0)) + new = new_a.x/new_b.x if new_b.x else 1.0 + old = old_a.x/old_b.x if old_b.x else 1.0 + return new - old + + def __add__(self, other): + return self.__class__(self.a + other.a, self.b + other.b) + + def __sub__(self, other): + return self.__class__(self.a - other.a, self.b - other.b) + + def __mul__(self, other): + return self.__class__(self.a * other.a, self.b + other.b) + + def __lt__(self, other): + self_t = self.a.x/self.b.x if self.b.x else 1.0 + other_t = other.a.x/other.b.x if other.b.x else 1.0 + return (self_t, self.a.x) < (other_t, other.a.x) + + def __gt__(self, other): + return self.__class__.__lt__(other, self) + + def __le__(self, other): + return not self.__gt__(other) + + def __ge__(self, other): + return not self.__lt__(other) + +# coverage results +class CovResult(co.namedtuple('CovResult', [ + 'file', 'function', 'line', + 'calls', 'hits', 'funcs', 'lines', 'branches'])): + _by = ['file', 'function', 'line'] + _fields = ['calls', 'hits', 'funcs', 'lines', 'branches'] + _sort = ['funcs', 'lines', 'branches', 'hits', 'calls'] + _types = { + 'calls': Int, 'hits': Int, + 'funcs': Frac, 'lines': Frac, 'branches': Frac} + + __slots__ = () + def __new__(cls, file='', function='', line=0, + calls=0, hits=0, funcs=0, lines=0, branches=0): + return super().__new__(cls, file, function, int(Int(line)), + Int(calls), Int(hits), Frac(funcs), Frac(lines), Frac(branches)) + + def __add__(self, other): + return CovResult(self.file, self.function, self.line, + max(self.calls, other.calls), + max(self.hits, other.hits), + self.funcs + other.funcs, + self.lines + other.lines, + self.branches + other.branches) + + +def openio(path, mode='r', buffering=-1): + # allow '-' for stdin/stdout + if path == '-': + if mode == 'r': + return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering) + else: + return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering) + else: + return open(path, mode, buffering) + +def collect(gcda_paths, *, + gcov_path=GCOV_PATH, + sources=None, + everything=False, + **args): + results = [] + for path in gcda_paths: + # get coverage info through gcov's json output + # note, gcov-path may contain extra args + cmd = GCOV_PATH + ['-b', '-t', '--json-format', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + data = json.load(proc.stdout) + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + sys.exit(-1) + + # collect line/branch coverage + for file in data['files']: + # ignore filtered sources + if sources is not None: + if not any( + os.path.abspath(file['file']) == os.path.abspath(s) + for s in sources): + continue + else: + # default to only cwd + if not everything and not os.path.commonpath([ + os.getcwd(), + os.path.abspath(file['file'])]) == os.getcwd(): + continue + + # simplify path + if os.path.commonpath([ + os.getcwd(), + 
os.path.abspath(file['file'])]) == os.getcwd(): + file_name = os.path.relpath(file['file']) + else: + file_name = os.path.abspath(file['file']) + + for func in file['functions']: + func_name = func.get('name', '(inlined)') + # discard internal functions (this includes injected test cases) + if not everything: + if func_name.startswith('__'): + continue + + # go ahead and add functions, later folding will merge this if + # there are other hits on this line + results.append(CovResult( + file_name, func_name, func['start_line'], + func['execution_count'], 0, + Frac(1 if func['execution_count'] > 0 else 0, 1), + 0, + 0)) + + for line in file['lines']: + func_name = line.get('function_name', '(inlined)') + # discard internal function (this includes injected test cases) + if not everything: + if func_name.startswith('__'): + continue + + # go ahead and add lines, later folding will merge this if + # there are other hits on this line + results.append(CovResult( + file_name, func_name, line['line_number'], + 0, line['count'], + 0, + Frac(1 if line['count'] > 0 else 0, 1), + Frac( + sum(1 if branch['count'] > 0 else 0 + for branch in line['branches']), + len(line['branches'])))) + + return results + + +def fold(Result, results, *, + by=None, + defines=None, + **_): + if by is None: + by = Result._by + + for k in it.chain(by or [], (k for k, _ in defines or [])): + if k not in Result._by and k not in Result._fields: + print("error: could not find field %r?" % k) + sys.exit(-1) + + # filter by matching defines + if defines is not None: + results_ = [] + for r in results: + if all(getattr(r, k) in vs for k, vs in defines): + results_.append(r) + results = results_ + + # organize results into conflicts + folding = co.OrderedDict() + for r in results: + name = tuple(getattr(r, k) for k in by) + if name not in folding: + folding[name] = [] + folding[name].append(r) + + # merge conflicts + folded = [] + for name, rs in folding.items(): + folded.append(sum(rs[1:], start=rs[0])) + + return folded + +def table(Result, results, diff_results=None, *, + by=None, + fields=None, + sort=None, + summary=False, + all=False, + percent=False, + **_): + all_, all = all, __builtins__.all + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + types = Result._types + + # fold again + results = fold(Result, results, by=by) + if diff_results is not None: + diff_results = fold(Result, diff_results, by=by) + + # organize by name + table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in results} + diff_table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in diff_results or []} + names = list(table.keys() | diff_table.keys()) + + # sort again, now with diff info, note that python's sort is stable + names.sort() + if diff_results is not None: + names.sort(key=lambda n: tuple( + types[k].ratio( + getattr(table.get(n), k, None), + getattr(diff_table.get(n), k, None)) + for k in fields), + reverse=True) + if sort: + for k, reverse in reversed(sort): + names.sort( + key=lambda n: tuple( + (getattr(table[n], k),) + if getattr(table.get(n), k, None) is not None else () + for k in ([k] if k else [ + k for k in Result._sort if k in fields])), + reverse=reverse ^ (not k or k in Result._fields)) + + + # build up our lines + lines = [] + + # header + header = [] + header.append('%s%s' % ( + ','.join(by), + ' (%d added, %d removed)' % ( + sum(1 for n in table if n not in diff_table), + sum(1 for n in diff_table if n not in table)) + if diff_results is not None and not percent 
else '') + if not summary else '') + if diff_results is None: + for k in fields: + header.append(k) + elif percent: + for k in fields: + header.append(k) + else: + for k in fields: + header.append('o'+k) + for k in fields: + header.append('n'+k) + for k in fields: + header.append('d'+k) + header.append('') + lines.append(header) + + def table_entry(name, r, diff_r=None, ratios=[]): + entry = [] + entry.append(name) + if diff_results is None: + for k in fields: + entry.append(getattr(r, k).table() + if getattr(r, k, None) is not None + else types[k].none) + elif percent: + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + else: + for k in fields: + entry.append(getattr(diff_r, k).diff_table() + if getattr(diff_r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(types[k].diff_diff( + getattr(r, k, None), + getattr(diff_r, k, None))) + if diff_results is None: + entry.append('') + elif percent: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios)) + else: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios + if t) + if any(ratios) else '') + return entry + + # entries + if not summary: + for name in names: + r = table.get(name) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = diff_table.get(name) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + if not all_ and not any(ratios): + continue + lines.append(table_entry(name, r, diff_r, ratios)) + + # total + r = next(iter(fold(Result, results, by=[])), None) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = next(iter(fold(Result, diff_results, by=[])), None) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + lines.append(table_entry('TOTAL', r, diff_r, ratios)) + + # find the best widths, note that column 0 contains the names and column -1 + # the ratios, so those are handled a bit differently + widths = [ + ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1 + for w, i in zip( + it.chain([23], it.repeat(7)), + range(len(lines[0])-1))] + + # print our table + for line in lines: + print('%-*s %s%s' % ( + widths[0], line[0], + ' '.join('%*s' % (w, x) + for w, x in zip(widths[1:], line[1:-1])), + line[-1])) + + +def annotate(Result, results, *, + annotate=False, + lines=False, + branches=False, + **args): + # if neither branches/lines specified, color both + if annotate and not lines and not branches: + lines, branches = True, True + + for path in co.OrderedDict.fromkeys(r.file for r in results).keys(): + # flatten to line info + results = fold(Result, results, by=['file', 'line']) + table = {r.line: r for r in results if r.file == path} + + # calculate spans to show + if not annotate: + spans = [] + last = None + func = None + for line, r in sorted(table.items()): + if ((lines and int(r.hits) == 0) + or (branches and r.branches.a < r.branches.b)): + if last is not None and line - last.stop <= args['context']: + last = range( + last.start, + line+1+args['context']) + else: + if last is not None: + spans.append((last, func)) + last = range( + line-args['context'], + line+1+args['context']) + 
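+                    # start a new span around this uncovered line,
+                    # remembering its function for the @@ context header
+                    # printed below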
func = r.function + if last is not None: + spans.append((last, func)) + + with open(path) as f: + skipped = False + for i, line in enumerate(f): + # skip lines not in spans? + if not annotate and not any(i+1 in s for s, _ in spans): + skipped = True + continue + + if skipped: + skipped = False + print('%s@@ %s:%d: %s @@%s' % ( + '\x1b[36m' if args['color'] else '', + path, + i+1, + next(iter(f for _, f in spans)), + '\x1b[m' if args['color'] else '')) + + # build line + if line.endswith('\n'): + line = line[:-1] + + if i+1 in table: + r = table[i+1] + line = '%-*s // %s hits%s' % ( + args['width'], + line, + r.hits, + ', %s branches' % (r.branches,) + if int(r.branches.b) else '') + + if args['color']: + if lines and int(r.hits) == 0: + line = '\x1b[1;31m%s\x1b[m' % line + elif branches and r.branches.a < r.branches.b: + line = '\x1b[35m%s\x1b[m' % line + + print(line) + + +def main(gcda_paths, *, + by=None, + fields=None, + defines=None, + sort=None, + hits=False, + **args): + # figure out what color should be + if args.get('color') == 'auto': + args['color'] = sys.stdout.isatty() + elif args.get('color') == 'always': + args['color'] = True + else: + args['color'] = False + + # find sizes + if not args.get('use', None): + results = collect(gcda_paths, **args) + else: + results = [] + with openio(args['use']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + if not any('cov_'+k in r and r['cov_'+k].strip() + for k in CovResult._fields): + continue + try: + results.append(CovResult( + **{k: r[k] for k in CovResult._by + if k in r and r[k].strip()}, + **{k: r['cov_'+k] + for k in CovResult._fields + if 'cov_'+k in r + and r['cov_'+k].strip()})) + except TypeError: + pass + + # fold + results = fold(CovResult, results, by=by, defines=defines) + + # sort, note that python's sort is stable + results.sort() + if sort: + for k, reverse in reversed(sort): + results.sort( + key=lambda r: tuple( + (getattr(r, k),) if getattr(r, k) is not None else () + for k in ([k] if k else CovResult._sort)), + reverse=reverse ^ (not k or k in CovResult._fields)) + + # write results to CSV + if args.get('output'): + with openio(args['output'], 'w') as f: + writer = csv.DictWriter(f, + (by if by is not None else CovResult._by) + + ['cov_'+k for k in ( + fields if fields is not None else CovResult._fields)]) + writer.writeheader() + for r in results: + writer.writerow( + {k: getattr(r, k) for k in ( + by if by is not None else CovResult._by)} + | {'cov_'+k: getattr(r, k) for k in ( + fields if fields is not None else CovResult._fields)}) + + # find previous results? 
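+    # when -d/--diff is given, load the previous CSV with the same cov_
+    # field filtering and fold it identically, so the table can show
+    # old/new/delta columns; a missing diff file simply means no previous
+    # results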
+ if args.get('diff'): + diff_results = [] + try: + with openio(args['diff']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + if not any('cov_'+k in r and r['cov_'+k].strip() + for k in CovResult._fields): + continue + try: + diff_results.append(CovResult( + **{k: r[k] for k in CovResult._by + if k in r and r[k].strip()}, + **{k: r['cov_'+k] + for k in CovResult._fields + if 'cov_'+k in r + and r['cov_'+k].strip()})) + except TypeError: + pass + except FileNotFoundError: + pass + + # fold + diff_results = fold(CovResult, diff_results, + by=by, defines=defines) + + # print table + if not args.get('quiet'): + if (args.get('annotate') + or args.get('lines') + or args.get('branches')): + # annotate sources + annotate(CovResult, results, **args) + else: + # print table + table(CovResult, results, + diff_results if args.get('diff') else None, + by=by if by is not None else ['function'], + fields=fields if fields is not None + else ['lines', 'branches'] if not hits + else ['calls', 'hits'], + sort=sort, + **args) + + # catch lack of coverage + if args.get('error_on_lines') and any( + r.lines.a < r.lines.b for r in results): + sys.exit(2) + elif args.get('error_on_branches') and any( + r.branches.a < r.branches.b for r in results): + sys.exit(3) + + +if __name__ == "__main__": + import argparse + import sys + parser = argparse.ArgumentParser( + description="Find coverage info after running tests.", + allow_abbrev=False) + parser.add_argument( + 'gcda_paths', + nargs='*', + help="Input *.gcda files.") + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument( + '-q', '--quiet', + action='store_true', + help="Don't show anything, useful with -o.") + parser.add_argument( + '-o', '--output', + help="Specify CSV file to store results.") + parser.add_argument( + '-u', '--use', + help="Don't parse anything, use this CSV file.") + parser.add_argument( + '-d', '--diff', + help="Specify CSV file to diff against.") + parser.add_argument( + '-a', '--all', + action='store_true', + help="Show all, not just the ones that changed.") + parser.add_argument( + '-p', '--percent', + action='store_true', + help="Only show percentage change, not a full diff.") + parser.add_argument( + '-b', '--by', + action='append', + choices=CovResult._by, + help="Group by this field.") + parser.add_argument( + '-f', '--field', + dest='fields', + action='append', + choices=CovResult._fields, + help="Show this field.") + parser.add_argument( + '-D', '--define', + dest='defines', + action='append', + type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)), + help="Only include results where this field is this value.") + class AppendSort(argparse.Action): + def __call__(self, parser, namespace, value, option): + if namespace.sort is None: + namespace.sort = [] + namespace.sort.append((value, True if option == '-S' else False)) + parser.add_argument( + '-s', '--sort', + nargs='?', + action=AppendSort, + help="Sort by this field.") + parser.add_argument( + '-S', '--reverse-sort', + nargs='?', + action=AppendSort, + help="Sort by this field, but backwards.") + parser.add_argument( + '-Y', '--summary', + action='store_true', + help="Only show the total.") + parser.add_argument( + '-F', '--source', + dest='sources', + action='append', + help="Only consider definitions in this file. 
Defaults to anything " + "in the current directory.") + parser.add_argument( + '--everything', + action='store_true', + help="Include builtin and libc specific symbols.") + parser.add_argument( + '--hits', + action='store_true', + help="Show total hits instead of coverage.") + parser.add_argument( + '-A', '--annotate', + action='store_true', + help="Show source files annotated with coverage info.") + parser.add_argument( + '-L', '--lines', + action='store_true', + help="Show uncovered lines.") + parser.add_argument( + '-B', '--branches', + action='store_true', + help="Show uncovered branches.") + parser.add_argument( + '-c', '--context', + type=lambda x: int(x, 0), + default=3, + help="Show n additional lines of context. Defaults to 3.") + parser.add_argument( + '-W', '--width', + type=lambda x: int(x, 0), + default=80, + help="Assume source is styled with this many columns. Defaults to 80.") + parser.add_argument( + '--color', + choices=['never', 'always', 'auto'], + default='auto', + help="When to use terminal colors. Defaults to 'auto'.") + parser.add_argument( + '-e', '--error-on-lines', + action='store_true', + help="Error if any lines are not covered.") + parser.add_argument( + '-E', '--error-on-branches', + action='store_true', + help="Error if any branches are not covered.") + parser.add_argument( + '--gcov-path', + default=GCOV_PATH, + type=lambda x: x.split(), + help="Path to the gcov executable, may include paths. " + "Defaults to %r." % GCOV_PATH) + sys.exit(main(**{k: v + for k, v in vars(parser.parse_intermixed_args()).items() + if v is not None})) diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/data.py b/components/joltwallet__littlefs/src/littlefs/scripts/data.py new file mode 100644 index 0000000..e9770aa --- /dev/null +++ b/components/joltwallet__littlefs/src/littlefs/scripts/data.py @@ -0,0 +1,704 @@ +#!/usr/bin/env python3 +# +# Script to find data size at the function level. Basically just a big wrapper +# around nm with some extra conveniences for comparing builds. Heavily inspired +# by Linux's Bloat-O-Meter. +# +# Example: +# ./scripts/data.py lfs.o lfs_util.o -Ssize +# +# Copyright (c) 2022, The littlefs authors. +# Copyright (c) 2020, Arm Limited. All rights reserved. 
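+#
+# Note this is nearly identical to code.py, the differences being the nm
+# symbol types collected (dDbB, i.e. data/BSS symbols, instead of tTrRdD)
+# and the data_ prefix on CSV fields.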
+# SPDX-License-Identifier: BSD-3-Clause +# + +import collections as co +import csv +import difflib +import itertools as it +import math as m +import os +import re +import shlex +import subprocess as sp + + +NM_PATH = ['nm'] +NM_TYPES = 'dDbB' +OBJDUMP_PATH = ['objdump'] + + +# integer fields +class Int(co.namedtuple('Int', 'x')): + __slots__ = () + def __new__(cls, x=0): + if isinstance(x, Int): + return x + if isinstance(x, str): + try: + x = int(x, 0) + except ValueError: + # also accept +-∞ and +-inf + if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x): + x = m.inf + elif re.match('^\s*-\s*(?:∞|inf)\s*$', x): + x = -m.inf + else: + raise + assert isinstance(x, int) or m.isinf(x), x + return super().__new__(cls, x) + + def __str__(self): + if self.x == m.inf: + return '∞' + elif self.x == -m.inf: + return '-∞' + else: + return str(self.x) + + def __int__(self): + assert not m.isinf(self.x) + return self.x + + def __float__(self): + return float(self.x) + + none = '%7s' % '-' + def table(self): + return '%7s' % (self,) + + diff_none = '%7s' % '-' + diff_table = table + + def diff_diff(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + diff = new - old + if diff == +m.inf: + return '%7s' % '+∞' + elif diff == -m.inf: + return '%7s' % '-∞' + else: + return '%+7d' % diff + + def ratio(self, other): + new = self.x if self else 0 + old = other.x if other else 0 + if m.isinf(new) and m.isinf(old): + return 0.0 + elif m.isinf(new): + return +m.inf + elif m.isinf(old): + return -m.inf + elif not old and not new: + return 0.0 + elif not old: + return 1.0 + else: + return (new-old) / old + + def __add__(self, other): + return self.__class__(self.x + other.x) + + def __sub__(self, other): + return self.__class__(self.x - other.x) + + def __mul__(self, other): + return self.__class__(self.x * other.x) + +# data size results +class DataResult(co.namedtuple('DataResult', [ + 'file', 'function', + 'size'])): + _by = ['file', 'function'] + _fields = ['size'] + _sort = ['size'] + _types = {'size': Int} + + __slots__ = () + def __new__(cls, file='', function='', size=0): + return super().__new__(cls, file, function, + Int(size)) + + def __add__(self, other): + return DataResult(self.file, self.function, + self.size + other.size) + + +def openio(path, mode='r', buffering=-1): + # allow '-' for stdin/stdout + if path == '-': + if mode == 'r': + return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering) + else: + return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering) + else: + return open(path, mode, buffering) + +def collect(obj_paths, *, + nm_path=NM_PATH, + nm_types=NM_TYPES, + objdump_path=OBJDUMP_PATH, + sources=None, + everything=False, + **args): + size_pattern = re.compile( + '^(?P[0-9a-fA-F]+)' + + ' (?P[%s])' % re.escape(nm_types) + + ' (?P.+?)$') + line_pattern = re.compile( + '^\s+(?P[0-9]+)' + '(?:\s+(?P[0-9]+))?' 
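+        # same --dwarf=rawline dir/file-table parse as in code.py's
+        # collect: rows without a directory index are directory entries,
+        # rows with one are file entries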
+ '\s+.*' + '\s+(?P[^\s]+)$') + info_pattern = re.compile( + '^(?:.*(?PDW_TAG_[a-z_]+).*' + '|.*DW_AT_name.*:\s*(?P[^:\s]+)\s*' + '|.*DW_AT_decl_file.*:\s*(?P[0-9]+)\s*)$') + + results = [] + for path in obj_paths: + # guess the source, if we have debug-info we'll replace this later + file = re.sub('(\.o)?$', '.c', path, 1) + + # find symbol sizes + results_ = [] + # note nm-path may contain extra args + cmd = nm_path + ['--size-sort', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + m = size_pattern.match(line) + if m: + func = m.group('func') + # discard internal functions + if not everything and func.startswith('__'): + continue + results_.append(DataResult( + file, func, + int(m.group('size'), 16))) + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + sys.exit(-1) + + + # try to figure out the source file if we have debug-info + dirs = {} + files = {} + # note objdump-path may contain extra args + cmd = objdump_path + ['--dwarf=rawline', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + # note that files contain references to dirs, which we + # dereference as soon as we see them as each file table follows a + # dir table + m = line_pattern.match(line) + if m: + if not m.group('dir'): + # found a directory entry + dirs[int(m.group('no'))] = m.group('path') + else: + # found a file entry + dir = int(m.group('dir')) + if dir in dirs: + files[int(m.group('no'))] = os.path.join( + dirs[dir], + m.group('path')) + else: + files[int(m.group('no'))] = m.group('path') + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + # do nothing on error, we don't need objdump to work, source files + # may just be inaccurate + pass + + defs = {} + is_func = False + f_name = None + f_file = None + # note objdump-path may contain extra args + cmd = objdump_path + ['--dwarf=info', path] + if args.get('verbose'): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True, + errors='replace', + close_fds=False) + for line in proc.stdout: + # state machine here to find definitions + m = info_pattern.match(line) + if m: + if m.group('tag'): + if is_func: + defs[f_name] = files.get(f_file, '?') + is_func = (m.group('tag') == 'DW_TAG_subprogram') + elif m.group('name'): + f_name = m.group('name') + elif m.group('file'): + f_file = int(m.group('file')) + if is_func: + defs[f_name] = files.get(f_file, '?') + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + # do nothing on error, we don't need objdump to work, source files + # may just be inaccurate + pass + + for r in results_: + # find best matching debug symbol, this may be slightly different + # due to optimizations + if defs: + # exact match? 
avoid difflib if we can for speed + if r.function in defs: + file = defs[r.function] + else: + _, file = max( + defs.items(), + key=lambda d: difflib.SequenceMatcher(None, + d[0], + r.function, False).ratio()) + else: + file = r.file + + # ignore filtered sources + if sources is not None: + if not any( + os.path.abspath(file) == os.path.abspath(s) + for s in sources): + continue + else: + # default to only cwd + if not everything and not os.path.commonpath([ + os.getcwd(), + os.path.abspath(file)]) == os.getcwd(): + continue + + # simplify path + if os.path.commonpath([ + os.getcwd(), + os.path.abspath(file)]) == os.getcwd(): + file = os.path.relpath(file) + else: + file = os.path.abspath(file) + + results.append(r._replace(file=file)) + + return results + + +def fold(Result, results, *, + by=None, + defines=None, + **_): + if by is None: + by = Result._by + + for k in it.chain(by or [], (k for k, _ in defines or [])): + if k not in Result._by and k not in Result._fields: + print("error: could not find field %r?" % k) + sys.exit(-1) + + # filter by matching defines + if defines is not None: + results_ = [] + for r in results: + if all(getattr(r, k) in vs for k, vs in defines): + results_.append(r) + results = results_ + + # organize results into conflicts + folding = co.OrderedDict() + for r in results: + name = tuple(getattr(r, k) for k in by) + if name not in folding: + folding[name] = [] + folding[name].append(r) + + # merge conflicts + folded = [] + for name, rs in folding.items(): + folded.append(sum(rs[1:], start=rs[0])) + + return folded + +def table(Result, results, diff_results=None, *, + by=None, + fields=None, + sort=None, + summary=False, + all=False, + percent=False, + **_): + all_, all = all, __builtins__.all + + if by is None: + by = Result._by + if fields is None: + fields = Result._fields + types = Result._types + + # fold again + results = fold(Result, results, by=by) + if diff_results is not None: + diff_results = fold(Result, diff_results, by=by) + + # organize by name + table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in results} + diff_table = { + ','.join(str(getattr(r, k) or '') for k in by): r + for r in diff_results or []} + names = list(table.keys() | diff_table.keys()) + + # sort again, now with diff info, note that python's sort is stable + names.sort() + if diff_results is not None: + names.sort(key=lambda n: tuple( + types[k].ratio( + getattr(table.get(n), k, None), + getattr(diff_table.get(n), k, None)) + for k in fields), + reverse=True) + if sort: + for k, reverse in reversed(sort): + names.sort( + key=lambda n: tuple( + (getattr(table[n], k),) + if getattr(table.get(n), k, None) is not None else () + for k in ([k] if k else [ + k for k in Result._sort if k in fields])), + reverse=reverse ^ (not k or k in Result._fields)) + + + # build up our lines + lines = [] + + # header + header = [] + header.append('%s%s' % ( + ','.join(by), + ' (%d added, %d removed)' % ( + sum(1 for n in table if n not in diff_table), + sum(1 for n in diff_table if n not in table)) + if diff_results is not None and not percent else '') + if not summary else '') + if diff_results is None: + for k in fields: + header.append(k) + elif percent: + for k in fields: + header.append(k) + else: + for k in fields: + header.append('o'+k) + for k in fields: + header.append('n'+k) + for k in fields: + header.append('d'+k) + header.append('') + lines.append(header) + + def table_entry(name, r, diff_r=None, ratios=[]): + entry = [] + entry.append(name) + if diff_results is 
None: + for k in fields: + entry.append(getattr(r, k).table() + if getattr(r, k, None) is not None + else types[k].none) + elif percent: + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + else: + for k in fields: + entry.append(getattr(diff_r, k).diff_table() + if getattr(diff_r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(getattr(r, k).diff_table() + if getattr(r, k, None) is not None + else types[k].diff_none) + for k in fields: + entry.append(types[k].diff_diff( + getattr(r, k, None), + getattr(diff_r, k, None))) + if diff_results is None: + entry.append('') + elif percent: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios)) + else: + entry.append(' (%s)' % ', '.join( + '+∞%' if t == +m.inf + else '-∞%' if t == -m.inf + else '%+.1f%%' % (100*t) + for t in ratios + if t) + if any(ratios) else '') + return entry + + # entries + if not summary: + for name in names: + r = table.get(name) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = diff_table.get(name) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + if not all_ and not any(ratios): + continue + lines.append(table_entry(name, r, diff_r, ratios)) + + # total + r = next(iter(fold(Result, results, by=[])), None) + if diff_results is None: + diff_r = None + ratios = None + else: + diff_r = next(iter(fold(Result, diff_results, by=[])), None) + ratios = [ + types[k].ratio( + getattr(r, k, None), + getattr(diff_r, k, None)) + for k in fields] + lines.append(table_entry('TOTAL', r, diff_r, ratios)) + + # find the best widths, note that column 0 contains the names and column -1 + # the ratios, so those are handled a bit differently + widths = [ + ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1 + for w, i in zip( + it.chain([23], it.repeat(7)), + range(len(lines[0])-1))] + + # print our table + for line in lines: + print('%-*s %s%s' % ( + widths[0], line[0], + ' '.join('%*s' % (w, x) + for w, x in zip(widths[1:], line[1:-1])), + line[-1])) + + +def main(obj_paths, *, + by=None, + fields=None, + defines=None, + sort=None, + **args): + # find sizes + if not args.get('use', None): + results = collect(obj_paths, **args) + else: + results = [] + with openio(args['use']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + try: + results.append(DataResult( + **{k: r[k] for k in DataResult._by + if k in r and r[k].strip()}, + **{k: r['data_'+k] for k in DataResult._fields + if 'data_'+k in r and r['data_'+k].strip()})) + except TypeError: + pass + + # fold + results = fold(DataResult, results, by=by, defines=defines) + + # sort, note that python's sort is stable + results.sort() + if sort: + for k, reverse in reversed(sort): + results.sort( + key=lambda r: tuple( + (getattr(r, k),) if getattr(r, k) is not None else () + for k in ([k] if k else DataResult._sort)), + reverse=reverse ^ (not k or k in DataResult._fields)) + + # write results to CSV + if args.get('output'): + with openio(args['output'], 'w') as f: + writer = csv.DictWriter(f, + (by if by is not None else DataResult._by) + + ['data_'+k for k in ( + fields if fields is not None else DataResult._fields)]) + writer.writeheader() + for r in results: + writer.writerow( + {k: getattr(r, k) for k in ( + by if by is not None else DataResult._by)} + | {'data_'+k: getattr(r, k) for k in ( + fields 
if fields is not None else DataResult._fields)}) + + # find previous results? + if args.get('diff'): + diff_results = [] + try: + with openio(args['diff']) as f: + reader = csv.DictReader(f, restval='') + for r in reader: + if not any('data_'+k in r and r['data_'+k].strip() + for k in DataResult._fields): + continue + try: + diff_results.append(DataResult( + **{k: r[k] for k in DataResult._by + if k in r and r[k].strip()}, + **{k: r['data_'+k] for k in DataResult._fields + if 'data_'+k in r and r['data_'+k].strip()})) + except TypeError: + pass + except FileNotFoundError: + pass + + # fold + diff_results = fold(DataResult, diff_results, by=by, defines=defines) + + # print table + if not args.get('quiet'): + table(DataResult, results, + diff_results if args.get('diff') else None, + by=by if by is not None else ['function'], + fields=fields, + sort=sort, + **args) + + +if __name__ == "__main__": + import argparse + import sys + parser = argparse.ArgumentParser( + description="Find data size at the function level.", + allow_abbrev=False) + parser.add_argument( + 'obj_paths', + nargs='*', + help="Input *.o files.") + parser.add_argument( + '-v', '--verbose', + action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument( + '-q', '--quiet', + action='store_true', + help="Don't show anything, useful with -o.") + parser.add_argument( + '-o', '--output', + help="Specify CSV file to store results.") + parser.add_argument( + '-u', '--use', + help="Don't parse anything, use this CSV file.") + parser.add_argument( + '-d', '--diff', + help="Specify CSV file to diff against.") + parser.add_argument( + '-a', '--all', + action='store_true', + help="Show all, not just the ones that changed.") + parser.add_argument( + '-p', '--percent', + action='store_true', + help="Only show percentage change, not a full diff.") + parser.add_argument( + '-b', '--by', + action='append', + choices=DataResult._by, + help="Group by this field.") + parser.add_argument( + '-f', '--field', + dest='fields', + action='append', + choices=DataResult._fields, + help="Show this field.") + parser.add_argument( + '-D', '--define', + dest='defines', + action='append', + type=lambda x: (lambda k,v: (k, set(v.split(','))))(*x.split('=', 1)), + help="Only include results where this field is this value.") + class AppendSort(argparse.Action): + def __call__(self, parser, namespace, value, option): + if namespace.sort is None: + namespace.sort = [] + namespace.sort.append((value, True if option == '-S' else False)) + parser.add_argument( + '-s', '--sort', + nargs='?', + action=AppendSort, + help="Sort by this field.") + parser.add_argument( + '-S', '--reverse-sort', + nargs='?', + action=AppendSort, + help="Sort by this field, but backwards.") + parser.add_argument( + '-Y', '--summary', + action='store_true', + help="Only show the total.") + parser.add_argument( + '-F', '--source', + dest='sources', + action='append', + help="Only consider definitions in this file. Defaults to anything " + "in the current directory.") + parser.add_argument( + '--everything', + action='store_true', + help="Include builtin and libc specific symbols.") + parser.add_argument( + '--nm-types', + default=NM_TYPES, + help="Type of symbols to report, this uses the same single-character " + "type-names emitted by nm. Defaults to %r." % NM_TYPES) + parser.add_argument( + '--nm-path', + type=lambda x: x.split(), + default=NM_PATH, + help="Path to the nm executable, may include flags. " + "Defaults to %r." 
+                % NM_PATH)
+    parser.add_argument(
+        '--objdump-path',
+        type=lambda x: x.split(),
+        default=OBJDUMP_PATH,
+        help="Path to the objdump executable, may include flags. "
+            "Defaults to %r." % OBJDUMP_PATH)
+    sys.exit(main(**{k: v
+        for k, v in vars(parser.parse_intermixed_args()).items()
+        if v is not None}))
diff --git a/components/joltwallet__littlefs/src/littlefs/scripts/perf.py b/components/joltwallet__littlefs/src/littlefs/scripts/perf.py
new file mode 100644
index 0000000..2ee006c
--- /dev/null
+++ b/components/joltwallet__littlefs/src/littlefs/scripts/perf.py
@@ -0,0 +1,1344 @@
+#!/usr/bin/env python3
+#
+# Script to aggregate and report Linux perf results.
+#
+# Example:
+# ./scripts/perf.py -R -obench.perf ./runners/bench_runner
+# ./scripts/perf.py bench.perf -j -Flfs.c -Flfs_util.c -Scycles
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import bisect
+import collections as co
+import csv
+import errno
+import fcntl
+import functools as ft
+import itertools as it
+import math as m
+import multiprocessing as mp
+import os
+import re
+import shlex
+import shutil
+import subprocess as sp
+import tempfile
+import zipfile
+
+# TODO support non-zip perf results?
+
+
+PERF_PATH = ['perf']
+PERF_EVENTS = 'cycles,branch-misses,branches,cache-misses,cache-references'
+PERF_FREQ = 100
+OBJDUMP_PATH = ['objdump']
+THRESHOLD = (0.5, 0.85)
+
+
+# integer fields
+class Int(co.namedtuple('Int', 'x')):
+    __slots__ = ()
+    def __new__(cls, x=0):
+        if isinstance(x, Int):
+            return x
+        if isinstance(x, str):
+            try:
+                x = int(x, 0)
+            except ValueError:
+                # also accept +-∞ and +-inf
+                if re.match('^\s*\+?\s*(?:∞|inf)\s*$', x):
+                    x = m.inf
+                elif re.match('^\s*-\s*(?:∞|inf)\s*$', x):
+                    x = -m.inf
+                else:
+                    raise
+        assert isinstance(x, int) or m.isinf(x), x
+        return super().__new__(cls, x)
+
+    def __str__(self):
+        if self.x == m.inf:
+            return '∞'
+        elif self.x == -m.inf:
+            return '-∞'
+        else:
+            return str(self.x)
+
+    def __int__(self):
+        assert not m.isinf(self.x)
+        return self.x
+
+    def __float__(self):
+        return float(self.x)
+
+    none = '%7s' % '-'
+    def table(self):
+        return '%7s' % (self,)
+
+    diff_none = '%7s' % '-'
+    diff_table = table
+
+    def diff_diff(self, other):
+        new = self.x if self else 0
+        old = other.x if other else 0
+        diff = new - old
+        if diff == +m.inf:
+            return '%7s' % '+∞'
+        elif diff == -m.inf:
+            return '%7s' % '-∞'
+        else:
+            return '%+7d' % diff
+
+    def ratio(self, other):
+        new = self.x if self else 0
+        old = other.x if other else 0
+        if m.isinf(new) and m.isinf(old):
+            return 0.0
+        elif m.isinf(new):
+            return +m.inf
+        elif m.isinf(old):
+            return -m.inf
+        elif not old and not new:
+            return 0.0
+        elif not old:
+            return 1.0
+        else:
+            return (new-old) / old
+
+    def __add__(self, other):
+        return self.__class__(self.x + other.x)
+
+    def __sub__(self, other):
+        return self.__class__(self.x - other.x)
+
+    def __mul__(self, other):
+        return self.__class__(self.x * other.x)
+
+# perf results
+class PerfResult(co.namedtuple('PerfResult', [
+        'file', 'function', 'line',
+        'cycles', 'bmisses', 'branches', 'cmisses', 'caches',
+        'children'])):
+    _by = ['file', 'function', 'line']
+    _fields = ['cycles', 'bmisses', 'branches', 'cmisses', 'caches']
+    _sort = ['cycles', 'bmisses', 'cmisses', 'branches', 'caches']
+    _types = {
+        'cycles': Int,
+        'bmisses': Int, 'branches': Int,
+        'cmisses': Int, 'caches': Int}
+
+    __slots__ = ()
+    def __new__(cls, file='', function='', line=0,
+            cycles=0, bmisses=0, branches=0, cmisses=0, caches=0,
+            children=[]):
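+        # note (added commentary): every numeric field is normalized through
+        # Int, so strings like '∞' from a CSV round-trip parse back into
+        # m.inf; the shared children=[] default is safe because namedtuples
+        # are immutable and __add__ below only concatenates into a new list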
+        return super().__new__(cls, file, function, int(Int(line)),
+            Int(cycles), Int(bmisses), Int(branches), Int(cmisses), Int(caches),
+            children)
+
+    def __add__(self, other):
+        return PerfResult(self.file, self.function, self.line,
+            self.cycles + other.cycles,
+            self.bmisses + other.bmisses,
+            self.branches + other.branches,
+            self.cmisses + other.cmisses,
+            self.caches + other.caches,
+            self.children + other.children)
+
+
+def openio(path, mode='r', buffering=-1):
+    # allow '-' for stdin/stdout
+    if path == '-':
+        if mode == 'r':
+            return os.fdopen(os.dup(sys.stdin.fileno()), mode, buffering)
+        else:
+            return os.fdopen(os.dup(sys.stdout.fileno()), mode, buffering)
+    else:
+        return open(path, mode, buffering)
+
+# run perf as a subprocess, storing measurements into a zip file
+def record(command, *,
+        output=None,
+        perf_freq=PERF_FREQ,
+        perf_period=None,
+        perf_events=PERF_EVENTS,
+        perf_path=PERF_PATH,
+        **args):
+    # create a temporary file for perf to write to, as far as I can tell
+    # this is strictly needed because perf's pipe-mode only works with stdout
+    with tempfile.NamedTemporaryFile('rb') as f:
+        # figure out our perf invocation
+        perf = perf_path + list(filter(None, [
+            'record',
+            '-F%s' % perf_freq
+                if perf_freq is not None
+                and perf_period is None else None,
+            '-c%s' % perf_period
+                if perf_period is not None else None,
+            '-B',
+            '-g',
+            '--all-user',
+            '-e%s' % perf_events,
+            '-o%s' % f.name]))
+
+        # run our command
+        try:
+            if args.get('verbose'):
+                print(' '.join(shlex.quote(c) for c in perf + command))
+            err = sp.call(perf + command, close_fds=False)
+
+        except KeyboardInterrupt:
+            err = errno.EOWNERDEAD
+
+        # synchronize access
+        z = os.open(output, os.O_RDWR | os.O_CREAT)
+        fcntl.flock(z, fcntl.LOCK_EX)
+
+        # copy measurements into our zip file
+        with os.fdopen(z, 'r+b') as z:
+            with zipfile.ZipFile(z, 'a',
+                    compression=zipfile.ZIP_DEFLATED,
+                    compresslevel=1) as z:
+                with z.open('perf.%d' % os.getpid(), 'w') as g:
+                    shutil.copyfileobj(f, g)
+
+    # forward the return code
+    return err
+
+
+# try to only process each dso once
+#
+# note this only caches with the non-keyword arguments
+def multiprocessing_cache(f):
+    local_cache = {}
+    manager = mp.Manager()
+    global_cache = manager.dict()
+    lock = mp.Lock()
+
+    def multiprocessing_cache(*args, **kwargs):
+        # check local cache?
+        if args in local_cache:
+            return local_cache[args]
+        # check global cache?
+        with lock:
+            if args in global_cache:
+                v = global_cache[args]
+                local_cache[args] = v
+                return v
+        # fall back to calling the function
+        v = f(*args, **kwargs)
+        global_cache[args] = v
+        local_cache[args] = v
+        return v
+
+    return multiprocessing_cache
+
+@multiprocessing_cache
+def collect_syms_and_lines(obj_path, *,
+        objdump_path=None,
+        **args):
+    symbol_pattern = re.compile(
+        '^(?P<addr>[0-9a-fA-F]+)'
+            '\s+.*'
+            '\s+(?P<size>[0-9a-fA-F]+)'
+            '\s+(?P<name>[^\s]+)\s*$')
+    line_pattern = re.compile(
+        '^\s+(?:'
+            # matches dir/file table
+            '(?P<no>[0-9]+)'
+                '(?:\s+(?P<dir>[0-9]+))?'
+                '\s+.*'
+                '\s+(?P<path>[^\s]+)'
+            # matches line opcodes
+            '|' '\[[^\]]*\]\s+'
+                '(?:'
+                    '(?P<op_special>Special)'
+                    '|' '(?P<op_copy>Copy)'
+                    '|' '(?P<op_end>End of Sequence)'
+                    '|' 'File .*?to (?:entry )?(?P<op_file>\d+)'
+                    '|' 'Line .*?to (?P<op_line>[0-9]+)'
+                    '|' '(?:Address|PC) .*?to (?P<op_addr>[0x0-9a-fA-F]+)'
+                    '|' '.'
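+                    # note (added commentary): the trailing '.' branch
+                    # swallows any opcode this pattern doesn't track, so the
+                    # repeated group around these alternatives can still
+                    # match the whole line before the anchored ')$' below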
+                    ')*'
+        ')$', re.IGNORECASE)
+
+    # figure out symbol addresses and file+line ranges
+    syms = {}
+    sym_at = []
+    cmd = objdump_path + ['-t', obj_path]
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    for line in proc.stdout:
+        m = symbol_pattern.match(line)
+        if m:
+            name = m.group('name')
+            addr = int(m.group('addr'), 16)
+            size = int(m.group('size'), 16)
+            # ignore zero-sized symbols
+            if not size:
+                continue
+            # note multiple symbols can share a name
+            if name not in syms:
+                syms[name] = set()
+            syms[name].add((addr, size))
+            sym_at.append((addr, name, size))
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        # assume no debug-info on failure
+        pass
+
+    # sort and keep largest/first when duplicates
+    sym_at.sort(key=lambda x: (x[0], -x[2], x[1]))
+    sym_at_ = []
+    for addr, name, size in sym_at:
+        if len(sym_at_) == 0 or sym_at_[-1][0] != addr:
+            sym_at_.append((addr, name, size))
+    sym_at = sym_at_
+
+    # state machine for dwarf line numbers, note that objdump's
+    # decodedline seems to have issues with multiple dir/file
+    # tables, which is why we need this
+    lines = []
+    line_at = []
+    dirs = {}
+    files = {}
+    op_file = 1
+    op_line = 1
+    op_addr = 0
+    cmd = objdump_path + ['--dwarf=rawline', obj_path]
+    if args.get('verbose'):
+        print(' '.join(shlex.quote(c) for c in cmd))
+    proc = sp.Popen(cmd,
+        stdout=sp.PIPE,
+        stderr=sp.PIPE if not args.get('verbose') else None,
+        universal_newlines=True,
+        errors='replace',
+        close_fds=False)
+    for line in proc.stdout:
+        m = line_pattern.match(line)
+        if m:
+            if m.group('no') and not m.group('dir'):
+                # found a directory entry
+                dirs[int(m.group('no'))] = m.group('path')
+            elif m.group('no'):
+                # found a file entry
+                dir = int(m.group('dir'))
+                if dir in dirs:
+                    files[int(m.group('no'))] = os.path.join(
+                        dirs[dir],
+                        m.group('path'))
+                else:
+                    files[int(m.group('no'))] = m.group('path')
+            else:
+                # found a state machine update
+                if m.group('op_file'):
+                    op_file = int(m.group('op_file'), 0)
+                if m.group('op_line'):
+                    op_line = int(m.group('op_line'), 0)
+                if m.group('op_addr'):
+                    op_addr = int(m.group('op_addr'), 0)
+
+                if (m.group('op_special')
+                        or m.group('op_copy')
+                        or m.group('op_end')):
+                    file = os.path.abspath(files.get(op_file, '?'))
+                    lines.append((file, op_line, op_addr))
+                    line_at.append((op_addr, file, op_line))
+
+                if m.group('op_end'):
+                    op_file = 1
+                    op_line = 1
+                    op_addr = 0
+    proc.wait()
+    if proc.returncode != 0:
+        if not args.get('verbose'):
+            for line in proc.stderr:
+                sys.stdout.write(line)
+        # assume no debug-info on failure
+        pass
+
+    # sort and keep first when duplicates
+    lines.sort()
+    lines_ = []
+    for file, line, addr in lines:
+        if len(lines_) == 0 or lines_[-1][0] != file or lines_[-1][1] != line:
+            lines_.append((file, line, addr))
+    lines = lines_
+
+    # sort and keep first when duplicates
+    line_at.sort()
+    line_at_ = []
+    for addr, file, line in line_at:
+        if len(line_at_) == 0 or line_at_[-1][0] != addr:
+            line_at_.append((addr, file, line))
+    line_at = line_at_
+
+    return syms, sym_at, lines, line_at
+
+
+def collect_decompressed(path, *,
+        perf_path=PERF_PATH,
+        sources=None,
+        everything=False,
+        propagate=0,
+        depth=1,
+        **args):
+    sample_pattern = re.compile(
+        '(?P\w+)'
+        '\s+(?P\w+)'
+        '\s+(?P