diff --git a/.woodpecker/debug.yaml b/.woodpecker/debug.yaml index 52894b91..4dc7d3c9 100644 --- a/.woodpecker/debug.yaml +++ b/.woodpecker/debug.yaml @@ -2,14 +2,13 @@ labels: nix: "enabled" when: - - event: - - tag - - pull_request - - deployment - - cron - - manual - - event: push - branch: main-* + event: + - push + - tag + - pull_request + - deployment + - cron + - manual steps: - name: check formatting @@ -17,16 +16,6 @@ steps: commands: - nix-build -j4 --attr flakePackages.fmt - - name: check typos - image: nixpkgs/nix:nixos-24.05 - commands: - - nix-shell --attr ci --run typos - - - name: check lints with clippy - image: nixpkgs/nix:nixos-24.05 - commands: - - nix-build -j4 --attr flakePackages.clippy - - name: build image: nixpkgs/nix:nixos-24.05 commands: diff --git a/.woodpecker/release.yaml b/.woodpecker/release.yaml index 4133b92d..a94a9ccf 100644 --- a/.woodpecker/release.yaml +++ b/.woodpecker/release.yaml @@ -38,15 +38,7 @@ steps: - matrix: ARCH: i386 - - name: upgrade tests from v1.0.0 - image: nixpkgs/nix:nixos-24.05 - commands: - - nix-shell --attr ci --run "./script/test-upgrade.sh v1.0.0 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false) - when: - - matrix: - ARCH: amd64 - - - name: upgrade tests from v0.8.4 + - name: upgrade tests image: nixpkgs/nix:nixos-24.05 commands: - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false) diff --git a/Cargo.lock b/Cargo.lock index 96af3252..7473d9af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,15 +141,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.101" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arc-swap" -version = "1.8.1" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ded5f9a03ac8f24d1b8a25101ee812cd32cdc8c50a4c50237de2c4915850e73" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" dependencies = [ "rustversion", ] @@ -182,23 +182,11 @@ dependencies = [ "serde_json", ] -[[package]] -name = "async-broadcast" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435a87a52755b8f27fcf321ac4f04b2802e337c8c4872923137471ec39c37532" -dependencies = [ - "event-listener", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - [[package]] name = "async-compression" -version = "0.4.39" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68650b7df54f0293fd061972a0fb05aaf4fc0879d3b3d21a638a182c5c543b9f" +checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" dependencies = [ "compression-codecs", "compression-core", @@ -263,33 +251,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "aws-lc-rs" -version = "1.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" -dependencies = [ - "aws-lc-sys", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" -dependencies = [ - "cc", - "cmake", - "dunce", - "fs_extra", -] - [[package]] name = "aws-runtime" -version = "1.6.0" +version = "1.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c635c2dc792cb4a11ce1a4f392a925340d1bdf499289b5ec1ec6810954eb43f5" +checksum = "959dab27ce613e6c9658eb3621064d0e2027e5f2acb65bc526a43577facea557" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -303,9 +269,7 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "http 1.4.0", "http-body 
0.4.6", - "http-body 1.0.1", "percent-encoding", "pin-project-lite", "tracing", @@ -314,9 +278,9 @@ dependencies = [ [[package]] name = "aws-sdk-config" -version = "1.100.0" +version = "1.99.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3976827de71752a0c986f73bfb9509585a272c1d85165de7ce45a56ee0e2dc3e" +checksum = "67e62e5ffb669e13f084c4e1d89d687604e001187f61503606a7f8cc7a411995" dependencies = [ "aws-credential-types", "aws-runtime", @@ -331,16 +295,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "http 1.4.0", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-s3" -version = "1.122.0" +version = "1.120.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c2ca0cba97e8e279eb6c0b2d0aa10db5959000e602ab2b7c02de6b85d4c19b" +checksum = "06673901e961f20fa8d7da907da48f7ad6c1b383e3726c22bd418900f015abe1" dependencies = [ "aws-credential-types", "aws-runtime", @@ -362,7 +325,7 @@ dependencies = [ "hmac", "http 0.2.12", "http 1.4.0", - "http-body 1.0.1", + "http-body 0.4.6", "lru", "percent-encoding", "regex-lite", @@ -373,9 +336,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.3.8" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efa49f3c607b92daae0c078d48a4571f599f966dce3caee5f1ea55c4d9073f99" +checksum = "69e523e1c4e8e7e8ff219d732988e22bfeae8a1cafdbe6d9eca1546fa080be7c" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", @@ -396,9 +359,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.2.11" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52eec3db979d18cb807fc1070961cc51d87d069abe9ab57917769687368a8c6c" +checksum = "9ee19095c7c4dda59f1697d028ce704c24b2d33c6718790c7f1d5a3015b4107c" dependencies = [ "futures-util", "pin-project-lite", @@ -407,18 +370,17 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.64.3" +version = 
"0.63.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddcf418858f9f3edd228acb8759d77394fed7531cce78d02bdda499025368439" +checksum = "23374b9170cbbcc6f5df8dc5ebb9b6c5c28a3c8f599f0e8b8b10eb6f4a5c6e74" dependencies = [ "aws-smithy-http", "aws-smithy-types", "bytes", "crc-fast", "hex", - "http 1.4.0", - "http-body 1.0.1", - "http-body-util", + "http 0.2.12", + "http-body 0.4.6", "md-5", "pin-project-lite", "sha1", @@ -428,9 +390,9 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" -version = "0.60.18" +version = "0.60.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b9c7354a3b13c66f60fe4616d6d1969c9fd36b1b5333a5dfb3ee716b33c588" +checksum = "dc12f8b310e38cad85cf3bef45ad236f470717393c613266ce0a89512286b650" dependencies = [ "aws-smithy-types", "bytes", @@ -439,9 +401,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.63.3" +version = "0.62.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630e67f2a31094ffa51b210ae030855cb8f3b7ee1329bdd8d085aaf61e8b97fc" +checksum = "826141069295752372f8203c17f28e30c464d22899a43a0c9fd9c458d469c88b" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", @@ -450,9 +412,9 @@ dependencies = [ "bytes-utils", "futures-core", "futures-util", + "http 0.2.12", "http 1.4.0", - "http-body 1.0.1", - "http-body-util", + "http-body 0.4.6", "percent-encoding", "pin-project-lite", "pin-utils", @@ -461,9 +423,9 @@ dependencies = [ [[package]] name = "aws-smithy-http-client" -version = "1.1.9" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fb0abf49ff0cab20fd31ac1215ed7ce0ea92286ba09e2854b42ba5cabe7525" +checksum = "59e62db736db19c488966c8d787f52e6270be565727236fd5579eaa301e7bc4a" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -476,34 +438,34 @@ dependencies = [ "hyper-rustls 0.24.2", "pin-project-lite", "rustls 0.21.12", - 
"rustls-native-certs", + "rustls-native-certs 0.8.3", "tokio", "tracing", ] [[package]] name = "aws-smithy-json" -version = "0.62.3" +version = "0.61.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb96aa208d62ee94104645f7b2ecaf77bf27edf161590b6224bfbac2832f979" +checksum = "49fa1213db31ac95288d981476f78d05d9cbb0353d22cdf3472cc05bb02f6551" dependencies = [ "aws-smithy-types", ] [[package]] name = "aws-smithy-observability" -version = "0.2.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a46543fbc94621080b3cf553eb4cbbdc41dd9780a30c4756400f0139440a1d" +checksum = "ef1fcbefc7ece1d70dcce29e490f269695dfca2d2bacdeaf9e5c3f799e4e6a42" dependencies = [ "aws-smithy-runtime-api", ] [[package]] name = "aws-smithy-runtime" -version = "1.10.0" +version = "1.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3df87c14f0127a0d77eb261c3bc45d5b4833e2a1f63583ebfb728e4852134ee" +checksum = "bb5b6167fcdf47399024e81ac08e795180c576a20e4d4ce67949f9a88ae37dc1" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -517,7 +479,6 @@ dependencies = [ "http 1.4.0", "http-body 0.4.6", "http-body 1.0.1", - "http-body-util", "pin-project-lite", "pin-utils", "tokio", @@ -526,9 +487,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.11.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49952c52f7eebb72ce2a754d3866cc0f87b97d2a46146b79f80f3a93fb2b3716" +checksum = "efce7aaaf59ad53c5412f14fc19b2d5c6ab2c3ec688d272fd31f76ec12f44fb0" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -543,9 +504,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.4.3" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3a26048eeab0ddeba4b4f9d51654c79af8c3b32357dc5f336cee85ab331c33" +checksum = 
"65f172bcb02424eb94425db8aed1b6d583b5104d4d5ddddf22402c661a320048" dependencies = [ "base64-simd", "bytes", @@ -591,14 +552,14 @@ dependencies = [ ] [[package]] -name = "backon" -version = "1.6.0" +name = "backoff" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "fastrand", - "gloo-timers", - "tokio", + "getrandom 0.2.17", + "instant", + "rand", ] [[package]] @@ -622,6 +583,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -644,6 +611,15 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -655,9 +631,6 @@ name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" -dependencies = [ - "serde_core", -] [[package]] name = "blake2" @@ -683,6 +656,12 @@ version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +[[package]] +name = "bytemuck" +version = "1.24.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" + [[package]] name = "byteorder" version = "1.5.0" @@ -691,9 +670,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] name = "bytes-utils" @@ -707,9 +686,9 @@ dependencies = [ [[package]] name = "bytesize" -version = "2.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd91ee7b2422bcb158d90ef4d14f75ef67f340943fc4149891dcce8f8b972a3" +checksum = "2e93abca9e28e0a1b9877922aacb20576e05d4679ffa78c3d6dc22a26a216659" [[package]] name = "byteview" @@ -719,9 +698,9 @@ checksum = "6236364b88b9b6d0bc181ba374cf1ab55ba3ef97a1cb6f8cddad48a273767fb5" [[package]] name = "cc" -version = "1.2.55" +version = "1.2.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" +checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" dependencies = [ "find-msvc-tools", "jobserver", @@ -729,12 +708,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "cesu8" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" - [[package]] name = "cfg-if" version = "1.0.4" @@ -790,9 +763,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.57" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6899ea499e3fb9305a65d5ebf6e3d2248c5fab291f300ad0a704fbe142eae31a" +checksum = 
"c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive", @@ -800,9 +773,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.57" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b12c8b680195a62a8364d16b8447b01b6c2c8f9aaf68bee653be34d4245e238" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", @@ -812,9 +785,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -828,31 +801,12 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" -[[package]] -name = "cmake" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" -dependencies = [ - "cc", -] - [[package]] name = "colorchoice" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" -[[package]] -name = "combine" -version = "4.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" -dependencies = [ - "bytes", - "memchr", -] - [[package]] name = "compare" version = "0.0.6" @@ -876,15 +830,6 @@ version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" -[[package]] -name = "concurrent-queue" -version = "2.5.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -947,6 +892,15 @@ dependencies = [ "spin 0.10.0", ] +[[package]] +name = "crc32c" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" +dependencies = [ + "rustc_version", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -1006,7 +960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "typenum", ] @@ -1021,9 +975,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.23.0" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ "darling_core", "darling_macro", @@ -1031,10 +985,11 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.23.0" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ + "fnv", "ident_case", "proc-macro2", "quote", @@ -1044,9 +999,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.23.0" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", @@ 
-1087,24 +1042,14 @@ dependencies = [ ] [[package]] -name = "derive_more" -version = "2.1.1" +name = "derivative" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" -dependencies = [ - "derive_more-impl", -] - -[[package]] -name = "derive_more-impl" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "rustc_version", - "syn 2.0.114", + "syn 1.0.109", ] [[package]] @@ -1135,39 +1080,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0d05e1c0dbad51b52c38bda7adceef61b9efc2baf04acfe8726a8c4630a6f57" -[[package]] -name = "doxygen-rs" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9" -dependencies = [ - "phf", -] - -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "dyn-clone" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" -[[package]] -name = "educe" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "either" version = "1.15.0" @@ -1183,26 +1101,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum-ordinalize" -version = "4.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" -dependencies = [ - "enum-ordinalize-derive", -] - -[[package]] -name = "enum-ordinalize-derive" -version = "4.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "enum_dispatch" version = "0.3.13" @@ -1244,27 +1142,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "event-listener" -version = "5.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" -dependencies = [ - "event-listener", - "pin-project-lite", -] - [[package]] name = "fallible-iterator" version = "0.3.0" @@ -1285,9 +1162,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "find-msvc-tools" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -1343,12 +1220,6 @@ dependencies = [ name = "format_table" version = "0.1.1" -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "futures" version = "0.3.31" @@ -1440,18 +1311,18 @@ dependencies = [ 
[[package]] name = "garage" -version = "2.3.0" +version = "1.3.1" dependencies = [ "assert-json-diff", "async-trait", "aws-sdk-s3", "aws-smithy-runtime", "backtrace", - "base64 0.22.1", + "base64 0.21.7", "bytes", "bytesize", "chrono", - "crc-fast", + "crc32fast", "format_table", "futures", "garage_api_admin", @@ -1480,6 +1351,7 @@ dependencies = [ "opentelemetry-otlp", "opentelemetry-prometheus", "parse_duration", + "serde", "serde_json", "sha1", "sha2", @@ -1491,21 +1363,16 @@ dependencies = [ "tracing", "tracing-journald", "tracing-subscriber", - "utoipa", ] [[package]] name = "garage_api_admin" -version = "2.3.0" +version = "1.3.1" dependencies = [ "argon2", "async-trait", - "bytesize", - "chrono", - "format_table", "futures", "garage_api_common", - "garage_block", "garage_model", "garage_rpc", "garage_table", @@ -1515,7 +1382,6 @@ dependencies = [ "hyper 1.8.1", "opentelemetry", "opentelemetry-prometheus", - "paste", "prometheus", "serde", "serde_json", @@ -1523,17 +1389,17 @@ dependencies = [ "tokio", "tracing", "url", - "utoipa", ] [[package]] name = "garage_api_common" -version = "2.3.0" +version = "1.3.1" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bytes", "chrono", - "crc-fast", + "crc32c", + "crc32fast", "crypto-common", "futures", "garage_model", @@ -1548,9 +1414,7 @@ dependencies = [ "md-5", "nom", "opentelemetry", - "percent-encoding", "pin-project", - "quick-xml", "serde", "serde_json", "sha1", @@ -1559,14 +1423,13 @@ dependencies = [ "tokio", "tracing", "url", - "utoipa", ] [[package]] name = "garage_api_k2v" -version = "2.3.0" +version = "1.3.1" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "futures", "garage_api_common", "garage_model", @@ -1587,14 +1450,15 @@ dependencies = [ [[package]] name = "garage_api_s3" -version = "2.3.0" +version = "1.3.1" dependencies = [ "aes-gcm", "async-compression", - "base64 0.22.1", + "base64 0.21.7", "bytes", "chrono", - "crc-fast", + "crc32c", + "crc32fast", "form_urlencoded", "futures", 
"garage_api_common", @@ -1605,7 +1469,6 @@ dependencies = [ "garage_table", "garage_util", "hex", - "hmac", "http 1.4.0", "http-body-util", "http-range", @@ -1632,7 +1495,7 @@ dependencies = [ [[package]] name = "garage_block" -version = "2.3.0" +version = "1.3.1" dependencies = [ "arc-swap", "async-compression", @@ -1643,10 +1506,11 @@ dependencies = [ "garage_db", "garage_net", "garage_rpc", + "garage_table", "garage_util", "hex", "opentelemetry", - "rand 0.9.2", + "rand", "serde", "tokio", "tokio-util 0.7.18", @@ -1656,7 +1520,7 @@ dependencies = [ [[package]] name = "garage_db" -version = "2.3.0" +version = "1.3.1" dependencies = [ "fjall", "heed", @@ -1671,11 +1535,10 @@ dependencies = [ [[package]] name = "garage_model" -version = "2.3.0" +version = "1.3.1" dependencies = [ - "argon2", "async-trait", - "base64 0.22.1", + "base64 0.21.7", "blake2", "chrono", "futures", @@ -1688,7 +1551,7 @@ dependencies = [ "hex", "http 1.4.0", "parse_duration", - "rand 0.9.2", + "rand", "serde", "serde_bytes", "thiserror 2.0.18", @@ -1699,7 +1562,7 @@ dependencies = [ [[package]] name = "garage_net" -version = "2.3.0" +version = "1.3.1" dependencies = [ "arc-swap", "bytes", @@ -1713,10 +1576,9 @@ dependencies = [ "opentelemetry-contrib", "pin-project", "pretty_env_logger", - "rand 0.9.2", + "rand", "rmp-serde", "serde", - "socket2 0.6.2", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -1725,7 +1587,7 @@ dependencies = [ [[package]] name = "garage_rpc" -version = "2.3.0" +version = "1.3.1" dependencies = [ "arc-swap", "async-trait", @@ -1737,14 +1599,14 @@ dependencies = [ "gethostname", "hex", "ipnet", - "itertools 0.14.0", + "itertools 0.12.1", "k8s-openapi", "kube", "kuska-sodiumoxide", "nix", "opentelemetry", "pnet_datalink", - "rand 0.9.2", + "rand", "reqwest", "schemars", "serde", @@ -1757,7 +1619,7 @@ dependencies = [ [[package]] name = "garage_table" -version = "2.3.0" +version = "1.3.1" dependencies = [ "arc-swap", "async-trait", @@ -1769,7 +1631,7 @@ dependencies = 
[ "hex", "hexdump", "opentelemetry", - "rand 0.9.2", + "rand", "serde", "serde_bytes", "tokio", @@ -1778,7 +1640,7 @@ dependencies = [ [[package]] name = "garage_util" -version = "2.3.0" +version = "1.3.1" dependencies = [ "arc-swap", "async-trait", @@ -1795,7 +1657,7 @@ dependencies = [ "lazy_static", "mktemp", "opentelemetry", - "rand 0.9.2", + "rand", "rmp-serde", "rustc_version", "serde", @@ -1810,7 +1672,7 @@ dependencies = [ [[package]] name = "garage_web" -version = "2.3.0" +version = "1.3.1" dependencies = [ "garage_api_common", "garage_api_s3", @@ -1839,12 +1701,12 @@ dependencies = [ [[package]] name = "gethostname" -version = "1.1.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bd49230192a3797a9a4d6abe9b3eed6f7fa4c8a8a4947977c6f80025f92cbd8" +checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818" dependencies = [ - "rustix 1.1.3", - "windows-link", + "libc", + "windows-targets 0.48.5", ] [[package]] @@ -1854,10 +1716,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi", - "wasm-bindgen", ] [[package]] @@ -1867,24 +1727,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", - "js-sys", "libc", "r-efi", "wasip2", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasip2", - "wasip3", ] [[package]] @@ -1923,18 +1768,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "gloo-timers" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "guardian" version = "1.3.0" @@ -1990,6 +1823,10 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] [[package]] name = "hashbrown" @@ -2013,11 +1850,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.11.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.16.1", + "hashbrown 0.15.5", ] [[package]] @@ -2037,16 +1874,16 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "heed" -version = "0.22.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a56c94661ddfb51aa9cdfbf102cfcc340aa69267f95ebccc4af08d7c530d393" +checksum = "269c7486ed6def5d7b59a427cec3e87b4d4dd4381d01e21c8c9f2d3985688392" dependencies = [ - "bitflags 2.10.0", + "bytemuck", "byteorder", "heed-traits", "heed-types", "libc", - "lmdb-master-sys", + "lmdb-rkv-sys", "once_cell", "page_size", "synchronoise", @@ -2055,18 +1892,22 @@ dependencies = [ [[package]] name = "heed-traits" -version = "0.20.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3130048d404c57ce5a1ac61a903696e8fcde7e8c2991e9fcfc1f27c3ef74ff" +checksum = "a53a94e5b2fd60417e83ffdfe136c39afacff0d4ac1d8d01cd66928ac610e1a2" [[package]] name = "heed-types" -version = "0.21.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"13c255bdf46e07fb840d120a36dcc81f385140d7191c76a7391672675c01a55d" +checksum = "9a6cf0a6952fcedc992602d5cddd1e3fff091fbe87d38636e3ec23a31f32acbd" dependencies = [ + "bincode", + "bytemuck", "byteorder", "heed-traits", + "serde", + "serde_json", ] [[package]] @@ -2108,17 +1949,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "hostname" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" -dependencies = [ - "cfg-if", - "libc", - "windows-link", -] - [[package]] name = "http" version = "0.2.12" @@ -2180,6 +2010,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21dec9db110f5f872ed9699c3ecf50cf16f423502706ba5c72462e28d3157573" +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + [[package]] name = "httparse" version = "1.10.1" @@ -2256,25 +2092,26 @@ dependencies = [ "hyper 0.14.32", "log", "rustls 0.21.12", + "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", ] [[package]] name = "hyper-rustls" -version = "0.27.7" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ + "futures-util", "http 1.4.0", "hyper 1.8.1", "hyper-util", - "log", - "rustls 0.23.36", - "rustls-native-certs", + "rustls 0.22.4", + "rustls-native-certs 0.7.3", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.4", + "tokio-rustls 0.25.0", "tower-service", ] @@ -2290,28 +2127,16 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-timeout" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" -dependencies = [ - "hyper 1.8.1", - "hyper-util", - "pin-project-lite", - "tokio", - "tower-service", -] - [[package]] name = "hyper-util" -version = "0.1.20" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.4.0", "http-body 1.0.1", @@ -2321,7 +2146,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.2", - "system-configuration", + "system-configuration 0.6.1", "tokio", "tower-layer", "tower-service", @@ -2331,9 +2156,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.65" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2434,12 +2259,6 @@ dependencies = [ "zerovec", ] -[[package]] -name = "id-arena" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" - [[package]] name = "ident_case" version = "1.0.1" @@ -2485,8 +2304,6 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", - "serde", - "serde_core", ] [[package]] @@ -2498,6 +2315,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] 
name = "interval-heap" version = "0.0.5" @@ -2522,16 +2348,6 @@ dependencies = [ "serde", ] -[[package]] -name = "iri-string" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "is-terminal" version = "0.4.17" @@ -2560,9 +2376,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.14.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -2573,52 +2389,6 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" -[[package]] -name = "jiff" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89a5b5e10d5a9ad6e5d1f4bd58225f655d6fe9767575a5e8ac5a6fe64e04495" -dependencies = [ - "jiff-static", - "log", - "portable-atomic", - "portable-atomic-util", - "serde_core", -] - -[[package]] -name = "jiff-static" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff7a39c8862fc1369215ccf0a8f12dd4598c7f6484704359f0351bd617034dbf" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "jni" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" -dependencies = [ - "cesu8", - "cfg-if", - "combine", - "jni-sys", - "log", - "thiserror 1.0.69", - "walkdir", - "windows-sys 0.45.0", -] - -[[package]] -name = "jni-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" - [[package]] name = "jobserver" version = "0.1.34" @@ -2641,11 +2411,10 @@ dependencies = [ [[package]] name = "json-patch" -version = "4.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f300e415e2134745ef75f04562dd0145405c2f7fd92065db029ac4b16b57fe90" +checksum = "ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b" dependencies = [ - "jsonptr", "serde", "serde_json", "thiserror 1.0.69", @@ -2653,25 +2422,15 @@ dependencies = [ [[package]] name = "jsonpath-rust" -version = "1.0.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633a7320c4bb672863a3782e89b9094ad70285e097ff6832cddd0ec615beadfa" +checksum = "96acbc6188d3bd83519d053efec756aa4419de62ec47be7f28dec297f7dc9eb0" dependencies = [ "pest", "pest_derive", "regex", "serde_json", - "thiserror 2.0.18", -] - -[[package]] -name = "jsonptr" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a3cc660ba5d72bce0b3bb295bf20847ccbb40fd423f3f05b61273672e561fe" -dependencies = [ - "serde", - "serde_json", + "thiserror 1.0.69", ] [[package]] @@ -2680,14 +2439,14 @@ version = "0.0.4" dependencies = [ "aws-sdk-config", "aws-sigv4", - "base64 0.22.1", - "clap 4.5.57", + "base64 0.21.7", + "clap 4.5.54", "format_table", "hex", "http 1.4.0", "http-body-util", "hyper 1.8.1", - "hyper-rustls 0.27.7", + "hyper-rustls 0.26.0", "hyper-util", "log", "percent-encoding", @@ -2701,21 +2460,22 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.27.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a6d6f3611ad1d21732adbd7a2e921f598af6c92d71ae6e2620da4b67ee1f0d" +checksum = "550f99d93aa4c2b25de527bce492d772caf5e21d7ac9bd4b508ba781c8d91e30" dependencies = [ - "base64 0.22.1", - "jiff", + "base64 0.21.7", + "chrono", "serde", + "serde-value", 
"serde_json", ] [[package]] name = "kube" -version = "3.0.1" +version = "0.88.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f96b537b4c4f61fc183594edbecbbefa3037e403feac0701bb24e6eff78e0034" +checksum = "462fe330a0617b276ec864c2255810adcdf519ecb6844253c54074b2086a97bc" dependencies = [ "k8s-openapi", "kube-client", @@ -2726,86 +2486,83 @@ dependencies = [ [[package]] name = "kube-client" -version = "3.0.1" +version = "0.88.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af97b8b696eb737e5694f087c498ca725b172c2a5bc3a6916328d160225537ee" +checksum = "7fe0d65dd6f3adba29cfb84f19dfe55449c7f6c35425f9d8294bec40313e0b64" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bytes", + "chrono", "either", "futures", - "http 1.4.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.8.1", - "hyper-rustls 0.27.7", - "hyper-timeout 0.5.2", - "hyper-util", - "jiff", + "home", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-rustls 0.24.2", + "hyper-timeout", "jsonpath-rust", "k8s-openapi", "kube-core", "pem", - "rustls 0.23.36", + "pin-project", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "secrecy", "serde", "serde_json", "serde_yaml", - "thiserror 2.0.18", + "thiserror 1.0.69", "tokio", "tokio-util 0.7.18", - "tower 0.5.3", + "tower", "tower-http", "tracing", ] [[package]] name = "kube-core" -version = "3.0.1" +version = "0.88.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7aeade7d2e9f165f96b3c1749ff01a8e2dc7ea954bd333bcfcecc37d5226bdd" +checksum = "a6b42844e9172f631b8263ea9ce003b9251da13beb1401580937ad206dd82f4c" dependencies = [ - "derive_more", + "chrono", "form_urlencoded", - "http 1.4.0", - "jiff", + "http 0.2.12", "json-patch", "k8s-openapi", + "once_cell", "schemars", "serde", - "serde-value", "serde_json", - "thiserror 2.0.18", + "thiserror 1.0.69", ] [[package]] name = "kube-derive" -version = "3.0.1" +version = "0.88.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c98f59f4e68864624a0b993a1cc2424439ab7238eaede5c299e89943e2a093ff" +checksum = "f5b5a111ee287bd237b8190b8c39543ea9fd22f79e9c32a36c24e08234bcda22" dependencies = [ "darling", "proc-macro2", "quote", - "serde", "serde_json", "syn 2.0.114", ] [[package]] name = "kube-runtime" -version = "3.0.1" +version = "0.88.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc158473d6d86ec22692874bd5ddccf07474eab5c6bb41f226c522e945da5244" +checksum = "2bc06275064c81056fbb28ea876b3fb339d970e8132282119359afca0835c0ea" dependencies = [ "ahash", - "async-broadcast", - "async-stream", - "backon", - "educe", + "async-trait", + "backoff", + "derivative", "futures", - "hashbrown 0.16.1", - "hostname", + "hashbrown 0.14.5", "json-patch", "k8s-openapi", "kube-client", @@ -2813,7 +2570,8 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror 2.0.18", + "smallvec", + "thiserror 1.0.69", "tokio", "tokio-util 0.7.18", "tracing", @@ -2849,12 +2607,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -[[package]] -name = "leb128fmt" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" - [[package]] name = "libc" version = "0.2.180" @@ -2875,9 +2627,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.36.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -2903,14 +2655,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" 
[[package]] -name = "lmdb-master-sys" -version = "0.2.5" +name = "lmdb-rkv-sys" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864808e0b19fb6dd3b70ba94ee671b82fce17554cf80aeb0a155c65bb08027df" +checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" dependencies = [ "cc", - "doxygen-rs", "libc", + "pkg-config", ] [[package]] @@ -2937,12 +2689,6 @@ dependencies = [ "hashbrown 0.16.1", ] -[[package]] -name = "lru-slab" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" - [[package]] name = "lsm-tree" version = "2.10.4" @@ -2994,9 +2740,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.8.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mime" @@ -3004,6 +2750,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3058,9 +2810,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "nix" -version = "0.31.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225e7cfe711e0ba79a68baeddb2982723e4235247aefce1482f2f16c27865b66" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.10.0", "cfg-if", @@ -3076,11 +2828,12 @@ checksum = 
"43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" [[package]] name = "nom" -version = "8.0.0" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", + "minimal-lexical", ] [[package]] @@ -3211,6 +2964,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + [[package]] name = "openssl-probe" version = "0.2.1" @@ -3234,7 +2993,7 @@ dependencies = [ "lazy_static", "percent-encoding", "pin-project", - "rand 0.8.5", + "rand", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -3296,20 +3055,14 @@ checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" [[package]] name = "page_size" -version = "0.6.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" dependencies = [ "libc", "winapi", ] -[[package]] -name = "parking" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" - [[package]] name = "parking_lot" version = "0.12.5" @@ -3351,16 +3104,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] -[[package]] -name = "paste" -version = "1.0.15" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - [[package]] name = "path-absolutize" version = "3.1.1" @@ -3397,9 +3144,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.6" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662" +checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7" dependencies = [ "memchr", "ucd-trie", @@ -3407,9 +3154,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.6" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77" +checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed" dependencies = [ "pest", "pest_generator", @@ -3417,9 +3164,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.6" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f" +checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5" dependencies = [ "pest", "pest_meta", @@ -3430,9 +3177,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.8.6" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220" +checksum = "602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365" dependencies = [ "pest", "sha2", @@ -3448,48 +3195,6 @@ dependencies = [ "indexmap 2.13.0", ] -[[package]] -name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies 
= [ - "phf_macros", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" -dependencies = [ - "phf_shared", - "rand 0.8.5", -] - -[[package]] -name = "phf_macros" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" -dependencies = [ - "phf_generator", - "phf_shared", - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", -] - [[package]] name = "pin-project" version = "1.1.10" @@ -3530,18 +3235,18 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "pnet_base" -version = "0.35.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" +checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" -version = "0.35.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" +checksum = "ad5854abf0067ebbd3967f7d45ebc8976ff577ff0c7bd101c4973ae3c70f98fe" dependencies = [ "ipnetwork", "libc", @@ -3552,9 +3257,9 @@ dependencies = [ [[package]] name = "pnet_sys" -version = "0.35.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" +checksum = "417c0becd1b573f6d544f73671070b039051e5ad819cc64aa96377b536128d00" dependencies 
= [ "libc", "winapi", @@ -3572,21 +3277,6 @@ dependencies = [ "universal-hash", ] -[[package]] -name = "portable-atomic" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" - -[[package]] -name = "portable-atomic-util" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" -dependencies = [ - "portable-atomic", -] - [[package]] name = "potential_utf" version = "0.1.4" @@ -3621,16 +3311,6 @@ dependencies = [ "log", ] -[[package]] -name = "prettyplease" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" -dependencies = [ - "proc-macro2", - "syn 2.0.114", -] - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3740,9 +3420,9 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "quick-xml" -version = "0.39.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2e3bf4aa9d243beeb01a7b3bc30b77cfe2c44e24ec02d751a7104a53c2c49a1" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" dependencies = [ "memchr", "serde", @@ -3758,62 +3438,6 @@ dependencies = [ "hashbrown 0.16.1", ] -[[package]] -name = "quinn" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" -dependencies = [ - "bytes", - "cfg_aliases 0.2.1", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash", - "rustls 0.23.36", - "socket2 0.6.2", - "thiserror 2.0.18", - "tokio", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-proto" -version = "0.11.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" -dependencies = [ - "aws-lc-rs", - "bytes", - "getrandom 0.3.4", - "lru-slab", - "rand 0.9.2", - "ring", - "rustc-hash", - "rustls 0.23.36", - "rustls-pki-types", - "slab", - "thiserror 2.0.18", - "tinyvec", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-udp" -version = "0.5.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" -dependencies = [ - "cfg_aliases 0.2.1", - "libc", - "once_cell", - "socket2 0.6.2", - "tracing", - "windows-sys 0.60.2", -] - [[package]] name = "quote" version = "1.0.44" @@ -3842,9 +3466,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.32.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" +checksum = "63417e83dc891797eea3ad379f52a5986da4bca0d6ef28baf4d14034dd111b0c" dependencies = [ "r2d2", "rusqlite", @@ -3858,18 +3482,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.5", + "rand_chacha", + "rand_core", ] [[package]] @@ -3879,17 +3493,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.5", + "rand_core", ] [[package]] @@ -3901,15 +3505,6 @@ dependencies = [ "getrandom 0.2.17", ] -[[package]] -name = "rand_core" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" -dependencies = [ - "getrandom 0.3.4", -] - [[package]] name = "redox_syscall" version = "0.5.18" @@ -3919,31 +3514,11 @@ dependencies = [ "bitflags 2.10.0", ] -[[package]] -name = "ref-cast" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "regex" -version = "1.12.3" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -3953,9 +3528,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.14" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -3964,51 +3539,54 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" [[package]] name = "regex-syntax" -version = "0.8.9" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.13.2" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bytes", + "encoding_rs", "futures-core", - "http 1.4.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.8.1", - "hyper-rustls 0.27.7", - "hyper-util", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-rustls 0.24.2", + "ipnet", "js-sys", "log", + "mime", + "once_cell", "percent-encoding", "pin-project-lite", - "quinn", - "rustls 0.23.36", - "rustls-pki-types", - "rustls-platform-verifier", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper", + "system-configuration 0.5.1", "tokio", - "tokio-rustls 0.26.4", - "tower 0.5.3", - "tower-http", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "winreg", ] [[package]] @@ -4046,28 +3624,15 @@ dependencies = [ [[package]] name = "roxmltree" -version = "0.21.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1964b10c76125c36f8afe190065a4bf9a87bf324842c05701330bba9f1cacbb" -dependencies = [ - "memchr", -] - -[[package]] -name = "rsqlite-vfs" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" -dependencies = [ - "hashbrown 0.16.1", - "thiserror 2.0.18", -] +checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f" [[package]] name = "rusqlite" -version = "0.38.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ "bitflags 2.10.0", "fallible-iterator", @@ -4075,7 +3640,6 @@ dependencies = [ "hashlink", "libsqlite3-sys", "smallvec", - "sqlite-wasm-rs", ] [[package]] @@ -4139,30 +3703,70 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.36" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ - "aws-lc-rs", - "log", - "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.9", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe 0.1.6", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework 2.11.1", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe 0.1.6", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.5.1", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", ] [[package]] @@ -4171,37 +3775,9 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ - "web-time", "zeroize", ] -[[package]] -name = "rustls-platform-verifier" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" -dependencies = [ - "core-foundation 0.10.1", - "core-foundation-sys", - "jni", - "log", - "once_cell", - "rustls 0.23.36", - "rustls-native-certs", - "rustls-platform-verifier-android", - "rustls-webpki 0.103.9", - "security-framework", - "security-framework-sys", - "webpki-root-certs", - "windows-sys 0.61.2", -] - -[[package]] -name = "rustls-platform-verifier-android" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" - [[package]] name = "rustls-webpki" version = "0.101.7" @@ -4214,11 +3790,10 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4232,9 +3807,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.23" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4265,12 +3840,11 @@ dependencies = [ [[package]] name = "schemars" -version = "1.2.1" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", - "ref-cast", "schemars_derive", "serde", "serde_json", @@ -4278,9 +3852,9 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "1.2.1" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d115b50f4aaeea07e79c1912f645c7513d81715d0420f8bc77a18c6260b307f" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" dependencies = [ "proc-macro2", "quote", @@ -4306,13 +3880,27 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.10.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.10.0", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework" version = "3.5.1" @@ -4424,11 +4012,23 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.4" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ - "serde_core", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", ] [[package]] @@ -4491,17 +4091,11 @@ dependencies = [ "libc", ] -[[package]] -name = "siphasher" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" - [[package]] name = "slab" -version = "0.4.12" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -4541,18 +4135,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" -[[package]] -name = "sqlite-wasm-rs" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" -dependencies = [ - "cc", - "js-sys", - "rsqlite-vfs", - "wasm-bindgen", -] - [[package]] name = 
"stable_deref_trait" version = "1.2.1" @@ -4653,12 +4235,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "1.0.2" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" -dependencies = [ - "futures-core", -] +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synchronoise" @@ -4693,13 +4272,34 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.7.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation 0.9.4", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.10.0", "core-foundation 0.9.4", - "system-configuration-sys", + "system-configuration-sys 0.6.0", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", ] [[package]] @@ -4714,12 +4314,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.25.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", - "getrandom 0.4.1", + "getrandom 0.3.4", "once_cell", "rustix 1.1.3", "windows-sys 0.61.2", @@ -4794,9 +4394,9 @@ 
dependencies = [ [[package]] name = "time" -version = "0.3.47" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "num-conv", @@ -4814,9 +4414,9 @@ checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.27" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -4824,9 +4424,9 @@ dependencies = [ [[package]] name = "timeago" -version = "0.5.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05030782ebd7d1295cce15a98c8805de6e70776c95f8e3468f84f7f198824f49" +checksum = "a1710e589de0a76aaf295cd47a6699f6405737dbfd3cf2b75c92d000b548d0e6" [[package]] name = "tinystr" @@ -4838,21 +4438,6 @@ dependencies = [ "zerovec", ] -[[package]] -name = "tinyvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - [[package]] name = "tokio" version = "1.49.0" @@ -4903,11 +4488,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.4" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +checksum = 
"775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.23.36", + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] @@ -4953,32 +4539,35 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ - "serde_core", + "serde", "serde_spanned", "toml_datetime", - "toml_parser", - "winnow", + "toml_edit", ] [[package]] name = "toml_datetime" -version = "0.7.5+spec-1.1.0" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ - "serde_core", + "serde", ] [[package]] -name = "toml_parser" -version = "1.0.6+spec-1.1.0" +name = "toml_edit" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ + "indexmap 2.13.0", + "serde", + "serde_spanned", + "toml_datetime", "winnow", ] @@ -4998,7 +4587,7 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", - "hyper-timeout 0.4.1", + "hyper-timeout", "percent-encoding", "pin-project", "prost", @@ -5006,7 +4595,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.6.10", - "tower 0.4.13", + "tower", "tower-layer", "tower-service", "tracing", @@ -5036,7 +4625,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util 0.7.18", @@ -5045,39 +4634,22 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower" -version = "0.5.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper", - "tokio", - "tokio-util 0.7.18", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower-http" -version = "0.6.8" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bitflags 2.10.0", "bytes", + "futures-core", "futures-util", - "http 1.4.0", - "http-body 1.0.1", - "iri-string", + "http 0.2.12", + "http-body 0.4.6", + "http-range-header", "mime", "pin-project-lite", - "tower 0.5.3", "tower-layer", "tower-service", "tracing", @@ -5198,9 +4770,9 @@ checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicode-ident" -version = "1.0.23" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -5214,12 +4786,6 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - [[package]] name = "universal-hash" version = "0.5.1" @@ -5266,29 +4832,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" -[[package]] -name = "utoipa" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993" -dependencies = [ - "indexmap 2.13.0", - "serde", - "serde_json", - "utoipa-gen", -] - -[[package]] -name = "utoipa-gen" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d79d08d92ab8af4c5e8a6da20c47ae3f61a0f1dabc1997cdf2d082b757ca08b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "uuid" version = "1.4.1" @@ -5296,7 +4839,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ "getrandom 0.2.17", - "rand 0.8.5", + "rand", ] [[package]] @@ -5380,15 +4923,6 @@ dependencies = [ "wit-bindgen", ] -[[package]] -name = "wasip3" -version = "0.4.0+wasi-0.3.0-rc-2026-01-06" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" -dependencies = [ - "wit-bindgen", -] - [[package]] name = "wasm-bindgen" version = "0.2.108" @@ -5448,40 +4982,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "wasm-encoder" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" -dependencies = [ - "leb128fmt", - "wasmparser", -] - -[[package]] -name = "wasm-metadata" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" -dependencies = [ - "anyhow", - "indexmap 2.13.0", - "wasm-encoder", - "wasmparser", -] - -[[package]] -name = "wasmparser" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" -dependencies = [ - "bitflags 2.10.0", - "hashbrown 0.15.5", - "indexmap 2.13.0", - "semver", -] - [[package]] name = "web-sys" version = "0.3.85" @@ -5492,25 +4992,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki-root-certs" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "which" version = "4.4.2" @@ -5626,11 +5107,11 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.45.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.42.2", + "windows-targets 0.48.5", ] [[package]] @@ -5671,17 +5152,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + 
"windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -5719,9 +5200,9 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" @@ -5737,9 +5218,9 @@ checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" @@ -5755,9 +5236,9 @@ checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" @@ -5785,9 +5266,9 @@ checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" @@ -5803,9 +5284,9 @@ checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" -version = "0.42.2" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" @@ -5821,9 +5302,9 @@ checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" @@ -5839,9 +5320,9 @@ checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" -version = "0.42.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" @@ -5860,94 +5341,25 @@ name = "winnow" version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] [[package]] name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" -dependencies = [ - "wit-bindgen-rust-macro", -] - -[[package]] -name = "wit-bindgen-core" -version = "0.51.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" -dependencies = [ - "anyhow", - "heck 0.5.0", - "wit-parser", -] - -[[package]] -name = "wit-bindgen-rust" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" -dependencies = [ - "anyhow", - "heck 0.5.0", - "indexmap 2.13.0", - "prettyplease", - "syn 2.0.114", - "wasm-metadata", - "wit-bindgen-core", - "wit-component", -] - -[[package]] -name = "wit-bindgen-rust-macro" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" -dependencies = [ - "anyhow", - "prettyplease", - "proc-macro2", - "quote", - "syn 2.0.114", - "wit-bindgen-core", - "wit-bindgen-rust", -] - -[[package]] -name = "wit-component" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" -dependencies = [ - "anyhow", - "bitflags 2.10.0", - "indexmap 2.13.0", - "log", - "serde", - "serde_derive", - "serde_json", - "wasm-encoder", - "wasm-metadata", - "wasmparser", - "wit-parser", -] - -[[package]] -name = "wit-parser" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" -dependencies = [ - "anyhow", - "id-arena", - "indexmap 2.13.0", - "log", - "semver", - "serde", - "serde_derive", - "serde_json", - "unicode-xid", - "wasmparser", -] [[package]] name = "writeable" @@ -5992,18 +5404,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = 
"668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", @@ -6072,9 +5484,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.20" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de98dfa5d5b7fef4ee834d0073d560c9ca7b6c46a71d058c48db7960f8cfaf7" +checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index 359f2fd2..df4005a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,171 +24,130 @@ default-members = ["src/garage"] # Internal Garage crates format_table = { version = "0.1.1", path = "src/format-table" } -garage_api_common = { version = "2.3.0", path = "src/api/common" } -garage_api_admin = { version = "2.3.0", path = "src/api/admin" } -garage_api_s3 = { version = "2.3.0", path = "src/api/s3" } -garage_api_k2v = { version = "2.3.0", path = "src/api/k2v" } -garage_block = { version = "2.3.0", path = "src/block" } -garage_db = { version = "2.3.0", path = "src/db", default-features = false } -garage_model = { version = "2.3.0", path = "src/model", default-features = false } -garage_net = { version = "2.3.0", path = "src/net" } -garage_rpc = { version = "2.3.0", path = "src/rpc" } -garage_table = { version = "2.3.0", path = "src/table" } -garage_util = { version = "2.3.0", path = "src/util" } -garage_web = { version = "2.3.0", path = "src/web" } +garage_api_common = { version = "1.3.1", path = "src/api/common" } +garage_api_admin = { version = "1.3.1", path = "src/api/admin" } +garage_api_s3 = { version = "1.3.1", path = "src/api/s3" } 
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" } +garage_block = { version = "1.3.1", path = "src/block" } +garage_db = { version = "1.3.1", path = "src/db", default-features = false } +garage_model = { version = "1.3.1", path = "src/model", default-features = false } +garage_net = { version = "1.3.1", path = "src/net" } +garage_rpc = { version = "1.3.1", path = "src/rpc" } +garage_table = { version = "1.3.1", path = "src/table" } +garage_util = { version = "1.3.1", path = "src/util" } +garage_web = { version = "1.3.1", path = "src/web" } k2v-client = { version = "0.0.4", path = "src/k2v-client" } # External crates from crates.io -arc-swap = "1.8" +arc-swap = "1.0" argon2 = "0.5" -async-trait = "0.1" +async-trait = "0.1.7" backtrace = "0.3" -base64 = "0.22" +base64 = "0.21" blake2 = "0.10" -bytes = "1.11" -bytesize = "2.3" +bytes = "1.0" +bytesize = "1.1" cfg-if = "1.0" -chrono = { version = "0.4", features = ["serde"] } -crc-fast = "1.9" +chrono = "0.4" +crc32fast = "1.4" +crc32c = "0.6" crypto-common = "0.1" -gethostname = "1.1" -git-version = "0.3" +gethostname = "0.4" +git-version = "0.3.4" hex = "0.4" hexdump = "0.1" hmac = "0.12" -itertools = "0.14" -ipnet = "2.11" -lazy_static = "1.5" +itertools = "0.12" +ipnet = "2.9.0" +lazy_static = "1.4" md-5 = "0.10" mktemp = "0.5" -nix = { version = "0.31", default-features = false, features = ["fs"] } -nom = "8.0" +nix = { version = "0.29", default-features = false, features = ["fs"] } +nom = "7.1" parking_lot = "0.12" parse_duration = "2.1" -paste = "1.0" -pin-project = "1.1" -pnet_datalink = "0.35" -rand = "0.9" +pin-project = "1.0.12" +pnet_datalink = "0.34" +rand = "0.8" sha1 = "0.10" sha2 = "0.10" -timeago = { version = "0.5", default-features = false } +timeago = { version = "0.4", default-features = false } xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] } aes-gcm = { version = "0.10", features = ["aes", "stream"] } sodiumoxide = { version = "0.2.5-0", package = 
"kuska-sodiumoxide" } kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] } -clap = { version = "4.5", features = ["derive", "env"] } +clap = { version = "4.1", features = ["derive", "env"] } pretty_env_logger = "0.5" structopt = { version = "0.3", default-features = false } syslog-tracing = "0.3" tracing = "0.1" -tracing-journald = "0.3" +tracing-journald = "0.3.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -heed = { version = "0.22", default-features = false, features = [] } -rusqlite = { version = "0.38", features = ["fallible_uint"] } +heed = { version = "0.11", default-features = false, features = ["lmdb"] } +rusqlite = "0.37" r2d2 = "0.8" -r2d2_sqlite = "0.32" -fjall = "2.11" +r2d2_sqlite = "0.31" +fjall = "2.4" async-compression = { version = "0.4", features = ["tokio", "zstd"] } zstd = { version = "0.13", default-features = false } -quick-xml = { version = "0.39", features = ["serialize"] } -rmp-serde = "1.3" +quick-xml = { version = "0.26", features = [ "serialize" ] } +rmp-serde = "1.1.2" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" serde_json = "1.0" -toml = { version = "0.9", default-features = false, features = ["parse", "serde"] } -utoipa = { version = "5.4", features = ["chrono"] } +toml = { version = "0.8", default-features = false, features = ["parse"] } # newer version requires rust edition 2021 -k8s-openapi = { version = "0.27", features = ["v1_35"] } -kube = { version = "3.0", default-features = false, features = [ - "runtime", - "derive", - "client", - "rustls-tls", -] } -schemars = "1.2" -reqwest = { version = "0.13", default-features = false, features = [ - "rustls", - "json", -] } +k8s-openapi = { version = "0.21", features = ["v1_24"] } +kube = { version = "0.88", default-features = false, features = ["runtime", "derive", "client", "rustls-tls"] } +schemars = "0.8" +reqwest = { version = "0.11", default-features = false, features = 
["rustls-tls-manual-roots", "json"] } -form_urlencoded = "1.2" -http = "1.4" +form_urlencoded = "1.0.0" +http = "1.0" httpdate = "1.0" http-range = "0.1" http-body-util = "0.1" -hyper = { version = "1.8", default-features = false } -hyper-util = { version = "0.1", features = ["full"] } -multer = "3.1" -percent-encoding = "2.3" -roxmltree = "0.21" -url = "2.5" +hyper = { version = "1.0", default-features = false } +hyper-util = { version = "0.1", features = [ "full" ] } +multer = "3.0" +percent-encoding = "2.2" +roxmltree = "0.19" +url = "2.3" futures = "0.3" futures-util = "0.3" -tokio = { version = "1.49", default-features = false, features = [ - "rt", - "rt-multi-thread", - "io-util", - "net", - "time", - "macros", - "sync", - "signal", - "fs", -] } +tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } tokio-util = { version = "0.7", features = ["compat", "io"] } tokio-stream = { version = "0.1", features = ["net"] } -socket2 = { version = "0.6", features = ["all"] } -opentelemetry = { version = "0.17", features = ["rt-tokio", "metrics", "trace"] } +opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] } opentelemetry-prometheus = "0.10" opentelemetry-otlp = "0.10" opentelemetry-contrib = "0.9" prometheus = "0.13" # used by the k2v-client crate only -aws-sigv4 = { version = "1.3", default-features = false } -hyper-rustls = { version = "0.27", default-features = false, features = [ - "http1", - "http2", - "ring", - "rustls-native-certs", -] } +aws-sigv4 = { version = "1.1", default-features = false } +hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] } log = "0.4" thiserror = "2.0" # ---- used only as build / dev dependencies ---- assert-json-diff = "2.0" -rustc_version = "0.4" +rustc_version = "0.4.0" static_init = "1.0" -aws-smithy-runtime = { version = "1.9", 
default-features = false, features = [ - "tls-rustls", -] } -aws-sdk-config = { version = "1.99", default-features = false } -aws-sdk-s3 = { version = "1.121", default-features = false, features = [ - "rt-tokio", -] } +aws-smithy-runtime = { version = "1.8", default-features = false, features = ["tls-rustls"] } +aws-sdk-config = { version = "1.62", default-features = false } +aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] } [profile.release] lto = "thin" codegen-units = 16 opt-level = 3 strip = "debuginfo" - -[workspace.lints.clippy] -# pedantic lints configuration -doc_markdown = "warn" -format_collect = "warn" -manual_midpoint = "warn" -semicolon_if_nothing_returned = "warn" -unnecessary_semicolon = "warn" -unnecessary_wraps = "warn" - -# nursery lints configuration -# or_fun_call = "warn" # enable it to help detect non trivial code used in `_or` method diff --git a/doc/api/garage-admin-v0.html b/doc/api/garage-admin-v0.html index 7eb11f25..dbdd9e1c 100644 --- a/doc/api/garage-admin-v0.html +++ b/doc/api/garage-admin-v0.html @@ -1,7 +1,7 @@ - Garage administration API v0 + Garage Adminstration API v0 diff --git a/doc/api/garage-admin-v0.yml b/doc/api/garage-admin-v0.yml index d2e05a42..83316d93 100644 --- a/doc/api/garage-admin-v0.yml +++ b/doc/api/garage-admin-v0.yml @@ -3,10 +3,10 @@ info: version: v0.8.0 title: Garage Administration API v0+garage-v0.8.0 description: | - Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks. - - *Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionally, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. 
Do not expect a well finished and polished product!* -paths: + Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks. + + *Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionnaly, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!* +paths: /status: get: tags: diff --git a/doc/api/garage-admin-v1.html b/doc/api/garage-admin-v1.html index e98306b8..783d459e 100644 --- a/doc/api/garage-admin-v1.html +++ b/doc/api/garage-admin-v1.html @@ -1,7 +1,7 @@ - Garage administration API v1 + Garage Adminstration API v0 diff --git a/doc/api/garage-admin-v1.yml b/doc/api/garage-admin-v1.yml index 90465890..a70dc97b 100644 --- a/doc/api/garage-admin-v1.yml +++ b/doc/api/garage-admin-v1.yml @@ -3,10 +3,10 @@ info: version: v0.9.0 title: Garage Administration API v0+garage-v0.9.0 description: | - Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks. - - *Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionally, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!* -paths: + Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks. + + *Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. 
Additionnaly, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!* +paths: /health: get: tags: @@ -440,7 +440,7 @@ paths: - "false" example: "true" required: false - description: "Whether or not the secret key should be returned in the response" + description: "Wether or not the secret key should be returned in the response" responses: '500': description: "The server can not handle your request. Check your connectivity with the rest of the cluster." diff --git a/doc/api/garage-admin-v2.html b/doc/api/garage-admin-v2.html deleted file mode 100644 index b079e760..00000000 --- a/doc/api/garage-admin-v2.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - Garage administration API v2 - - - - - - - - - - - - - diff --git a/doc/api/garage-admin-v2.json b/doc/api/garage-admin-v2.json deleted file mode 100644 index b8d809bf..00000000 --- a/doc/api/garage-admin-v2.json +++ /dev/null @@ -1,4879 +0,0 @@ -{ - "openapi": "3.1.0", - "info": { - "title": "Garage administration API", - "description": "Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks.\n\n*Disclaimer: This API may change in future Garage versions. Read the changelog and upgrade your scripts before upgrading. Additionally, this specification is early stage and can contain bugs, so be careful and please report any issues on our issue tracker.*", - "contact": { - "name": "The Garage team", - "url": "https://garagehq.deuxfleurs.fr/", - "email": "garagehq@deuxfleurs.fr" - }, - "license": { - "name": "AGPL-3.0", - "identifier": "AGPL-3.0" - }, - "version": "v2.3.0" - }, - "servers": [ - { - "url": "http://localhost:3903/", - "description": "A local server" - } - ], - "paths": { - "/check": { - "get": { - "tags": [ - "Special endpoints" - ], - "description": "\nStatic website domain name check. 
Checks whether a bucket is configured to serve\na static website for the requested domain. This is used by reverse proxies such\nas Caddy or Tricot, to avoid requesting TLS certificates for domain names that\ndo not correspond to an actual website.\n ", - "operationId": "CheckDomain", - "parameters": [ - { - "name": "domain", - "in": "query", - "description": "The domain name to check for", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "The domain name redirects to a static website bucket" - }, - "400": { - "description": "No static website bucket exists for this domain" - } - }, - "security": [ - {} - ] - } - }, - "/health": { - "get": { - "tags": [ - "Special endpoints" - ], - "description": "\nCheck cluster health. The status code returned by this function indicates\nwhether this Garage daemon can answer API requests.\nGarage will return `200 OK` even if some storage nodes are disconnected,\nas long as it is able to have a quorum of nodes for read and write operations.\n ", - "operationId": "Health", - "responses": { - "200": { - "description": "Garage is able to answer requests" - }, - "503": { - "description": "This Garage daemon is not able to handle requests" - } - }, - "security": [ - {} - ] - } - }, - "/metrics": { - "get": { - "tags": [ - "Special endpoints" - ], - "description": "Prometheus metrics endpoint", - "operationId": "Metrics", - "responses": { - "200": { - "description": "Garage daemon metrics exported in Prometheus format" - } - }, - "security": [ - {}, - { - "bearerAuth": [] - } - ] - } - }, - "/v2/AddBucketAlias": { - "post": { - "tags": [ - "Bucket alias" - ], - "description": "Add an alias for the target bucket. 
This can be either a global or a local alias, depending on which fields are specified.", - "operationId": "AddBucketAlias", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BucketAliasEnum" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AddBucketAliasResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/AllowBucketKey": { - "post": { - "tags": [ - "Permission" - ], - "description": "\n⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.\n\nAllows a key to do read/write/owner operations on a bucket.\n\nFlags in permissions which have the value true will be activated. Other flags will remain unchanged (ie. 
they will keep their internal value).\n\nFor example, if you set read to true, the key will be allowed to read the bucket.\nIf you set it to false, the key will keeps its previous read permission.\nIf you want to disallow read for the key, check the DenyBucketKey operation.\n ", - "operationId": "AllowBucketKey", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AllowBucketKeyRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AllowBucketKeyResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ApplyClusterLayout": { - "post": { - "tags": [ - "Cluster layout" - ], - "description": "\nApplies to the cluster the layout changes currently registered as staged layout changes.\n\n*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*\n ", - "operationId": "ApplyClusterLayout", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ApplyClusterLayoutRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "The updated cluster layout has been applied in the cluster", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ApplyClusterLayoutResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/CleanupIncompleteUploads": { - "post": { - "tags": [ - "Bucket" - ], - "description": "Removes all incomplete multipart uploads that are older than the specified number of seconds.", - "operationId": "CleanupIncompleteUploads", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CleanupIncompleteUploadsRequest" - } 
- } - }, - "required": true - }, - "responses": { - "200": { - "description": "The bucket was cleaned up successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CleanupIncompleteUploadsResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ClusterLayoutSkipDeadNodes": { - "post": { - "tags": [ - "Cluster layout" - ], - "description": "Force progress in layout update trackers", - "operationId": "ClusterLayoutSkipDeadNodes", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterLayoutSkipDeadNodesRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Request has been taken into account", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterLayoutSkipDeadNodesResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ConnectClusterNodes": { - "post": { - "tags": [ - "Cluster" - ], - "description": "Instructs this Garage node to connect to other Garage nodes at specified `@`. 
`node_id` is generated automatically on node start.", - "operationId": "ConnectClusterNodes", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ConnectClusterNodesRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "The request has been handled correctly but it does not mean that all connection requests succeeded; some might have fail, you need to check the body!", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ConnectClusterNodesResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/CreateAdminToken": { - "post": { - "tags": [ - "Admin API token" - ], - "description": "Creates a new admin API token", - "operationId": "CreateAdminToken", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateAdminTokenRequestBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Admin token has been created", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateAdminTokenResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/CreateBucket": { - "post": { - "tags": [ - "Bucket" - ], - "description": "\nCreates a new bucket, either with a global alias, a local one, or no alias at all.\nTechnically, you can also specify both `globalAlias` and `localAlias` and that would create two aliases.\n ", - "operationId": "CreateBucket", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBucketRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateBucketResponse" - } - } - } - }, - "500": { - "description": "Internal 
server error" - } - } - } - }, - "/v2/CreateKey": { - "post": { - "tags": [ - "Access key" - ], - "description": "Creates a new API access key.", - "operationId": "CreateKey", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateKeyRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Access key has been created", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateKeyResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/CreateMetadataSnapshot": { - "post": { - "tags": [ - "Node" - ], - "description": "\nInstruct one or several nodes to take a snapshot of their metadata databases.\n ", - "operationId": "CreateMetadataSnapshot", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalCreateMetadataSnapshotResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/DeleteAdminToken": { - "post": { - "tags": [ - "Admin API token" - ], - "description": "Delete an admin API token from the cluster, revoking all its permissions.", - "operationId": "DeleteAdminToken", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Admin API token ID", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Admin token has been deleted" - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/DeleteBucket": { - "post": { - "tags": [ - "Bucket" - ], - "description": "\nDeletes a storage bucket. 
A bucket cannot be deleted if it is not empty.\n\n**Warning:** this will delete all aliases associated with the bucket!\n ", - "operationId": "DeleteBucket", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "ID of the bucket to delete", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Bucket has been deleted" - }, - "400": { - "description": "Bucket is not empty" - }, - "404": { - "description": "Bucket not found" - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/DeleteKey": { - "post": { - "tags": [ - "Access key" - ], - "description": "Delete a key from the cluster. Its access will be removed from all the buckets. Buckets are not automatically deleted and can be dangling. You should manually delete them before. ", - "operationId": "DeleteKey", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Access key ID", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Access key has been deleted" - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/DenyBucketKey": { - "post": { - "tags": [ - "Permission" - ], - "description": "\n⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.\n\nDenies a key from doing read/write/owner operations on a bucket.\n\nFlags in permissions which have the value true will be deactivated. 
Other flags will remain unchanged.\n\nFor example, if you set read to true, the key will be denied from reading.\nIf you set read to false, the key will keep its previous permissions.\nIf you want the key to have the reading permission, check the AllowBucketKey operation.\n ", - "operationId": "DenyBucketKey", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DenyBucketKeyRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DenyBucketKeyResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetAdminTokenInfo": { - "get": { - "tags": [ - "Admin API token" - ], - "description": "\nReturn information about a specific admin API token.\nYou can search by specifying the exact token identifier (`id`) or by specifying a pattern (`search`).\n ", - "operationId": "GetAdminTokenInfo", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Admin API token ID", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "search", - "in": "query", - "description": "Partial token ID or name to search for", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Information about the admin token", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetAdminTokenInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetBlockInfo": { - "post": { - "tags": [ - "Block" - ], - "description": "\nGet detailed information about a data block stored on a Garage node, including all object versions and in-progress multipart uploads that contain a reference to this block.\n ", - "operationId": "GetBlockInfo", - "parameters": [ - { - "name": "node", 
- "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalGetBlockInfoRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Detailed block information", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalGetBlockInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetBucketInfo": { - "get": { - "tags": [ - "Bucket" - ], - "description": "\nGiven a bucket identifier (`id`) or a global alias (`alias`), get its information.\nIt includes its aliases, its web configuration, keys that have some permissions\non it, some statistics (number of objects, size), number of dangling multipart uploads,\nand its quotas (if any).\n ", - "operationId": "GetBucketInfo", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Exact bucket ID to look up", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "globalAlias", - "in": "query", - "description": "Global alias of bucket to look up", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "search", - "in": "query", - "description": "Partial ID or alias to search for", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetClusterHealth": { - "get": { - "tags": [ - "Cluster" - ], - "description": "Returns the global status of the cluster, the number of connected nodes (over the 
number of known ones), the number of healthy storage nodes (over the declared ones), and the number of healthy partitions (over the total).", - "operationId": "GetClusterHealth", - "responses": { - "200": { - "description": "Cluster health report", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetClusterHealthResponse" - } - } - } - } - } - } - }, - "/v2/GetClusterLayout": { - "get": { - "tags": [ - "Cluster layout" - ], - "description": "\nReturns the cluster's current layout, including:\n\n- Currently configured cluster layout\n- Staged changes to the cluster layout\n\n*Capacity is given in bytes*\n ", - "operationId": "GetClusterLayout", - "responses": { - "200": { - "description": "Current cluster layout", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetClusterLayoutResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetClusterLayoutHistory": { - "get": { - "tags": [ - "Cluster layout" - ], - "description": "\nReturns the history of layouts in the cluster\n ", - "operationId": "GetClusterLayoutHistory", - "responses": { - "200": { - "description": "Cluster layout history", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetClusterLayoutHistoryResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetClusterStatistics": { - "get": { - "tags": [ - "Cluster" - ], - "description": "\nFetch global cluster statistics.\n\n*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*\n ", - "operationId": "GetClusterStatistics", - "responses": { - "200": { - "description": "Global cluster statistics", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetClusterStatisticsResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - 
} - } - } - }, - "/v2/GetClusterStatus": { - "get": { - "tags": [ - "Cluster" - ], - "description": "\nReturns the cluster's current status, including:\n\n- ID of the node being queried and its version of the Garage daemon\n- Live nodes\n- Currently configured cluster layout\n- Staged changes to the cluster layout\n\n*Capacity is given in bytes*\n ", - "operationId": "GetClusterStatus", - "responses": { - "200": { - "description": "Cluster status report", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetClusterStatusResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetCurrentAdminTokenInfo": { - "get": { - "tags": [ - "Admin API token" - ], - "description": "\nReturn information about the calling admin API token.\n ", - "operationId": "GetCurrentAdminTokenInfo", - "responses": { - "200": { - "description": "Information about the admin token", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetCurrentAdminTokenInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetKeyInfo": { - "get": { - "tags": [ - "Access key" - ], - "description": "\nReturn information about a specific key like its identifiers, its permissions and buckets on which it has permissions.\nYou can search by specifying the exact key identifier (`id`) or by specifying a pattern (`search`).\n\nFor confidentiality reasons, the secret key is not returned by default: you must pass the `showSecretKey` query parameter to get it.\n ", - "operationId": "GetKeyInfo", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Access key ID", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "search", - "in": "query", - "description": "Partial key ID or name to search for", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "showSecretKey", - "in": "query", - 
"description": "Whether to return the secret access key", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Information about the access key", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetKeyInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetNodeInfo": { - "get": { - "tags": [ - "Node" - ], - "description": "\nReturn information about the Garage daemon running on one or several nodes.\n ", - "operationId": "GetNodeInfo", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalGetNodeInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetNodeStatistics": { - "get": { - "tags": [ - "Node" - ], - "description": "\nFetch statistics for one or several Garage nodes.\n\n*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*\n ", - "operationId": "GetNodeStatistics", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalGetNodeStatisticsResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - 
"/v2/GetWorkerInfo": { - "post": { - "tags": [ - "Worker" - ], - "description": "\nGet information about the specified background worker on one or several cluster nodes.\n ", - "operationId": "GetWorkerInfo", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalGetWorkerInfoRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalGetWorkerInfoResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/GetWorkerVariable": { - "post": { - "tags": [ - "Worker" - ], - "description": "\nFetch values of one or several worker variables, from one or several cluster nodes.\n ", - "operationId": "GetWorkerVariable", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalGetWorkerVariableRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalGetWorkerVariableResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ImportKey": { - "post": { - "tags": [ - "Access key" - ], - "description": "\nImports an existing API key. 
This feature must only be used for migrations and backup restore.\n\n**Do not use it to generate custom key identifiers or you will break your Garage cluster.**\n ", - "operationId": "ImportKey", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ImportKeyRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Access key has been imported", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ImportKeyResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/InspectObject": { - "get": { - "tags": [ - "Bucket" - ], - "description": "\nReturns detailed information about an object in a bucket, including its internal state in Garage.\n\nThis API call can be used to list the data blocks referenced by an object,\nas well as to view metadata associated to the object.\n\nThis call may return a list of more than one version for the object, for instance in the\ncase where there is a currently stored version of the object, and a newer version whose\nupload is in progress and not yet finished.\n ", - "operationId": "InspectObject", - "parameters": [ - { - "name": "bucketId", - "in": "query", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "key", - "in": "query", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Returns exhaustive information about the object", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/InspectObjectResponse" - } - } - } - }, - "404": { - "description": "Object not found" - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/LaunchRepairOperation": { - "post": { - "tags": [ - "Node" - ], - "description": "\nLaunch a repair operation on one or several cluster nodes.\n ", - "operationId": "LaunchRepairOperation", - "parameters": [ - { - "name": 
"node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalLaunchRepairOperationRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalLaunchRepairOperationResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ListAdminTokens": { - "get": { - "tags": [ - "Admin API token" - ], - "description": "Returns all admin API tokens in the cluster.", - "operationId": "ListAdminTokens", - "responses": { - "200": { - "description": "Returns info about all admin API tokens", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListAdminTokensResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ListBlockErrors": { - "get": { - "tags": [ - "Block" - ], - "description": "\nList data blocks that are currently in an errored state on one or several Garage nodes.\n ", - "operationId": "ListBlockErrors", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalListBlockErrorsResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ListBuckets": { - "get": { - "tags": [ - "Bucket" - ], - "description": "List all the buckets on 
the cluster with their UUID and their global and local aliases.", - "operationId": "ListBuckets", - "responses": { - "200": { - "description": "Returns the UUID of all the buckets and all their aliases", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListBucketsResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ListKeys": { - "get": { - "tags": [ - "Access key" - ], - "description": "Returns all API access keys in the cluster.", - "operationId": "ListKeys", - "responses": { - "200": { - "description": "Returns the key identifier (aka `AWS_ACCESS_KEY_ID`) and its associated, human friendly, name if any (otherwise return an empty string)", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListKeysResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/ListWorkers": { - "post": { - "tags": [ - "Worker" - ], - "description": "\nList background workers currently running on one or several cluster nodes.\n ", - "operationId": "ListWorkers", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalListWorkersRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalListWorkersResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/PreviewClusterLayoutChanges": { - "post": { - "tags": [ - "Cluster layout" - ], - "description": "\nComputes a new layout taking into account the staged parameters, and 
returns it with detailed statistics. The new layout is not applied in the cluster.\n\n*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*\n ", - "operationId": "PreviewClusterLayoutChanges", - "responses": { - "200": { - "description": "Information about the new layout", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PreviewClusterLayoutChangesResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/PurgeBlocks": { - "post": { - "tags": [ - "Block" - ], - "description": "\nPurge references to one or several missing data blocks.\n\nThis will remove all objects and in-progress multipart uploads that contain the specified data block(s). The objects will be permanently deleted from the buckets in which they appear. Use with caution.\n ", - "operationId": "PurgeBlocks", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalPurgeBlocksRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalPurgeBlocksResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/RemoveBucketAlias": { - "post": { - "tags": [ - "Bucket alias" - ], - "description": "Remove an alias for the target bucket. 
This can be either a global or a local alias, depending on which fields are specified.", - "operationId": "RemoveBucketAlias", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BucketAliasEnum" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Returns exhaustive information about the bucket", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RemoveBucketAliasResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/RetryBlockResync": { - "post": { - "tags": [ - "Block" - ], - "description": "\nInstruct Garage node(s) to retry the resynchronization of one or several missing data block(s).\n ", - "operationId": "RetryBlockResync", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalRetryBlockResyncRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalRetryBlockResyncResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/RevertClusterLayout": { - "post": { - "tags": [ - "Cluster layout" - ], - "description": "Clear staged layout changes", - "operationId": "RevertClusterLayout", - "responses": { - "200": { - "description": "All pending changes to the cluster layout have been erased", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RevertClusterLayoutResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - 
"/v2/SetWorkerVariable": { - "post": { - "tags": [ - "Worker" - ], - "description": "\nSet the value for a worker variable, on one or several cluster nodes.\n ", - "operationId": "SetWorkerVariable", - "parameters": [ - { - "name": "node", - "in": "query", - "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LocalSetWorkerVariableRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Responses from individual cluster nodes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MultiResponse_LocalSetWorkerVariableResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/UpdateAdminToken": { - "post": { - "tags": [ - "Admin API token" - ], - "description": "\nUpdates information about the specified admin API token.\n ", - "operationId": "UpdateAdminToken", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Admin API token ID", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateAdminTokenRequestBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Admin token has been updated", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateAdminTokenResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/UpdateBucket": { - "post": { - "tags": [ - "Bucket" - ], - "description": "\nAll fields (`websiteAccess` and `quotas`) are optional.\nIf they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed.\n\nIn `websiteAccess`: if `enabled` is 
`true`, `indexDocument` must be specified.\nThe field `errorDocument` is optional, if no error document is set a generic\nerror message is displayed when errors happen. Conversely, if `enabled` is\n`false`, neither `indexDocument` nor `errorDocument` must be specified.\n\nIn `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`\nto remove the quotas. An absent value will be considered the same as a `null`. It is not possible\nto change only one of the two quotas.\n ", - "operationId": "UpdateBucket", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "ID of the bucket to update", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBucketRequestBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Bucket has been updated", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateBucketResponse" - } - } - } - }, - "404": { - "description": "Bucket not found" - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/UpdateClusterLayout": { - "post": { - "tags": [ - "Cluster layout" - ], - "description": "\nSend modifications to the cluster layout. These modifications will be included in the staged role changes, visible in subsequent calls of `GET /GetClusterHealth`. 
Once the set of staged changes is satisfactory, the user may call `POST /ApplyClusterLayout` to apply the changed changes, or `POST /RevertClusterLayout` to clear all of the staged changes in the layout.\n\nSetting the capacity to `null` will configure the node as a gateway.\nOtherwise, capacity must be now set in bytes (before Garage 0.9 it was arbitrary weights).\nFor example to declare 100GB, you must set `capacity: 100000000000`.\n\nGarage uses internally the International System of Units (SI), it assumes that 1kB = 1000 bytes, and displays storage as kB, MB, GB (and not KiB, MiB, GiB that assume 1KiB = 1024 bytes).\n ", - "operationId": "UpdateClusterLayout", - "requestBody": { - "description": "\nTo add a new node to the layout or to change the configuration of an existing node, simply set the values you want (`zone`, `capacity`, and `tags`).\nTo remove a node, simply pass the `remove: true` field.\nThis logic is represented in OpenAPI with a 'One Of' object.\n\nContrary to the CLI that may update only a subset of the fields capacity, zone and tags, when calling this API all of these values must be specified.\n ", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateClusterLayoutRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Proposed changes have been added to the list of pending changes", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateClusterLayoutResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - }, - "/v2/UpdateKey": { - "post": { - "tags": [ - "Access key" - ], - "description": "\nUpdates information about the specified API access key.\n\n*Note: the secret key is not returned in the response, `null` is sent instead.*\n ", - "operationId": "UpdateKey", - "parameters": [ - { - "name": "id", - "in": "query", - "description": "Access key ID", - "required": true, - "schema": { - "type": 
"string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateKeyRequestBody" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Access key has been updated", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdateKeyResponse" - } - } - } - }, - "500": { - "description": "Internal server error" - } - } - } - } - }, - "components": { - "schemas": { - "AddBucketAliasResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "AllowBucketKeyRequest": { - "$ref": "#/components/schemas/BucketKeyPermChangeRequest" - }, - "AllowBucketKeyResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "ApiBucketKeyPerm": { - "type": "object", - "properties": { - "owner": { - "type": "boolean" - }, - "read": { - "type": "boolean" - }, - "write": { - "type": "boolean" - } - } - }, - "ApiBucketQuotas": { - "type": "object", - "properties": { - "maxObjects": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "minimum": 0 - }, - "maxSize": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "minimum": 0 - } - } - }, - "ApplyClusterLayoutRequest": { - "type": "object", - "required": [ - "version" - ], - "properties": { - "version": { - "type": "integer", - "format": "int64", - "description": "As a safety measure, the new version number of the layout must\nbe specified here", - "minimum": 0 - } - } - }, - "ApplyClusterLayoutResponse": { - "type": "object", - "required": [ - "message", - "layout" - ], - "properties": { - "layout": { - "$ref": "#/components/schemas/GetClusterLayoutResponse", - "description": "Details about the new cluster layout" - }, - "message": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Plain-text information about the layout computation\n(do not try to parse this)" - } - } - }, - "BlockError": { - "type": "object", - "required": [ - 
"blockHash", - "refcount", - "errorCount", - "lastTrySecsAgo", - "nextTryInSecs" - ], - "properties": { - "blockHash": { - "type": "string" - }, - "errorCount": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "lastTrySecsAgo": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "nextTryInSecs": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "refcount": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "BlockVersion": { - "type": "object", - "required": [ - "versionId", - "refDeleted", - "versionDeleted", - "garbageCollected" - ], - "properties": { - "backlink": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/BlockVersionBacklink" - } - ] - }, - "garbageCollected": { - "type": "boolean" - }, - "refDeleted": { - "type": "boolean" - }, - "versionDeleted": { - "type": "boolean" - }, - "versionId": { - "type": "string" - } - } - }, - "BlockVersionBacklink": { - "oneOf": [ - { - "type": "object", - "required": [ - "object" - ], - "properties": { - "object": { - "type": "object", - "required": [ - "bucketId", - "key" - ], - "properties": { - "bucketId": { - "type": "string" - }, - "key": { - "type": "string" - } - } - } - } - }, - { - "type": "object", - "required": [ - "upload" - ], - "properties": { - "upload": { - "type": "object", - "required": [ - "uploadId", - "uploadDeleted", - "uploadGarbageCollected" - ], - "properties": { - "bucketId": { - "type": [ - "string", - "null" - ] - }, - "key": { - "type": [ - "string", - "null" - ] - }, - "uploadDeleted": { - "type": "boolean" - }, - "uploadGarbageCollected": { - "type": "boolean" - }, - "uploadId": { - "type": "string" - } - } - } - } - } - ] - }, - "BucketAliasEnum": { - "oneOf": [ - { - "type": "object", - "required": [ - "bucketId", - "globalAlias" - ], - "properties": { - "bucketId": { - "type": "string" - }, - "globalAlias": { - "type": "string" - } - } - }, - { - "type": "object", - "required": [ - 
"bucketId", - "localAlias", - "accessKeyId" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "bucketId": { - "type": "string" - }, - "localAlias": { - "type": "string" - } - } - } - ] - }, - "BucketKeyPermChangeRequest": { - "type": "object", - "required": [ - "bucketId", - "accessKeyId", - "permissions" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "bucketId": { - "type": "string" - }, - "permissions": { - "$ref": "#/components/schemas/ApiBucketKeyPerm" - } - } - }, - "BucketLocalAlias": { - "type": "object", - "required": [ - "accessKeyId", - "alias" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "alias": { - "type": "string" - } - } - }, - "CleanupIncompleteUploadsRequest": { - "type": "object", - "required": [ - "bucketId", - "olderThanSecs" - ], - "properties": { - "bucketId": { - "type": "string" - }, - "olderThanSecs": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "CleanupIncompleteUploadsResponse": { - "type": "object", - "required": [ - "uploadsDeleted" - ], - "properties": { - "uploadsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "ClusterLayoutSkipDeadNodesRequest": { - "type": "object", - "required": [ - "version", - "allowMissingData" - ], - "properties": { - "allowMissingData": { - "type": "boolean", - "description": "Allow the skip even if a quorum of nodes could not be found for\nthe data among the remaining nodes" - }, - "version": { - "type": "integer", - "format": "int64", - "description": "Version number of the layout to assume is currently up-to-date.\nThis will generally be the current layout version.", - "minimum": 0 - } - } - }, - "ClusterLayoutSkipDeadNodesResponse": { - "type": "object", - "required": [ - "ackUpdated", - "syncUpdated" - ], - "properties": { - "ackUpdated": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Nodes for which the ACK update tracker has been updated to 
`version`" - }, - "syncUpdated": { - "type": "array", - "items": { - "type": "string" - }, - "description": "If `allow_missing_data` is set,\nnodes for which the SYNC update tracker has been updated to `version`" - } - } - }, - "ClusterLayoutVersion": { - "type": "object", - "required": [ - "version", - "status", - "storageNodes", - "gatewayNodes" - ], - "properties": { - "gatewayNodes": { - "type": "integer", - "format": "int64", - "description": "Number of nodes with a gateway role in this layout version", - "minimum": 0 - }, - "status": { - "$ref": "#/components/schemas/ClusterLayoutVersionStatus", - "description": "Status of this layout version" - }, - "storageNodes": { - "type": "integer", - "format": "int64", - "description": "Number of nodes with an assigned storage capacity in this layout version", - "minimum": 0 - }, - "version": { - "type": "integer", - "format": "int64", - "description": "Version number of this layout version", - "minimum": 0 - } - } - }, - "ClusterLayoutVersionStatus": { - "type": "string", - "enum": [ - "Current", - "Draining", - "Historical" - ] - }, - "ConnectClusterNodesRequest": { - "type": "array", - "items": { - "type": "string" - } - }, - "ConnectClusterNodesResponse": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ConnectNodeResponse" - } - }, - "ConnectNodeResponse": { - "type": "object", - "required": [ - "success" - ], - "properties": { - "error": { - "type": [ - "string", - "null" - ], - "description": "An error message if Garage did not manage to connect to this node" - }, - "success": { - "type": "boolean", - "description": "`true` if Garage managed to connect to this node" - } - } - }, - "CreateAdminTokenResponse": { - "allOf": [ - { - "$ref": "#/components/schemas/GetAdminTokenInfoResponse" - }, - { - "type": "object", - "required": [ - "secretToken" - ], - "properties": { - "secretToken": { - "type": "string", - "description": "The secret bearer token. 
**CAUTION:** This token will be shown only\nONCE, so this value MUST be remembered somewhere, or the token\nwill be unusable." - } - } - } - ] - }, - "CreateBucketLocalAlias": { - "type": "object", - "required": [ - "accessKeyId", - "alias" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "alias": { - "type": "string" - }, - "allow": { - "$ref": "#/components/schemas/ApiBucketKeyPerm" - } - } - }, - "CreateBucketRequest": { - "type": "object", - "properties": { - "globalAlias": { - "type": [ - "string", - "null" - ] - }, - "localAlias": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/CreateBucketLocalAlias" - } - ] - } - } - }, - "CreateBucketResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "CreateKeyRequest": { - "$ref": "#/components/schemas/UpdateKeyRequestBody" - }, - "CreateKeyResponse": { - "$ref": "#/components/schemas/GetKeyInfoResponse" - }, - "DenyBucketKeyRequest": { - "$ref": "#/components/schemas/BucketKeyPermChangeRequest" - }, - "DenyBucketKeyResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "FreeSpaceResp": { - "type": "object", - "required": [ - "available", - "total" - ], - "properties": { - "available": { - "type": "integer", - "format": "int64", - "description": "Number of bytes available", - "minimum": 0 - }, - "total": { - "type": "integer", - "format": "int64", - "description": "Total number of bytes", - "minimum": 0 - } - } - }, - "GetAdminTokenInfoResponse": { - "type": "object", - "required": [ - "name", - "expired", - "scope" - ], - "properties": { - "created": { - "type": [ - "string", - "null" - ], - "format": "date-time", - "description": "Creation date" - }, - "expiration": { - "type": [ - "string", - "null" - ], - "format": "date-time", - "description": "Expiration time and date, formatted according to RFC 3339" - }, - "expired": { - "type": "boolean", - "description": "Whether this admin token is expired already" - }, - "id": { - 
"type": [ - "string", - "null" - ], - "description": "Identifier of the admin token (which is also a prefix of the full bearer token)" - }, - "name": { - "type": "string", - "description": "Name of the admin API token" - }, - "scope": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Scope of the admin API token, a list of admin endpoint names (such as\n`GetClusterStatus`, etc), or the special value `*` to allow all\nadmin endpoints" - } - } - }, - "GetBucketInfoKey": { - "type": "object", - "required": [ - "accessKeyId", - "name", - "permissions", - "bucketLocalAliases" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "bucketLocalAliases": { - "type": "array", - "items": { - "type": "string" - } - }, - "name": { - "type": "string" - }, - "permissions": { - "$ref": "#/components/schemas/ApiBucketKeyPerm" - } - } - }, - "GetBucketInfoResponse": { - "type": "object", - "required": [ - "id", - "created", - "globalAliases", - "websiteAccess", - "keys", - "objects", - "bytes", - "unfinishedUploads", - "unfinishedMultipartUploads", - "unfinishedMultipartUploadParts", - "unfinishedMultipartUploadBytes", - "quotas" - ], - "properties": { - "bytes": { - "type": "integer", - "format": "int64", - "description": "Total number of bytes used by objects in this bucket" - }, - "corsRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/cors.Rule" - }, - "description": "CORS rules for this bucket" - }, - "created": { - "type": "string", - "format": "date-time", - "description": "Bucket creation date" - }, - "globalAliases": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of global aliases for this bucket" - }, - "id": { - "type": "string", - "description": "Identifier of the bucket" - }, - "keys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/GetBucketInfoKey" - }, - "description": "List of access keys that have permissions granted on this bucket" - 
}, - "lifecycleRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/lifecycle.Rule" - }, - "description": "Object lifecycle rules for this bucket" - }, - "objects": { - "type": "integer", - "format": "int64", - "description": "Number of objects in this bucket" - }, - "quotas": { - "$ref": "#/components/schemas/ApiBucketQuotas", - "description": "Quotas that apply to this bucket" - }, - "unfinishedMultipartUploadBytes": { - "type": "integer", - "format": "int64", - "description": "Total number of bytes used by unfinished multipart uploads in this bucket" - }, - "unfinishedMultipartUploadParts": { - "type": "integer", - "format": "int64", - "description": "Number of parts in unfinished multipart uploads in this bucket" - }, - "unfinishedMultipartUploads": { - "type": "integer", - "format": "int64", - "description": "Number of unfinished multipart uploads in this bucket" - }, - "unfinishedUploads": { - "type": "integer", - "format": "int64", - "description": "Number of unfinished uploads in this bucket" - }, - "websiteAccess": { - "type": "boolean", - "description": "Whether website access is enabled for this bucket" - }, - "websiteConfig": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/GetBucketInfoWebsiteResponse", - "description": "Website configuration for this bucket" - } - ] - } - } - }, - "GetBucketInfoWebsiteResponse": { - "type": "object", - "required": [ - "indexDocument" - ], - "properties": { - "errorDocument": { - "type": [ - "string", - "null" - ] - }, - "indexDocument": { - "type": "string" - }, - "routingRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/website.RoutingRule" - } - } - } - }, - "GetClusterHealthResponse": { - "type": "object", - "required": [ - "status", - "knownNodes", - "connectedNodes", - "storageNodes", - "storageNodesUp", - "partitions", - "partitionsQuorum", - "partitionsAllOk" - ], - "properties": { - "connectedNodes": { 
- "type": "integer", - "description": "the number of nodes this Garage node currently has an open connection to", - "minimum": 0 - }, - "knownNodes": { - "type": "integer", - "description": "the number of nodes this Garage node has had a TCP connection to since the daemon started", - "minimum": 0 - }, - "partitions": { - "type": "integer", - "description": "the total number of partitions of the data (currently always 256)", - "minimum": 0 - }, - "partitionsAllOk": { - "type": "integer", - "description": "the number of partitions for which we are connected to all storage nodes responsible of storing it", - "minimum": 0 - }, - "partitionsQuorum": { - "type": "integer", - "description": "the number of partitions for which a quorum of write nodes is available", - "minimum": 0 - }, - "status": { - "type": "string", - "description": "One of `healthy`, `degraded` or `unavailable`:\n- `healthy`: Garage node is connected to all storage nodes\n- `degraded`: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions\n- `unavailable`: a quorum of write nodes is not available for some partitions" - }, - "storageNodes": { - "type": "integer", - "description": "the number of storage nodes currently registered in the cluster layout", - "minimum": 0 - }, - "storageNodesUp": { - "type": "integer", - "description": "the number of storage nodes to which a connection is currently open", - "minimum": 0 - } - } - }, - "GetClusterLayoutHistoryResponse": { - "type": "object", - "required": [ - "currentVersion", - "minAck", - "versions" - ], - "properties": { - "currentVersion": { - "type": "integer", - "format": "int64", - "description": "The current version number of the cluster layout", - "minimum": 0 - }, - "minAck": { - "type": "integer", - "format": "int64", - "description": "All nodes in the cluster are aware of layout versions up to\nthis version number (at least)", - "minimum": 0 - }, - "updateTrackers": { - "type": [ - "object", 
- "null" - ], - "description": "Detailed update trackers for nodes (see\n`https://garagehq.deuxfleurs.fr/blog/2023-12-preserving-read-after-write-consistency/`)", - "additionalProperties": { - "$ref": "#/components/schemas/NodeUpdateTrackers" - }, - "propertyNames": { - "type": "string" - } - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ClusterLayoutVersion" - }, - "description": "Layout version history" - } - } - }, - "GetClusterLayoutResponse": { - "type": "object", - "required": [ - "version", - "roles", - "parameters", - "partitionSize", - "stagedRoleChanges" - ], - "properties": { - "parameters": { - "$ref": "#/components/schemas/LayoutParameters", - "description": "Layout parameters used when the current layout was computed" - }, - "partitionSize": { - "type": "integer", - "format": "int64", - "description": "The size, in bytes, of one Garage partition (= a shard)", - "minimum": 0 - }, - "roles": { - "type": "array", - "items": { - "$ref": "#/components/schemas/LayoutNodeRole" - }, - "description": "List of nodes that currently have a role in the cluster layout" - }, - "stagedParameters": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/LayoutParameters", - "description": "Layout parameters to use when computing the next version of\nthe cluster layout" - } - ] - }, - "stagedRoleChanges": { - "type": "array", - "items": { - "$ref": "#/components/schemas/NodeRoleChange" - }, - "description": "List of nodes that will have a new role or whose role will be\nremoved in the next version of the cluster layout" - }, - "version": { - "type": "integer", - "format": "int64", - "description": "The current version number of the cluster layout", - "minimum": 0 - } - } - }, - "GetClusterStatisticsResponse": { - "type": "object", - "required": [ - "freeform" - ], - "properties": { - "bucketCount": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "number of buckets in the cluster", 
- "minimum": 0 - }, - "dataAvail": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "available storage space for object data in the entire cluster, in bytes", - "minimum": 0 - }, - "freeform": { - "type": "string", - "description": "cluster statistics as a free-form string, kept for compatibility with nodes\nrunning older v2.x versions of garage" - }, - "incompleteAvailInfo": { - "type": [ - "boolean", - "null" - ], - "description": "true if the available storage space statistics are imprecise due to missing\ninformation of disconnected nodes. When this is the case, the actual\nspace available in the cluster might be lower than the reported values." - }, - "metadataAvail": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "available storage space for object metadata in the entire cluster, in bytes", - "minimum": 0 - }, - "totalObjectBytes": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "total size of objects stored in all buckets, before compression, deduplication and\nreplication (this is NOT equivalent to actual disk usage in the cluster)", - "minimum": 0 - }, - "totalObjectCount": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "total number of objects stored in all buckets", - "minimum": 0 - } - } - }, - "GetClusterStatusResponse": { - "type": "object", - "required": [ - "layoutVersion", - "nodes" - ], - "properties": { - "layoutVersion": { - "type": "integer", - "format": "int64", - "description": "Current version number of the cluster layout", - "minimum": 0 - }, - "nodes": { - "type": "array", - "items": { - "$ref": "#/components/schemas/NodeResp" - }, - "description": "List of nodes that are either currently connected, part of the\ncurrent cluster layout, or part of an older cluster layout that\nis still active in the cluster (being drained)." 
- } - } - }, - "GetCurrentAdminTokenInfoResponse": { - "$ref": "#/components/schemas/GetAdminTokenInfoResponse" - }, - "GetKeyInfoResponse": { - "type": "object", - "required": [ - "accessKeyId", - "name", - "expired", - "permissions", - "buckets" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "buckets": { - "type": "array", - "items": { - "$ref": "#/components/schemas/KeyInfoBucketResponse" - } - }, - "created": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "expiration": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "expired": { - "type": "boolean" - }, - "name": { - "type": "string" - }, - "permissions": { - "$ref": "#/components/schemas/KeyPerm" - }, - "secretAccessKey": { - "type": [ - "string", - "null" - ] - } - } - }, - "ImportKeyRequest": { - "type": "object", - "required": [ - "accessKeyId", - "secretAccessKey" - ], - "properties": { - "accessKeyId": { - "type": "string" - }, - "name": { - "type": [ - "string", - "null" - ] - }, - "secretAccessKey": { - "type": "string" - } - } - }, - "ImportKeyResponse": { - "$ref": "#/components/schemas/GetKeyInfoResponse" - }, - "InspectObjectBlock": { - "type": "object", - "required": [ - "partNumber", - "offset", - "hash", - "size" - ], - "properties": { - "hash": { - "type": "string", - "description": "Hash (blake2 sum) of the block's data" - }, - "offset": { - "type": "integer", - "format": "int64", - "description": "Offset of this block within the part", - "minimum": 0 - }, - "partNumber": { - "type": "integer", - "format": "int64", - "description": "Part number of the part containing this block, for multipart uploads", - "minimum": 0 - }, - "size": { - "type": "integer", - "format": "int64", - "description": "Length of the blocks's data", - "minimum": 0 - } - } - }, - "InspectObjectResponse": { - "type": "object", - "required": [ - "bucketId", - "key", - "versions" - ], - "properties": { - "bucketId": { - "type": "string", - "description": "ID 
of the bucket containing the inspected object" - }, - "key": { - "type": "string", - "description": "Key of the inspected object" - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/InspectObjectVersion" - }, - "description": "List of versions currently stored for this object" - } - } - }, - "InspectObjectVersion": { - "type": "object", - "required": [ - "uuid", - "timestamp", - "encrypted", - "uploading", - "aborted", - "deleteMarker", - "inline" - ], - "properties": { - "aborted": { - "type": "boolean", - "description": "Whether this is an aborted upload" - }, - "blocks": { - "type": "array", - "items": { - "$ref": "#/components/schemas/InspectObjectBlock" - }, - "description": "List of data blocks for this object version" - }, - "deleteMarker": { - "type": "boolean", - "description": "Whether this version is a delete marker (a tombstone indicating that a previous version of\nthe object has been deleted)" - }, - "encrypted": { - "type": "boolean", - "description": "Whether this object version was created with SSE-C encryption" - }, - "etag": { - "type": [ - "string", - "null" - ], - "description": "Etag of this object version" - }, - "headers": { - "type": "array", - "items": { - "type": "array", - "items": false, - "prefixItems": [ - { - "type": "string" - }, - { - "type": "string" - } - ] - }, - "description": "Metadata (HTTP headers) associated with this object version" - }, - "inline": { - "type": "boolean", - "description": "Whether the object's data is stored inline (for small objects)" - }, - "size": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "Size of the object, in bytes", - "minimum": 0 - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "Creation timestamp of this object version" - }, - "uploading": { - "type": "boolean", - "description": "Whether this object version is still uploading" - }, - "uuid": { - "type": "string", - "description": "Version ID" - 
} - } - }, - "KeyInfoBucketResponse": { - "type": "object", - "required": [ - "id", - "globalAliases", - "localAliases", - "permissions" - ], - "properties": { - "globalAliases": { - "type": "array", - "items": { - "type": "string" - } - }, - "id": { - "type": "string" - }, - "localAliases": { - "type": "array", - "items": { - "type": "string" - } - }, - "permissions": { - "$ref": "#/components/schemas/ApiBucketKeyPerm" - } - } - }, - "KeyPerm": { - "type": "object", - "properties": { - "createBucket": { - "type": "boolean" - } - } - }, - "LayoutNodeRole": { - "type": "object", - "required": [ - "id", - "zone", - "tags" - ], - "properties": { - "capacity": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "Capacity (in bytes) assigned by the cluster administrator,\nabsent for gateway nodes", - "minimum": 0 - }, - "id": { - "type": "string", - "description": "Identifier of the node" - }, - "storedPartitions": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "Number of partitions stored on this node\n(a result of the layout computation)", - "minimum": 0 - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of tags assigned by the cluster administrator" - }, - "usableCapacity": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "Capacity (in bytes) that is actually usable on this node in the current\nlayout, which is equal to `stored_partitions` × `partition_size`", - "minimum": 0 - }, - "zone": { - "type": "string", - "description": "Zone name assigned by the cluster administrator" - } - } - }, - "LayoutParameters": { - "type": "object", - "required": [ - "zoneRedundancy" - ], - "properties": { - "zoneRedundancy": { - "$ref": "#/components/schemas/ZoneRedundancy", - "description": "Minimum number of zones in which a data partition must be replicated" - } - } - }, - "ListAdminTokensResponse": { - "type": "array", - "items": { - "$ref": 
"#/components/schemas/GetAdminTokenInfoResponse" - } - }, - "ListBucketsResponse": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ListBucketsResponseItem" - } - }, - "ListBucketsResponseItem": { - "type": "object", - "required": [ - "id", - "created", - "globalAliases", - "localAliases" - ], - "properties": { - "created": { - "type": "string", - "format": "date-time" - }, - "globalAliases": { - "type": "array", - "items": { - "type": "string" - } - }, - "id": { - "type": "string" - }, - "localAliases": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BucketLocalAlias" - } - } - } - }, - "ListKeysResponse": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ListKeysResponseItem" - } - }, - "ListKeysResponseItem": { - "type": "object", - "required": [ - "id", - "name", - "expired" - ], - "properties": { - "created": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "expiration": { - "type": [ - "string", - "null" - ], - "format": "date-time" - }, - "expired": { - "type": "boolean" - }, - "id": { - "type": "string" - }, - "name": { - "type": "string" - } - } - }, - "LocalCreateMetadataSnapshotResponse": { - "default": null - }, - "LocalGetBlockInfoRequest": { - "type": "object", - "required": [ - "blockHash" - ], - "properties": { - "blockHash": { - "type": "string" - } - } - }, - "LocalGetBlockInfoResponse": { - "type": "object", - "required": [ - "blockHash", - "refcount", - "versions" - ], - "properties": { - "blockHash": { - "type": "string" - }, - "refcount": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BlockVersion" - } - } - } - }, - "LocalGetNodeInfoResponse": { - "type": "object", - "required": [ - "nodeId", - "garageVersion", - "rustVersion", - "dbEngine" - ], - "properties": { - "dbEngine": { - "type": "string", - "description": "database engine used for metadata" - }, - "garageFeatures": 
{ - "type": [ - "array", - "null" - ], - "items": { - "type": "string" - }, - "description": "build-time features enabled for this garage release" - }, - "garageVersion": { - "type": "string", - "description": "garage version running on this node" - }, - "hostname": { - "type": [ - "string", - "null" - ], - "description": "hostname of this node" - }, - "nodeId": { - "type": "string" - }, - "rustVersion": { - "type": "string", - "description": "rustc version with which this garage release was compiled" - } - } - }, - "LocalGetNodeStatisticsResponse": { - "type": "object", - "required": [ - "freeform" - ], - "properties": { - "blockManagerStats": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/NodeBlockManagerStats", - "description": "block manager statistics" - } - ] - }, - "freeform": { - "type": "string", - "description": "node statistics as a free-form string, kept for compatibility with nodes\nrunning older v2.x versions of garage" - }, - "tableStats": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/NodeTableStats" - }, - "description": "metadata table statistics" - } - } - }, - "LocalGetWorkerInfoRequest": { - "type": "object", - "required": [ - "id" - ], - "properties": { - "id": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "LocalGetWorkerInfoResponse": { - "$ref": "#/components/schemas/WorkerInfoResp" - }, - "LocalGetWorkerVariableRequest": { - "type": "object", - "properties": { - "variable": { - "type": [ - "string", - "null" - ] - } - } - }, - "LocalGetWorkerVariableResponse": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "LocalLaunchRepairOperationRequest": { - "type": "object", - "required": [ - "repairType" - ], - "properties": { - "repairType": { - "$ref": "#/components/schemas/RepairType" - } - } - }, - "LocalLaunchRepairOperationResponse": { - "default": null - }, - 
"LocalListBlockErrorsResponse": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BlockError" - } - }, - "LocalListWorkersRequest": { - "type": "object", - "properties": { - "busyOnly": { - "type": "boolean" - }, - "errorOnly": { - "type": "boolean" - } - } - }, - "LocalListWorkersResponse": { - "type": "array", - "items": { - "$ref": "#/components/schemas/WorkerInfoResp" - } - }, - "LocalPurgeBlocksRequest": { - "type": "array", - "items": { - "type": "string" - } - }, - "LocalPurgeBlocksResponse": { - "type": "object", - "required": [ - "blocksPurged", - "objectsDeleted", - "uploadsDeleted", - "versionsDeleted", - "blockRefsPurged" - ], - "properties": { - "blockRefsPurged": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "blocksPurged": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "objectsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "uploadsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "versionsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "LocalRetryBlockResyncRequest": { - "oneOf": [ - { - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "boolean" - } - } - }, - { - "type": "object", - "required": [ - "blockHashes" - ], - "properties": { - "blockHashes": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - ] - }, - "LocalRetryBlockResyncResponse": { - "type": "object", - "required": [ - "count" - ], - "properties": { - "count": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "LocalSetWorkerVariableRequest": { - "type": "object", - "required": [ - "variable", - "value" - ], - "properties": { - "value": { - "type": "string" - }, - "variable": { - "type": "string" - } - } - }, - "LocalSetWorkerVariableResponse": { - "type": "object", - "required": [ - "variable", - "value" - ], - "properties": { - "value": { - "type": "string" - }, 
- "variable": { - "type": "string" - } - } - }, - "MultiResponse_LocalCreateMetadataSnapshotResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "default": null - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalGetBlockInfoResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "blockHash", - "refcount", - "versions" - ], - "properties": { - "blockHash": { - "type": "string" - }, - "refcount": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BlockVersion" - } - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalGetNodeInfoResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - 
"type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "nodeId", - "garageVersion", - "rustVersion", - "dbEngine" - ], - "properties": { - "dbEngine": { - "type": "string", - "description": "database engine used for metadata" - }, - "garageFeatures": { - "type": [ - "array", - "null" - ], - "items": { - "type": "string" - }, - "description": "build-time features enabled for this garage release" - }, - "garageVersion": { - "type": "string", - "description": "garage version running on this node" - }, - "hostname": { - "type": [ - "string", - "null" - ], - "description": "hostname of this node" - }, - "nodeId": { - "type": "string" - }, - "rustVersion": { - "type": "string", - "description": "rustc version with which this garage release was compiled" - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalGetNodeStatisticsResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "freeform" - ], - "properties": { - "blockManagerStats": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/NodeBlockManagerStats", - "description": "block manager statistics" - } - ] - }, - "freeform": { - "type": "string", - "description": "node statistics as a free-form string, kept for compatibility with nodes\nrunning older 
v2.x versions of garage" - }, - "tableStats": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/NodeTableStats" - }, - "description": "metadata table statistics" - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalGetWorkerInfoResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "$ref": "#/components/schemas/WorkerInfoResp" - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalGetWorkerVariableResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalLaunchRepairOperationResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - 
"type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "default": null - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalListBlockErrorsResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BlockError" - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalListWorkersResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/WorkerInfoResp" - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalPurgeBlocksResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that 
were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "blocksPurged", - "objectsDeleted", - "uploadsDeleted", - "versionsDeleted", - "blockRefsPurged" - ], - "properties": { - "blockRefsPurged": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "blocksPurged": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "objectsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "uploadsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "versionsDeleted": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalRetryBlockResyncResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "count" - ], - "properties": { - "count": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "MultiResponse_LocalSetWorkerVariableResponse": { - "type": "object", - "required": [ - "success", - "error" - ], - "properties": { - "error": { - "type": "object", - "description": "Map of node id to error message, for nodes that were 
unable to complete the API\ncall", - "additionalProperties": { - "type": "string" - }, - "propertyNames": { - "type": "string" - } - }, - "success": { - "type": "object", - "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call", - "additionalProperties": { - "type": "object", - "required": [ - "variable", - "value" - ], - "properties": { - "value": { - "type": "string" - }, - "variable": { - "type": "string" - } - } - }, - "propertyNames": { - "type": "string" - } - } - } - }, - "NodeAssignedRole": { - "type": "object", - "required": [ - "zone", - "tags" - ], - "properties": { - "capacity": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "Capacity (in bytes) assigned by the cluster administrator,\nabsent for gateway nodes", - "minimum": 0 - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of tags assigned by the cluster administrator" - }, - "zone": { - "type": "string", - "description": "Zone name assigned by the cluster administrator" - } - } - }, - "NodeBlockManagerStats": { - "type": "object", - "required": [ - "rcEntries", - "resyncQueueLen", - "resyncErrors" - ], - "properties": { - "rcEntries": { - "type": "integer", - "format": "int64", - "description": "number of reference counter entries", - "minimum": 0 - }, - "resyncErrors": { - "type": "integer", - "format": "int64", - "description": "number of blocks with resync errors", - "minimum": 0 - }, - "resyncQueueLen": { - "type": "integer", - "format": "int64", - "description": "number of blocks in the resync queue", - "minimum": 0 - } - } - }, - "NodeResp": { - "type": "object", - "required": [ - "id", - "isUp", - "draining" - ], - "properties": { - "addr": { - "type": [ - "string", - "null" - ], - "description": "Socket address used by other nodes to connect to this node for RPC" - }, - "dataPartition": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": 
"#/components/schemas/FreeSpaceResp", - "description": "Total and available space on the disk partition(s) containing the data\ndirectory(ies)" - } - ] - }, - "draining": { - "type": "boolean", - "description": "Whether this node is part of an older layout version and is draining data." - }, - "garageVersion": { - "type": [ - "string", - "null" - ], - "description": "Garage version" - }, - "hostname": { - "type": [ - "string", - "null" - ], - "description": "Hostname of the node" - }, - "id": { - "type": "string", - "description": "Full-length node identifier" - }, - "isUp": { - "type": "boolean", - "description": "Whether this node is connected in the cluster" - }, - "lastSeenSecsAgo": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "description": "For disconnected nodes, the number of seconds since last contact,\nor `null` if no contact was established since Garage restarted.", - "minimum": 0 - }, - "metadataPartition": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/FreeSpaceResp", - "description": "Total and available space on the disk partition containing the\nmetadata directory" - } - ] - }, - "role": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/NodeAssignedRole", - "description": "Role assigned to this node in the current cluster layout" - } - ] - } - } - }, - "NodeRoleChange": { - "allOf": [ - { - "$ref": "#/components/schemas/NodeRoleChangeEnum" - }, - { - "type": "object", - "required": [ - "id" - ], - "properties": { - "id": { - "type": "string", - "description": "ID of the node for which this change applies" - } - } - } - ] - }, - "NodeRoleChangeEnum": { - "oneOf": [ - { - "type": "object", - "required": [ - "remove" - ], - "properties": { - "remove": { - "type": "boolean", - "description": "Set `remove` to `true` to remove the node from the layout" - } - } - }, - { - "$ref": "#/components/schemas/NodeAssignedRole" - } - ] - }, - "NodeRoleChangeRequest": { - "oneOf": [ - { - 
"type": "object", - "required": [ - "id", - "remove" - ], - "properties": { - "id": { - "type": "string", - "description": "ID of the node for which this change applies" - }, - "remove": { - "type": "boolean", - "description": "Set `remove` to `true` to remove the node from the layout" - } - } - }, - { - "allOf": [ - { - "$ref": "#/components/schemas/NodeAssignedRole" - }, - { - "type": "object", - "required": [ - "id" - ], - "properties": { - "id": { - "type": "string", - "description": "ID of the node for which this change applies" - } - } - } - ] - } - ] - }, - "NodeTableStats": { - "type": "object", - "required": [ - "tableName", - "items", - "merkleItems", - "merkleQueueLen", - "insertQueueLen", - "gcQueueLen" - ], - "properties": { - "gcQueueLen": { - "type": "integer", - "format": "int64", - "description": "number of items in the garbage collection queue", - "minimum": 0 - }, - "insertQueueLen": { - "type": "integer", - "format": "int64", - "description": "number of items in the remote insert queue", - "minimum": 0 - }, - "items": { - "type": "integer", - "format": "int64", - "description": "number of items stored in metadata table", - "minimum": 0 - }, - "merkleItems": { - "type": "integer", - "format": "int64", - "description": "size of the merkle tree representing all items in the table", - "minimum": 0 - }, - "merkleQueueLen": { - "type": "integer", - "format": "int64", - "description": "number of items in the merkle tree update queue", - "minimum": 0 - }, - "tableName": { - "type": "string", - "description": "name of metadata table" - } - } - }, - "NodeUpdateTrackers": { - "type": "object", - "required": [ - "ack", - "sync", - "syncAck" - ], - "properties": { - "ack": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "sync": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "syncAck": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "PreviewClusterLayoutChangesResponse": { - "oneOf": [ - { - 
"type": "object", - "required": [ - "error" - ], - "properties": { - "error": { - "type": "string", - "description": "Error message indicating that the layout could not be computed\nwith the provided configuration" - } - } - }, - { - "type": "object", - "required": [ - "message", - "newLayout" - ], - "properties": { - "message": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Plain-text information about the layout computation\n(do not try to parse this)" - }, - "newLayout": { - "$ref": "#/components/schemas/GetClusterLayoutResponse", - "description": "Details about the new cluster layout" - } - } - } - ] - }, - "RemoveBucketAliasResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "RepairType": { - "oneOf": [ - { - "type": "string", - "enum": [ - "tables" - ] - }, - { - "type": "string", - "enum": [ - "blocks" - ] - }, - { - "type": "string", - "enum": [ - "versions" - ] - }, - { - "type": "string", - "enum": [ - "multipartUploads" - ] - }, - { - "type": "string", - "enum": [ - "blockRefs" - ] - }, - { - "type": "string", - "enum": [ - "blockRc" - ] - }, - { - "type": "string", - "enum": [ - "rebalance" - ] - }, - { - "type": "object", - "required": [ - "scrub" - ], - "properties": { - "scrub": { - "$ref": "#/components/schemas/ScrubCommand" - } - } - }, - { - "type": "string", - "enum": [ - "aliases" - ] - }, - { - "type": "string", - "enum": [ - "clearResyncQueue" - ] - } - ] - }, - "RevertClusterLayoutResponse": { - "$ref": "#/components/schemas/GetClusterLayoutResponse" - }, - "ScrubCommand": { - "type": "string", - "enum": [ - "start", - "pause", - "resume", - "cancel" - ] - }, - "UpdateAdminTokenRequestBody": { - "type": "object", - "properties": { - "expiration": { - "type": [ - "string", - "null" - ], - "format": "date-time", - "description": "Expiration time and date, formatted according to RFC 3339" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "Name of the admin API token" - }, 
- "neverExpires": { - "type": "boolean", - "description": "Set the admin token to never expire" - }, - "scope": { - "type": [ - "array", - "null" - ], - "items": { - "type": "string" - }, - "description": "Scope of the admin API token, a list of admin endpoint names (such as\n`GetClusterStatus`, etc), or the special value `*` to allow all\nadmin endpoints. **WARNING:** Granting a scope of `CreateAdminToken` or\n`UpdateAdminToken` trivially allows for privilege escalation, and is thus\nfunctionally equivalent to granting a scope of `*`." - } - } - }, - "UpdateAdminTokenResponse": { - "$ref": "#/components/schemas/GetAdminTokenInfoResponse" - }, - "UpdateBucketRequestBody": { - "type": "object", - "properties": { - "corsRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/cors.Rule" - } - }, - "lifecycleRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/lifecycle.Rule" - } - }, - "quotas": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/ApiBucketQuotas" - } - ] - }, - "websiteAccess": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/UpdateBucketWebsiteAccess" - } - ] - } - } - }, - "UpdateBucketResponse": { - "$ref": "#/components/schemas/GetBucketInfoResponse" - }, - "UpdateBucketWebsiteAccess": { - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "type": "boolean" - }, - "errorDocument": { - "type": [ - "string", - "null" - ] - }, - "indexDocument": { - "type": [ - "string", - "null" - ] - }, - "routingRules": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/website.RoutingRule" - } - } - } - }, - "UpdateClusterLayoutRequest": { - "type": "object", - "properties": { - "parameters": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/LayoutParameters", - "description": "New layout computation parameters to use" - } - ] - }, - "roles": { 
- "type": "array", - "items": { - "$ref": "#/components/schemas/NodeRoleChangeRequest" - }, - "description": "New node roles to assign or remove in the cluster layout" - } - } - }, - "UpdateClusterLayoutResponse": { - "$ref": "#/components/schemas/GetClusterLayoutResponse" - }, - "UpdateKeyRequestBody": { - "type": "object", - "properties": { - "allow": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/KeyPerm", - "description": "Permissions to allow for the key" - } - ] - }, - "deny": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/KeyPerm", - "description": "Permissions to deny for the key" - } - ] - }, - "expiration": { - "type": [ - "string", - "null" - ], - "format": "date-time", - "description": "Expiration time and date, formatted according to RFC 3339" - }, - "name": { - "type": [ - "string", - "null" - ], - "description": "Name of the API key" - }, - "neverExpires": { - "type": "boolean", - "description": "Set the access key to never expire" - } - } - }, - "UpdateKeyResponse": { - "$ref": "#/components/schemas/GetKeyInfoResponse" - }, - "WorkerInfoResp": { - "type": "object", - "required": [ - "id", - "name", - "state", - "errors", - "consecutiveErrors", - "freeform" - ], - "properties": { - "consecutiveErrors": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "errors": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "freeform": { - "type": "array", - "items": { - "type": "string" - } - }, - "id": { - "type": "integer", - "format": "int64", - "minimum": 0 - }, - "lastError": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/WorkerLastError" - } - ] - }, - "name": { - "type": "string" - }, - "persistentErrors": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "minimum": 0 - }, - "progress": { - "type": [ - "string", - "null" - ] - }, - "queueLength": { - "type": [ - "integer", - "null" - ], - "format": "int64", - "minimum": 0 
- }, - "state": { - "$ref": "#/components/schemas/WorkerStateResp" - }, - "tranquility": { - "type": [ - "integer", - "null" - ], - "format": "int32", - "minimum": 0 - } - } - }, - "WorkerLastError": { - "type": "object", - "required": [ - "message", - "secsAgo" - ], - "properties": { - "message": { - "type": "string" - }, - "secsAgo": { - "type": "integer", - "format": "int64", - "minimum": 0 - } - } - }, - "WorkerStateResp": { - "oneOf": [ - { - "type": "string", - "enum": [ - "busy" - ] - }, - { - "type": "object", - "required": [ - "throttled" - ], - "properties": { - "throttled": { - "type": "object", - "required": [ - "durationSecs" - ], - "properties": { - "durationSecs": { - "type": "number", - "format": "float" - } - } - } - } - }, - { - "type": "string", - "enum": [ - "idle" - ] - }, - { - "type": "string", - "enum": [ - "done" - ] - } - ] - }, - "ZoneRedundancy": { - "oneOf": [ - { - "type": "object", - "description": "Partitions must be replicated in at least this number of\ndistinct zones.", - "required": [ - "atLeast" - ], - "properties": { - "atLeast": { - "type": "integer", - "description": "Partitions must be replicated in at least this number of\ndistinct zones.", - "minimum": 0 - } - } - }, - { - "type": "string", - "description": "Partitions must be replicated in as many zones as possible:\nas many zones as there are replicas, if there are enough distinct\nzones, or at least one in each zone otherwise.", - "enum": [ - "maximum" - ] - } - ] - }, - "cors.Rule": { - "type": "object", - "required": [ - "AllowedOrigin", - "AllowedMethod" - ], - "properties": { - "AllowedHeader": { - "type": "array", - "items": {} - }, - "AllowedMethod": { - "type": "array", - "items": {} - }, - "AllowedOrigin": { - "type": "array", - "items": {} - }, - "ExposeHeader": { - "type": "array", - "items": {} - }, - "ID": {}, - "MaxAgeSeconds": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - } - } - }, - 
"lifecycle.AbortIncompleteMpu": { - "type": "object", - "required": [ - "DaysAfterInitiation" - ], - "properties": { - "DaysAfterInitiation": { - "$ref": "#/components/schemas/xml.IntValue" - } - } - }, - "lifecycle.Expiration": { - "type": "object", - "properties": { - "Date": {}, - "Days": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - } - } - }, - "lifecycle.Filter": { - "type": "object", - "properties": { - "And": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/lifecycle.Filter" - } - ] - }, - "ObjectSizeGreaterThan": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - }, - "ObjectSizeLessThan": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - }, - "Prefix": {} - } - }, - "lifecycle.Rule": { - "type": "object", - "required": [ - "Status" - ], - "properties": { - "AbortIncompleteMultipartUpload": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/lifecycle.AbortIncompleteMpu" - } - ] - }, - "Expiration": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/lifecycle.Expiration" - } - ] - }, - "Filter": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/lifecycle.Filter" - } - ] - }, - "ID": {}, - "Status": {} - } - }, - "website.Condition": { - "type": "object", - "properties": { - "HttpErrorCodeReturnedEquals": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - }, - "KeyPrefixEquals": {} - } - }, - "website.Redirect": { - "type": "object", - "properties": { - "HostName": {}, - "HttpRedirectCode": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/xml.IntValue" - } - ] - }, - "Protocol": {}, - "ReplaceKeyPrefixWith": {}, - "ReplaceKeyWith": {} - } - }, - "website.RoutingRule": { - "type": "object", - "required": [ - "Redirect" - ], 
- "properties": { - "Condition": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "#/components/schemas/website.Condition" - } - ] - }, - "Redirect": { - "$ref": "#/components/schemas/website.Redirect" - } - } - }, - "xml.IntValue": { - "type": "integer", - "format": "int64" - } - }, - "securitySchemes": { - "bearerAuth": { - "type": "http", - "scheme": "bearer" - } - } - }, - "security": [ - { - "bearerAuth": [] - } - ] -} diff --git a/doc/book/build/_index.md b/doc/book/build/_index.md index 6a01ef57..021045aa 100644 --- a/doc/book/build/_index.md +++ b/doc/book/build/_index.md @@ -51,4 +51,4 @@ We are currently building this SDK for [Python](@/documentation/build/python.md# More information: - [In the reference manual](@/documentation/reference-manual/admin-api.md) - - [Full specification](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) + - [Full specifiction](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) diff --git a/doc/book/build/others.md b/doc/book/build/others.md index df055e79..341e82d5 100644 --- a/doc/book/build/others.md +++ b/doc/book/build/others.md @@ -5,13 +5,13 @@ weight = 99 ## S3 -If you are developing a new application, you may want to use Garage to store your user's media. +If you are developping a new application, you may want to use Garage to store your user's media. The S3 API that Garage uses is a standard REST API, so as long as you can make HTTP requests, you can query it. You can check the [S3 REST API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Operations_Amazon_Simple_Storage_Service.html) from Amazon to learn more. -Developing your own wrapper around the REST API is time consuming and complicated. -Instead, there are some libraries already available. +Developping your own wrapper around the REST API is time consuming and complicated. +Instead, there are some libraries already avalaible. Some of them are maintained by Amazon, some by Minio, others by the community. 
diff --git a/doc/book/connect/_index.md b/doc/book/connect/_index.md index 497f97a9..7d8e686c 100644 --- a/doc/book/connect/_index.md +++ b/doc/book/connect/_index.md @@ -23,7 +23,7 @@ To configure S3-compatible software to interact with Garage, you will need the following parameters: - An **API endpoint**: this corresponds to the HTTP or HTTPS address - used to contact the Garage server. When running Garage locally this will usually + used to contact the Garage server. When runing Garage locally this will usually be `http://127.0.0.1:3900`. In a real-world setting, you would usually have a reverse-proxy that adds TLS support and makes your Garage server available under a public hostname such as `https://garage.example.com`. diff --git a/doc/book/connect/apps/index.md b/doc/book/connect/apps/index.md index 627f1842..f52d434b 100644 --- a/doc/book/connect/apps/index.md +++ b/doc/book/connect/apps/index.md @@ -12,9 +12,8 @@ In this section, we cover the following web applications: | [Mastodon](#mastodon) | ✅ | Natively supported | | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` | | [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` | -| [Ente](#ente) | ✅ | Natively supported | -| [Pixelfed](#pixelfed) | ❓ | Natively supported | -| [Pleroma](#pleroma) | ✅ | Natively supported | +| [Pixelfed](#pixelfed) | ✅ | Natively supported | +| [Pleroma](#pleroma) | ❓ | Not yet tested | | [Lemmy](#lemmy) | ✅ | Supported with pict-rs | | [Funkwhale](#funkwhale) | ❓ | Not yet tested | | [Misskey](#misskey) | ❓ | Not yet tested | @@ -54,7 +53,7 @@ garage bucket allow nextcloud --read --write --key nextcloud-key Now edit your Nextcloud configuration file to enable object storage. On my installation, the config. file is located at the following path: `/var/www/nextcloud/config/config.php`. 
-We will add a new root key to the `$CONFIG` dictionary named `objectstore`: +We will add a new root key to the `$CONFIG` dictionnary named `objectstore`: ```php - user: - password: - -s3: - # Override the primary and secondary hot storage. The commented out values - # are the defaults. - # - hot_storage: - primary: b2-eu-cen - # secondary: wasabi-eu-central-2-v3 - - # If true, enable some workarounds to allow us to use a local minio instance - # for object storage. - # - # 1. Disable SSL. - # 2. Use "path" style S3 URLs (see `use_path_style_urls` below). - # 3. Directly download the file during replication instead of going via the - # Cloudflare worker. - # 4. Do not specify storage classes when uploading objects (since minio does - # not support them, specifically it doesn't support GLACIER). - are_local_buckets: true - - # To use "path" style S3 URLs instead of DNS-based bucket access - # default to true if you set "are_local_buckets: true" - # use_path_style_urls: true - - b2-eu-cen: # Don't change this key, it is hardcoded - key: - secret: - endpoint: garage:3900 # publicly accessible endpoint of your garage instance - region: garage - bucket: - use_path_style: true - # you can specify secondary locations, names are hardcoded as well - # wasabi-eu-central-2-v3: - # scw-eu-fr-v3: - - # and you can also specify a bucket to be used for embeddings, preview etc.. 
- # default to the first bucket - # derived-storage: wasabi-eu-central-2-derived -``` - -Finally you can run it with Docker : - -```bash -docker run -d --name ente-server --restart unless-stopped -v /path/to/museum.yaml:/museum.yaml -v /path/to/credentials.yaml:/credentials.yaml -p 8080:8080 ghcr.io/ente-io/ente-server -``` - -For more information on deployment you can check the [ente documentation](https://help.ente.io/self-hosting/) - ## Pixelfed [Pixelfed Technical Documentation > Configuration](https://docs.pixelfed.org/technical-documentation/env.html#filesystem) ## Pleroma -### Creating your bucket - -This is the usual Garage setup: - -```bash -garage key new --name pleroma-key -garage bucket create pleroma -garage bucket allow pleroma --read --write --owner --key pleroma-key -``` - -We also need to expose these buckets publicly to serve their content to users: - -```bash -garage bucket website --allow pleroma -``` - -Note the Key ID and Secret Key. - -### Configure Pleroma - -Update your Pleroma configuration like that in `/etc/pleroma/config.exs`. - -``` -config :pleroma, Pleroma.Upload, - uploader: Pleroma.Uploaders.S3, - base_url: "https://pleroma.garage.example.tld" - -config :ex_aws, :s3, - access_key_id: "GW...", - secret_access_key: "XXX", - region: "garage", - host: "api.garage.example.tld" -``` - -And restart Pleroma. 
- -You can found more information in [Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3) - -### Migrating your data - -Pleroma have an internal migration tool that can encounter some fatal error - -``` -** (EXIT from #PID<0.98.0>) an exception was raised: - ** (File.Error) could not stream "/var/lib/pleroma/uploads/09/f8": illegal operation on a directory - (elixir 1.17.3) lib/file/stream.ex:100: anonymous fn/3 in Enumerable.File.Stream.reduce/3 - (elixir 1.17.3) lib/stream.ex:1675: anonymous fn/5 in Stream.resource/3 - (elixir 1.17.3) lib/stream.ex:1891: Enumerable.Stream.do_each/4 - (elixir 1.17.3) lib/task/supervised.ex:370: Task.Supervised.stream_reduce/7 - (elixir 1.17.3) lib/enum.ex:4423: Enum.map/2 - (ex_aws_s3 2.5.8) lib/ex_aws/s3/upload.ex:141: ExAws.Operation.ExAws.S3.Upload.perform/2 - (pleroma 2.10.0) lib/pleroma/uploaders/s3.ex:60: Pleroma.Uploaders.S3.put_file/1 - (pleroma 2.10.0) lib/pleroma/uploaders/uploader.ex:49: Pleroma.Uploaders.Uploader.put_file/2 -``` - -So, use [your best tool](https://garagehq.deuxfleurs.fr/documentation/connect/cli/) to sync `/var/lib/pleroma/uploads/` in your S3. - -Then, to avoid some non existent problem (just in case of), run this command - -```bash -while true -do - rm -vr $(./bin/pleroma_ctl uploads migrate_local S3 2>&1 | grep "could not stream" | awk -F '"' '{print $2}') - sleep 5 -done -``` - -If you have many files, stop this command sometime and the command bellow (interactive) to delete local -file after upload. Then restart the loop. 
- -```bash -./bin/pleroma_ctl uploads migrate_local S3 --delete -``` - -And *voilà* +[Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3) ## Lemmy diff --git a/doc/book/connect/backup.md b/doc/book/connect/backup.md index dba6900d..7e97d777 100644 --- a/doc/book/connect/backup.md +++ b/doc/book/connect/backup.md @@ -207,13 +207,3 @@ $ plakar at @garageS3 ls ``` More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/ - -## Synology HyperBackup - -HyperBackup can be configured to upload backups to garage using a custom S3 destination. However, the HyperBackup client hardcodes the `us-east-1` region that is a critical input to the v4 signature process. If garage is not set to `us-east-1`, HyperBackup will recognize available buckets, but fail during the final setup stage. - -In garage.toml: -```toml -[s3_api] -s3_region = "us-east-1" -``` diff --git a/doc/book/connect/cli.md b/doc/book/connect/cli.md index f52cc205..6529e4b2 100644 --- a/doc/book/connect/cli.md +++ b/doc/book/connect/cli.md @@ -41,7 +41,7 @@ Some commands: # list buckets mc ls garage/ -# list objects in a bucket +# list objets in a bucket mc ls garage/my_files # copy from your filesystem to garage @@ -149,15 +149,6 @@ rclone help This will tremendously accelerate operations such as `rclone sync` or `rclone ncdu` by reducing the number of ListObjects calls that are made. -**Garage behind Cloudflare proxy:** when running Garage behind Cloudflare proxy, you might see `Response: error 403 Forbidden, Forbidden: Invalid signature` error in your garage logs or `AccessDenied: Forbidden: Invalid signature` error in rclone logs. Try adding `--s3-sign-accept-encoding=false` flag to your rclone command and see if the issue is resolved. 
- -```bash -# this throws an error -rclone lsd garage: - -# this should work -rclone lsd --s3-sign-accept-encoding=false garage: -``` ## `s3cmd` @@ -218,7 +209,7 @@ Within Cyberduck, a available within the `Preferences -> Profiles` section. This can enabled and then connections to Garage may be configured. -### Instructions for the CLI +### Instuctions for the CLI To configure duck (Cyberduck's CLI tool), start by creating its folder hierarchy: @@ -323,3 +314,4 @@ ls ``` And through the web interface at http://[::1]:8080/web/client + diff --git a/doc/book/connect/repositories.md b/doc/book/connect/repositories.md index 0ae79b1e..537b02e7 100644 --- a/doc/book/connect/repositories.md +++ b/doc/book/connect/repositories.md @@ -201,9 +201,11 @@ on the binary cache, the client will download the result from the cache instead ### Channels -Channels additionally serve Nix definitions, ie. a `.nix` file referencing +Channels additionnaly serve Nix definitions, ie. a `.nix` file referencing all the derivations you want to serve. ## Gitlab *External link:* [Gitlab Documentation > Object storage](https://docs.gitlab.com/ee/administration/object_storage.html) + + diff --git a/doc/book/cookbook/ansible.md b/doc/book/cookbook/ansible.md index 8d86a7d1..8b0d2969 100644 --- a/doc/book/cookbook/ansible.md +++ b/doc/book/cookbook/ansible.md @@ -8,12 +8,12 @@ have published Ansible roles. We list them and compare them below. 
## Comparison of Ansible roles -| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster2309 ansible-role-garage](#eddster2309-ansible-role-garage) | +| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) | |------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------| | **Runtime** | Systemd | Docker | Systemd | | **Target OS** | Any Linux | Any Linux | Any Linux | | **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 | -| **Additional software** | None | Traefik | Nginx and Keepalived (optional) | +| **Additional software** | None | Traefik | Ngnix and Keepalived (optional) | | **Automatic node connection** | ❌ | ✅ | ✅ | | **Layout management** | ❌ | ✅ | ✅ | | **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ | diff --git a/doc/book/cookbook/binary-packages.md b/doc/book/cookbook/binary-packages.md index 1e399764..ce6beb7b 100644 --- a/doc/book/cookbook/binary-packages.md +++ b/doc/book/cookbook/binary-packages.md @@ -29,10 +29,6 @@ it's stable). Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage). -```bash -pacman -S garage -``` - ## FreeBSD ```bash @@ -44,9 +40,3 @@ pkg install garage ```bash nix-shell -p garage ``` - -## conda-forge - -```bash -pixi global install garage -``` diff --git a/doc/book/cookbook/encryption.md b/doc/book/cookbook/encryption.md index 13da4bd7..bfbea0ec 100644 --- a/doc/book/cookbook/encryption.md +++ b/doc/book/cookbook/encryption.md @@ -33,7 +33,7 @@ by adding encryption at different levels. 
We would be very curious to know your needs and thougs about ideas such as encryption practices and things like key management, as we want Garage to be a -serious base platform for the development of secure, encrypted applications. +serious base platform for the developpment of secure, encrypted applications. Do not hesitate to come talk to us if you have any thoughts or questions on the subject. @@ -59,7 +59,7 @@ For standard S3 API requests, Garage does not encrypt data at rest by itself. For the most generic at rest encryption of data, we recommend setting up your storage partitions on encrypted LUKS devices. -If you are developing your own client software that makes use of S3 storage, +If you are developping your own client software that makes use of S3 storage, we recommend implementing data encryption directly on the client side and never transmitting plaintext data to Garage. This makes it easy to use an external untrusted storage provider if necessary. @@ -108,14 +108,14 @@ Protects against the following threats: - Stolen HDD -Crucially, does not protect against malicious sysadmins or remote attackers that +Crucially, does not protect againt malicious sysadmins or remote attackers that might gain access to your servers. Methods include full-disk encryption with tools such as LUKS. ## Encrypting data on the client side -Protects against the following threats: +Protects againt the following threats: - A honest-but-curious administrator - A malicious administrator that tries to corrupt your data diff --git a/doc/book/cookbook/exposing-websites.md b/doc/book/cookbook/exposing-websites.md index 74a5613d..9382a541 100644 --- a/doc/book/cookbook/exposing-websites.md +++ b/doc/book/cookbook/exposing-websites.md @@ -9,7 +9,7 @@ There are three methods to expose buckets as website: 1. using the PutBucketWebsite S3 API call, which is allowed for access keys that have the owner permission bit set -2. from the Garage CLI, by an administrator of the cluster +2. 
from the Garage CLI, by an adminstrator of the cluster 3. using the Garage administration API diff --git a/doc/book/cookbook/from-source.md b/doc/book/cookbook/from-source.md index 04b84aef..7105c999 100644 --- a/doc/book/cookbook/from-source.md +++ b/doc/book/cookbook/from-source.md @@ -20,12 +20,12 @@ sudo apt-get update sudo apt-get install build-essential ``` -## Building from source from the Forgejo repository +## Building from source from the Gitea repository The primary location for Garage's source code is the -[Forgejo repository](https://git.deuxfleurs.fr/Deuxfleurs/garage), +[Gitea repository](https://git.deuxfleurs.fr/Deuxfleurs/garage), which contains all of the released versions as well as the code -for the development of the next version. +for the developpement of the next version. Clone the repository and enter it as follows: @@ -41,7 +41,7 @@ git tag # List available tags git checkout v0.8.0 # Change v0.8.0 with the version you wish to build ``` -Otherwise you will be building a development build from the `main` branch +Otherwise you will be building a developpement build from the `main` branch that includes all of the changes to be released in the next version. Be careful that such a build might be unstable or contain bugs, and could be incompatible with nodes that run stable versions of Garage. @@ -85,14 +85,11 @@ The following feature flags are available in v0.8.0: | Feature flag | Enabled | Description | | ------------ | ------- | ----------- | | `bundled-libs` | *by default* | Use bundled version of sqlite3, zstd, lmdb and libsodium | -| `consul-discovery` | optional | Enable automatic registration and discovery
of cluster nodes through the Consul API | -| `fjall` | experimental | Enable using Fjall to store Garage's metadata | -| `journald` | optional | Enable logging to systemd-journald with
`GARAGE_LOG_TO_JOURNALD=true` environment variable set | +| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium
if available (exclusive with `bundled-libs`, build using
`cargo build --no-default-features --features system-libs`) | | `k2v` | optional | Enable the experimental K2V API (if used, all nodes on your
Garage cluster must have it enabled as well) | | `kubernetes-discovery` | optional | Enable automatic registration and discovery
of cluster nodes through the Kubernetes API | -| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata | | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API | -| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata | -| `syslog` | optional | Enable logging to Syslog with
`GARAGE_LOG_TO_SYSLOG=true` environment variable set | -| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium
if available (exclusive with `bundled-libs`, build using
`cargo build --no-default-features --features system-libs`) | | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry | +| `syslog` | optional | Enable logging to Syslog | +| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata | +| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata | diff --git a/doc/book/cookbook/kubernetes.md b/doc/book/cookbook/kubernetes.md index c1db742f..f5bceec8 100644 --- a/doc/book/cookbook/kubernetes.md +++ b/doc/book/cookbook/kubernetes.md @@ -26,7 +26,7 @@ Or deploy with custom values: helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml ``` -If you want to manage the CustomResourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart: +If you want to manage the CustomRessourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart: ```bash kubectl apply -k ../k8s/crd @@ -47,12 +47,12 @@ All possible configuration values can be found with: helm show values ./garage ``` -This is an example `values.override.yaml` for deploying in a microk8s cluster with a https s3 api ingress route: +This is an example `values.overrride.yaml` for deploying in a microk8s cluster with a https s3 api ingress route: ```yaml garage: # Use only 2 replicas per object - replicationFactor: 2 + replicationMode: "2" # Start 4 instances (StatefulSets) of garage deployment: diff --git a/doc/book/cookbook/real-world.md b/doc/book/cookbook/real-world.md index 73218132..b9927c06 100644 --- a/doc/book/cookbook/real-world.md +++ b/doc/book/cookbook/real-world.md @@ -96,14 +96,14 @@ to store 2 TB of data in total. 
## Get a Docker image Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). -We encourage you to use a fixed tag (eg. `v2.3.0`) and not the `latest` tag. -For this example, we will use the latest published version at the time of the writing which is `v2.3.0` but it's up to you +We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag. +For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). For example: ``` -docker pull dxflrs/garage:v2.3.0 +sudo docker pull dxflrs/garage:v1.3.0 ``` ## Deploying and configuring Garage @@ -171,7 +171,7 @@ docker run \ -v /etc/garage.toml:/etc/garage.toml \ -v /var/lib/garage/meta:/var/lib/garage/meta \ -v /var/lib/garage/data:/var/lib/garage/data \ - dxflrs/garage:v2.3.0 + dxflrs/garage:v1.3.0 ``` With this command line, Garage should be started automatically at each boot. @@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y version: "3" services: garage: - image: dxflrs/garage:v2.3.0 + image: dxflrs/garage:v1.3.0 network_mode: "host" restart: unless-stopped volumes: diff --git a/doc/book/cookbook/reverse-proxy.md b/doc/book/cookbook/reverse-proxy.md index 9785d561..bdc1c549 100644 --- a/doc/book/cookbook/reverse-proxy.md +++ b/doc/book/cookbook/reverse-proxy.md @@ -7,7 +7,7 @@ The main reason to add a reverse proxy in front of Garage is to provide TLS to y In production you will likely need your certificates signed by a certificate authority. The most automated way is to use a provider supporting the [ACME protocol](https://datatracker.ietf.org/doc/html/rfc8555) -such as [Let's Encrypt](https://letsencrypt.org/) or [ZeroSSL](https://zerossl.com/). 
+such as [Let's Encrypt](https://letsencrypt.org/), [ZeroSSL](https://zerossl.com/) or [Buypass Go SSL](https://www.buypass.com/ssl/products/acme). If you are only testing Garage, you can generate a self-signed certificate to follow the documentation: @@ -97,7 +97,7 @@ server { location / { proxy_pass http://s3_backend; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; + proxy_set_header Host $host; # Disable buffering to a temporary file. proxy_max_temp_file_size 0; } @@ -142,74 +142,7 @@ server { ## Apache httpd -The [Apache HTTP Server](https://httpd.apache.org/) -is a general purpose web server that includes -[reverse proxy](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html) -capabilities. - -### Exposing the S3 endpoints - -Create a new [virtual host](https://httpd.apache.org/docs/2.4/vhosts/), -obtain a certificate using -[certbot](https://eff-certbot.readthedocs.io/en/stable/using.html#apache), -and add the -[`ProxyPass`](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxypass) -and -[`ProxyPreserveHost`](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html#proxypreservehost) -options: - -```apache - - ServerName garage.example.com - - SSLCertificateFile /etc/letsencrypt/live/garage.example.com/fullchain.pem - SSLCertificateKeyFile /etc/letsencrypt/live/garage.example.com/privkey.pem - Include /etc/letsencrypt/options-ssl-apache.conf - - Header always set Strict-Transport-Security "max-age=31536000" - Header always add Content-Security-Policy upgrade-insecure-requests - - ProxyPass "/" "http://localhost:3900/" nocanon - ProxyPreserveHost on - -``` - -The `nocanon` keyword is important for -[presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html); -otherwise, - -> `mod_proxy` will canonicalise ProxyPassed URLs. -> But this may be incompatible with some backends, -> particularly those that make use of `PATH_INFO`. 
-> The optional `nocanon` keyword suppresses this -> and passes the URL path "raw" to the backend. - -### Exposing the web endpoint - -Adding static websites backed by Garage works very similarly, -with the only difference being the port selected in the `ProxyPass` directive. - -```apache - ProxyPass "/" "http://localhost:3902/" nocanon -``` - -### Using Unix sockets - -Apache can also proxy via Unix sockets instead of TCP ports, -if Garage is so configured. - -`garage.toml`: - -```toml -[s3_api] -api_bind_addr = "/run/garage/s3_api.socket" -``` - -Apache config: - -```apache - ProxyPass "/" "unix:/run/garage/s3_api.socket|http://localhost/" nocanon -``` +@TODO ## Traefik v2 @@ -339,7 +272,7 @@ Add the following configuration section [to compress response](https://doc.traef ### Add caching response -Traefik's caching middleware is only available on [enterprise version](https://doc.traefik.io/traefik-enterprise/middlewares/http-cache/), however the freely-available [Souin plugin](https://github.com/darkweak/souin#tr%C3%A6fik-container) can also do the job. (section to be completed) +Traefik's caching middleware is only available on [entreprise version](https://doc.traefik.io/traefik-enterprise/middlewares/http-cache/), however the freely-available [Souin plugin](https://github.com/darkweak/souin#tr%C3%A6fik-container) can also do the job. (section to be completed) ### Complete example diff --git a/doc/book/cookbook/systemd.md b/doc/book/cookbook/systemd.md index 820a47bf..ebff8c15 100644 --- a/doc/book/cookbook/systemd.md +++ b/doc/book/cookbook/systemd.md @@ -38,7 +38,7 @@ WantedBy=multi-user.target id is dynamically allocated by systemd (set with `DynamicUser=true`). It cannot access (read or write) home folders (`/home`, `/root` and `/run/user`), the rest of the filesystem can only be read but not written, only the path seen as -`/var/lib/garage` is writable as seen by the service. Additionally, the process +`/var/lib/garage` is writable as seen by the service. 
Additionnaly, the process can not gain new privileges over time. For this to work correctly, your `garage.toml` must be set with diff --git a/doc/book/design/_index.md b/doc/book/design/_index.md index e7098dc8..5881ab8f 100644 --- a/doc/book/design/_index.md +++ b/doc/book/design/_index.md @@ -10,7 +10,7 @@ perspective. It will allow you to understand if Garage is a good fit for you, how to better use it, how to contribute to it, what can Garage could and could not do, etc. -- **[Goals and use cases](@/documentation/design/goals.md):** This page explains why Garage was conceived and what practical use cases it targets. +- **[Goals and use cases](@/documentation/design/goals.md):** This page explains why Garage was concieved and what practical use cases it targets. - **[Related work](@/documentation/design/related-work.md):** This pages presents the theoretical background on which Garage is built, and describes other software storage solutions and why they didn't work for us. @@ -31,3 +31,5 @@ We love to talk and hear about Garage, that's why we keep a log here: - [(en, 2021-04-28) Distributed object storage is centralised](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/b1f60579a13d3c5eba7f74b1775c84639ea9b51a/doc/talks/2021-04-28_spirals-team/talk.pdf) - [(fr, 2020-12-02) Garage : jouer dans la cour des grands quand on est un hébergeur associatif](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/b1f60579a13d3c5eba7f74b1775c84639ea9b51a/doc/talks/2020-12-02_wide-team/talk.pdf) + + diff --git a/doc/book/design/benchmarks/index.md b/doc/book/design/benchmarks/index.md index 2df916e0..79cc5d62 100644 --- a/doc/book/design/benchmarks/index.md +++ b/doc/book/design/benchmarks/index.md @@ -15,14 +15,14 @@ The more a user request will require intra-cluster requests to complete, the mor This is especially true for sequential requests: requests that must wait the result of another request to be sent. We designed Garage without consensus algorithms (eg. 
Paxos or Raft) to minimize the number of sequential and parallel requests. -This series of benchmarks quantifies the impact of this design choice. +This serie of benchmarks quantifies the impact of this design choice. ### On a simple simulated network We start with a controlled environment, all the instances are running on the same (powerful enough) machine. -To control the network latency, we simulate the network with [mknet](https://git.deuxfleurs.fr/trinity-1686a/mknet) (a tool we developed, based on `tc` and the linux network stack). -To measure S3 endpoints latency, we use our own tool [s3lat](https://git.deuxfleurs.fr/quentin/s3lat/) to observe only the intra-cluster latency and not some contention on the nodes (CPU, RAM, disk I/O, network bandwidth, etc.). +To control the network latency, we simulate the network with [mknet](https://git.deuxfleurs.fr/trinity-1686a/mknet) (a tool we developped, based on `tc` and the linux network stack). +To mesure S3 endpoints latency, we use our own tool [s3lat](https://git.deuxfleurs.fr/quentin/s3lat/) to observe only the intra-cluster latency and not some contention on the nodes (CPU, RAM, disk I/O, network bandwidth, etc.). Compared to other benchmark tools, S3Lat sends only one (small) request at the same time and measures its latency. We selected 5 standard endpoints that are often in the critical path: ListBuckets, ListObjects, GetObject, PutObject and RemoveObject. @@ -32,7 +32,7 @@ In this first benchmark, we consider 5 instances that are located in a different Compared to garage, minio latency drastically increases on 3 endpoints: GetObject, PutObject, RemoveObject. -We suppose that these requests on minio make transactions over Raft, involving 4 sequential requests: 1) sending the message to the leader, 2) having the leader dispatch it to the other nodes, 3) waiting for the confirmation of followers and finally 4) committing it. With our current configuration, one Raft transaction will take around 400 ms. 
GetObject seems to correlate to 1 transaction while PutObject and RemoveObject seems to correlate to 2 or 3. Reviewing minio code would be required to confirm this hypothesis. +We suppose that these requests on minio make transactions over Raft, involving 4 sequential requests: 1) sending the message to the leader, 2) having the leader dispatch it to the other nodes, 3) waiting for the confirmation of followers and finally 4) commiting it. With our current configuration, one Raft transaction will take around 400 ms. GetObject seems to correlate to 1 transaction while PutObject and RemoveObject seems to correlate to 2 or 3. Reviewing minio code would be required to confirm this hypothesis. Conversely, garage uses an architecture similar to DynamoDB and never require global cluster coordination to answer a request. Instead, garage can always contact the right node in charge of the requested data, and can answer in as low as one request in the case of GetObject and PutObject. We also observed that Garage latency, while often lower to minio, is more dispersed: garage is still in beta and has not received any performance optimization yet. @@ -50,7 +50,7 @@ We plot a similar graph as before: This new graph is very similar to the one before, neither minio or garage seems to benefit from this new topology, but they also do not suffer from it. -Considering garage, this is expected: nodes in the same DC are put in the same zone, and then data are spread on different zones for data resiliency and availability. +Considering garage, this is expected: nodes in the same DC are put in the same zone, and then data are spread on different zones for data resiliency and availaibility. Then, in the default mode, requesting data requires to query at least 2 zones to be sure that we have the most up to date information. These requests will involve at least one inter-DC communication. In other words, we prioritize data availability and synchronization over raw performances. 
diff --git a/doc/book/design/goals.md b/doc/book/design/goals.md index 3fe80e8f..efa3cd33 100644 --- a/doc/book/design/goals.md +++ b/doc/book/design/goals.md @@ -59,13 +59,11 @@ Garage themselves for the following tasks: - Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites -- As a PowerDNS authoritative zone backend through [Lightning Stream](https://doc.powerdns.com/lightningstream/latest/index.html) and [LMDB](https://doc.powerdns.com/authoritative/backends/lmdb.html) - -- As a Mastodon media storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/) +- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/) - As a PeerTube storage backend for [neat.tube](https://neat.tube/) - As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider) Triplebit's Garage cluster is a multi-site cluster currently composed of -15 storage nodes in 3 physical locations. +10 nodes in 3 physical locations. diff --git a/doc/book/design/internals.md b/doc/book/design/internals.md index 81a11854..8e3c214e 100644 --- a/doc/book/design/internals.md +++ b/doc/book/design/internals.md @@ -94,7 +94,7 @@ delete a tombstone, the following condition has to be met: - All nodes responsible for storing this entry are aware of the existence of the tombstone, i.e. they cannot hold another version of the entry that is - superseded by the tombstone. This ensures that deleting the tombstone is + superseeded by the tombstone. This ensures that deleting the tombstone is safe and that no deleted value will come back in the system. Garage uses atomic database operations (such as compare-and-swap and @@ -141,3 +141,4 @@ rebalance of data, this would have led to the disk utilization to explode during the rebalancing, only to shrink again after 24 hours. 
The 10-minute delay is a compromise that gives good security while not having this problem of disk space explosion on rebalance. + diff --git a/doc/book/design/related-work.md b/doc/book/design/related-work.md index a8461803..84e66c4e 100644 --- a/doc/book/design/related-work.md +++ b/doc/book/design/related-work.md @@ -37,7 +37,7 @@ However, Amazon S3 source code is not open but alternatives were proposed. We identified Minio, Pithos, Swift and Ceph. Minio/Ceph enforces a total order, so properties similar to a (relaxed) filesystem. Swift and Pithos are probably the most similar to AWS S3 with their consistent hashing ring. -However Pithos is not maintained anymore. More precisely the company that published Pithos version 1 has developed a second version 2 but has not open sourced it. +However Pithos is not maintained anymore. More precisely the company that published Pithos version 1 has developped a second version 2 but has not open sourced it. Some tests conducted by the [ACIDES project](https://acides.org/) have shown that Openstack Swift consumes way more resources (CPU+RAM) that we can afford. Furthermore, people developing Swift have not designed their software for geo-distribution. There were many attempts in research too. I am only thinking to [LBFS](https://pdos.csail.mit.edu/papers/lbfs:sosp01/lbfs.pdf) that was used as a basis for Seafile. But none of them have been effectively implemented yet. @@ -63,7 +63,7 @@ Due to its industry oriented design, Ceph is also far from being *Simple* to ope In a certain way, Ceph and MinIO are closer together than they are from Garage or OpenStack Swift. **[Pithos](https://github.com/exoscale/pithos):** -Pithos has been abandoned and should probably not used yet, in the following we explain why we did not pick their design. +Pithos has been abandonned and should probably not used yet, in the following we explain why we did not pick their design. 
Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too). From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment. They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it. diff --git a/doc/book/development/devenv.md b/doc/book/development/devenv.md index 3566072c..518fd232 100644 --- a/doc/book/development/devenv.md +++ b/doc/book/development/devenv.md @@ -82,6 +82,12 @@ nix-build \ *The result is located in `result/bin`. You can pass arguments to cross compile: check `.woodpecker/release.yml` for examples.* +If you modify a `Cargo.toml` or regenerate any `Cargo.lock`, you must run `cargo2nix`: + +``` +cargo2nix -f +``` + Many tools like rclone, `mc` (minio-client), or `aws` (awscliv2) will be available in your environment and will be useful to test Garage. **This is the recommended method.** @@ -118,6 +124,23 @@ cargo fmt # format the project, run it before any commit! cargo clippy # run the linter, run it before any commit! ``` +This is specific to our project, but you will need one last tool, `cargo2nix`. +To install it, run: + +```bash +cargo install --git https://github.com/superboum/cargo2nix --branch main cargo2nix +``` + +You must use it every time you modify a `Cargo.toml` or regenerate a `Cargo.lock` file as follow: + +```bash +cargo build # Rebuild Cargo.lock if needed +cargo2nix -f +``` + +It will output a `Cargo.nix` file which is a specific `Cargo.lock` file dedicated to Nix that is required by our CI +which means you must include it in your commits. + Later, to use our scripts and integration tests, you might need additional tools. These tools are listed at the end of the `shell.nix` package in the `nativeBuildInputs` part. It is up to you to find a way to install the ones you need on your computer. 
diff --git a/doc/book/development/miscellaneous-notes.md b/doc/book/development/miscellaneous-notes.md index ee7a98d0..a421943f 100644 --- a/doc/book/development/miscellaneous-notes.md +++ b/doc/book/development/miscellaneous-notes.md @@ -3,6 +3,15 @@ title = "Miscellaneous notes" weight = 20 +++ +## Quirks about cargo2nix/rust in Nix + +If you use submodules in your crate (like `crdt` and `replication` in `garage_table`), you must list them in `default.nix` + +The Windows target does not work. It might be solvable through [overrides](https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix). Indeed, we pass `x86_64-pc-windows-gnu` but mingw needs `x86_64-w64-mingw32` + +We have a simple [PR on cargo2nix](https://github.com/cargo2nix/cargo2nix/pull/201) that fixes critical bugs but the project does not seem very active currently. We must use [my patched version of cargo2nix](https://github.com/superboum/cargo2nix) to enable i686 and armv6l compilation. We might need to contribute to cargo2nix in the future. + + ## Nix Nix has no armv7 + musl toolchains but armv7l is backward compatible with armv6l. diff --git a/doc/book/development/release-process.md b/doc/book/development/release-process.md index 476404f3..0c6701c0 100644 --- a/doc/book/development/release-process.md +++ b/doc/book/development/release-process.md @@ -23,7 +23,7 @@ This logic is defined in `nix/build_index.nix`. For each commit, we first pass the code to a formatter (rustfmt) and a linter (clippy). Then we try to build it in debug mode and run both unit tests and our integration tests. -Additionally, when releasing, our integration tests are run on the release build for amd64 and i686. +Additionally, when releasing, our integration tests are run on the release build for amd64 and i686. 
## Generated Artifacts @@ -32,7 +32,7 @@ We generate the following binary artifacts for now: - **os**: linux - **format**: static binary, docker container -Additionally we also build two web pages and one JSON document: +Additionally we also build two web pages and one JSON document: - the documentation (this website) - [the release page](https://garagehq.deuxfleurs.fr/_releases.html) - [the release list in JSON format](https://garagehq.deuxfleurs.fr/_releases.json) @@ -67,7 +67,7 @@ nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/ The previous command will only send the built package and not its dependencies. In the case of our CI pipeline, we want to cache all intermediate build steps as well. This can be done using this quite involved command (here as an example -for the `pkgs.amd64.release` package): +for the `pkgs.amd64.release` package): ```bash nix copy -j8 \ @@ -174,3 +174,5 @@ drone sign --save Deuxfleurs/garage ``` Looking at the file, you will see that most of the commands are `nix-shell` and `nix-build` commands with various parameters. + + diff --git a/doc/book/operations/durability-repairs.md b/doc/book/operations/durability-repairs.md index 8a307c84..fdf163e2 100644 --- a/doc/book/operations/durability-repairs.md +++ b/doc/book/operations/durability-repairs.md @@ -42,7 +42,7 @@ You may pause an ongoing scrub using `garage repair scrub pause`, but note that the scrub will resume automatically 24 hours later as Garage will not let your cluster run without a regular scrub. If the scrub procedure is too intensive for your servers and is slowing down your workload, the recommended solution -is to increase the "scrub tranquility" using `garage worker set scrub-tranquility`. +is to increase the "scrub tranquility" using `garage repair scrub set-tranquility`. A higher tranquility value will make Garage take longer pauses between two block verifications. Of course, scrubbing the entire data store will also take longer. 
diff --git a/doc/book/operations/layout.md b/doc/book/operations/layout.md index a0b2f31d..667e89d2 100644 --- a/doc/book/operations/layout.md +++ b/doc/book/operations/layout.md @@ -242,7 +242,7 @@ dc3 Tags Partitions Capacity Usable capacity TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%) ``` -As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximately), +As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximately), whereas the node that was already in `dc3` (node3) is used at 75%. This can be explained by the following: @@ -260,7 +260,7 @@ This can be explained by the following: data can be removed to be moved to node1. - Garage will move data in equal proportions from all possible sources, in this - case it means that it will transfer 25% of the entire data set from node3 to + case it means that it will transfer 25% of the entire data set from node3 to node1 and another 25% from node4 to node1. This explains why node3 ends with 75% utilization (100% from before minus 25% diff --git a/doc/book/operations/multi-hdd.md b/doc/book/operations/multi-hdd.md index c21b73d7..1cbcd805 100644 --- a/doc/book/operations/multi-hdd.md +++ b/doc/book/operations/multi-hdd.md @@ -40,7 +40,7 @@ First of all, Garage divides the set of all possible block hashes in a fixed number of slices (currently 1024), and assigns to each slice a primary storage location among the specified data directories. The number of slices having their primary location in each data directory -is proportional to the capacity specified in the config file. +is proportional to the capacity specified in the config file. When Garage receives a block to write, it will always write it in the primary directory of the slice that contains its hash. 
diff --git a/doc/book/operations/recovering.md b/doc/book/operations/recovering.md index fb20656d..05322b67 100644 --- a/doc/book/operations/recovering.md +++ b/doc/book/operations/recovering.md @@ -161,7 +161,4 @@ your recovery options are as follows: - **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or BTRFS to snapshot your metadata partition, refer to their specific - documentation on rolling back or copying files from an old snapshot. - Note that, depending on the properties of the filesystem and of the DB engine, - if these snapshots were taken during a write operation to the database, they may - also be corrupted and thus unfit for recovery. + documentation on rolling back or copying files from an old snapshot. diff --git a/doc/book/operations/upgrading.md b/doc/book/operations/upgrading.md index 26007dbe..a3d2bcf5 100644 --- a/doc/book/operations/upgrading.md +++ b/doc/book/operations/upgrading.md @@ -56,7 +56,7 @@ From a high level perspective, a major upgrade looks like this: 10. Enable API access (reverse step 1) 11. Monitor your cluster while load comes back, check that all your applications are happy with this new version -### Major upgrades with minimal downtime +### Major upgrades with minimal downtime There is only one operation that has to be coordinated cluster-wide: the switch of one version of the internal RPC protocol to the next. This means that an upgrade with very limited downtime can simply be performed from one major version to the next by restarting all nodes diff --git a/doc/book/quick-start/_index.md b/doc/book/quick-start/_index.md index a50e92f5..633b785a 100644 --- a/doc/book/quick-start/_index.md +++ b/doc/book/quick-start/_index.md @@ -43,10 +43,12 @@ or if you want a build customized for your system, you can [build Garage from source](@/documentation/cookbook/from-source.md). If none of these option work for you, you can also run Garage in a Docker -container. 
For simplicity, a minimal command to launch Garage using Docker is -provided in this quick start guide. We recommend reading the tutorial on -[configuring a multi-node cluster](@/documentation/cookbook/real-world.md) to -learn about the full Docker workflow for Garage. +container. When using Docker, the commands used in this guide will not work +anymore. We recommend reading the tutorial on [configuring a +multi-node cluster](@/documentation/cookbook/real-world.md) to learn about +using Garage as a Docker container. For simplicity, a minimal command to launch +Garage using Docker is provided in this quick start guide as well. + ## Configuring and starting Garage @@ -80,6 +82,9 @@ bind_addr = "[::]:3902" root_domain = ".web.garage.localhost" index = "index.html" +[k2v_api] +api_bind_addr = "[::]:3904" + [admin] api_bind_addr = "[::]:3903" admin_token = "$(openssl rand -base64 32)" @@ -90,13 +95,10 @@ EOF See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/) for complete options and values. -By default, Garage looks for its configuration file in **`/etc/garage.toml`.** -Since we have written our configuration file in the working directory, we will have to set -the following environment variable: - -```bash -export GARAGE_CONFIG_FILE=$(pwd)/garage.toml -``` +Now that your configuration file has been created, you may save it to the directory of your choice. +By default, Garage looks for **`/etc/garage.toml`.** +You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml` +at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`). As you can see, the `rpc_secret` is a 32 bytes hexadecimal string. You can regenerate it with `openssl rand -hex 32`. @@ -109,36 +111,15 @@ Garage server will not be persistent. Change these to locations on your local di your data to be persisted properly. 
-### Configuring initial access credentials - -Since `v2.3.0`, Garage can automatically create a default access key and a default storage bucket, -based on values provided in environment variables. - -To use this feature, export the following environment variables: - -```bash -export GARAGE_DEFAULT_ACCESS_KEY="GK$(openssl rand -hex 16)" -export GARAGE_DEFAULT_SECRET_KEY="$(openssl rand -hex 32)" -export GARAGE_DEFAULT_BUCKET="default-bucket" -``` - -The example above creates a random access key ID and associated secret key. -You can also provide an access key ID and secret key of your own. - ### Launching the Garage server Use the following command to launch the Garage server: -```bash -garage server --single-node --default-bucket +``` +garage -c path/to/garage.toml server ``` -The `--single-node` flag instructs Garage to automatically configure a single-node cluster without data replication. -The `--default-bucket` flag instructs Garage to create a default access key and a default bucket using the environment variables we defined above. -Both flags are optional and can be omitted, in which case you will have to follow manual configuration steps described below. - -**For older versions of Garage (before v2.3.0):** automatic configuration using `--single-node` and `--default-bucket` is not available, -you must follow the manual configuration steps. +If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`. 
Alternatively, if you cannot or do not wish to run the Garage binary directly, you may use Docker to run Garage in a container using the following command: @@ -146,58 +127,21 @@ you may use Docker to run Garage in a container using the following command: ```bash docker run \ -d \ - --name garage-container \ + --name garaged \ -p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \ - -v $(pwd)/garage.toml:/etc/garage.toml \ - -e GARAGE_DEFAULT_ACCESS_KEY \ - -e GARAGE_DEFAULT_SECRET_KEY \ - -e GARAGE_DEFAULT_BUCKET \ - dxflrs/garage:v2.3.0 - /garage server --single-node --default-bucket + -v /path/to/garage.toml:/etc/garage.toml \ + -v /path/to/garage/meta:/var/lib/garage/meta \ + -v /path/to/garage/data:/var/lib/garage/data \ + dxflrs/garage:v1.3.0 ``` -Note that this command will NOT create persistent volumes for Garage's data, so -your cluster will be wiped if the container terminates. To persist Garage's -data, you must manually add volumes for the `data` and `metadata` directories -and configure their correct paths in your `garage.toml` files (see [configuring -a multi-node cluster](@/documentation/cookbook/real-world.md)). +Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903` -Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`. - -### Checking that Garage runs correctly - -The `garage` utility is also used as a CLI tool to administrate your Garage -deployment. It needs read access to your configuration file and to the metadata directory -to obtain connection parameters to contact the local Garage node. 
- -Use the following command to show the status of your cluster: - -``` -garage status -``` - -If you are running Garage in a Docker container, you can use the following command instead: - -```bash -docker exec garage-container /garage status -``` - -This should show something like this: - -``` -==== HEALTHY NODES ==== -ID Hostname Address Tags Zone Capacity DataAvail Version -563e1ac825ee3323 linuxbox 127.0.0.1:3901 [default] dc1 19.9 GiB 19.5 GiB (97.6%) v2.3.0 -``` - -### Troubleshooting +#### Troubleshooting Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker. -When running the `garage` CLI, ensure that the path to your configuration file is correctly specified (see below), -and that it can read it and read from your metadata directory. - -You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. +You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \ Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`. ```bash @@ -210,135 +154,36 @@ Log level `info` is the default value and is recommended for most use cases. Log level `debug` can help you check why your S3 API calls are not working. +### Checking that Garage runs correctly -## Uploading and downloading from Garage +The `garage` utility is also used as a CLI tool to configure your Garage deployment. +It uses values from the TOML configuration file to find the Garage daemon running on the +local node, therefore if your configuration file is not at `/etc/garage.toml` you will +again have to specify `-c path/to/garage.toml` at each invocation. -This section will show how to download and upload files on Garage using a third-party tool named `awscli`. +If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti /garage"` +to use the Garage binary inside your container. 
- -### Install and configure `awscli` - -If you have python on your system, you can install it with: - -```bash -python -m pip install --user awscli -``` - -Now that `awscli` is installed, you must configure it to talk to your Garage -instance using the credentials defined above. Here is a simple way to create -a configuration file in `~/.awsrc` using a single command that will save the -secrets from your environment: - -```bash -cat > ~/.awsrc <=1.29.0` or `>=2.13.0`, otherwise you -need to specify `--endpoint-url` explicitly on each `awscli` invocation. - -Now, each time you want to use `awscli` on this target, run: - -```bash -source ~/.awsrc +garage status ``` -*You can create multiple files with different names if you -have multiple Garage clusters or different keys. -Switching from one cluster to another is as simple as -sourcing the right file.* - -### Example usage of `awscli` - -```bash -# list buckets -aws s3 ls - -# list objects of a bucket -aws s3 ls s3://default-bucket - -# copy from your filesystem to garage -aws s3 cp /proc/cpuinfo s3://default-bucket/cpuinfo.txt - -# copy from garage to your filesystem -aws s3 cp s3://default-bucket/cpuinfo.txt /tmp/cpuinfo.txt -``` - -Note that you can use `awscli` for more advanced operations like -creating a bucket, pre-signing a request or managing your website. -[Read the full documentation to know more](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html). - -Some features are however not implemented like ACL or policy. -Check [our S3 compatibility list](@/documentation/reference-manual/s3-compatibility.md). 
- -### Other tools for interacting with Garage - -The following tools can also be used to send and receive files from/to Garage: - -- [minio-client](@/documentation/connect/cli.md#minio-client) -- [s3cmd](@/documentation/connect/cli.md#s3cmd) -- [rclone](@/documentation/connect/cli.md#rclone) -- [Cyberduck](@/documentation/connect/cli.md#cyberduck) -- [WinSCP](@/documentation/connect/cli.md#winscp) - -An exhaustive list is maintained in the ["Integrations" > "Browsing tools" section](@/documentation/connect/_index.md). - - - -## Manual configuration - -This section provides instructions that are equivalent to using the -`--single-node` and `--default-bucket` flags for automatic configuration. If -you are using an older version of Garage (before v2.3.0), you must follow -these instructions as automatic configuration is not available. - -We will have to run quite a few `garage` administration commands to get started. -If you ever get lost, don't forget that the `help` command and the `--help` flags can help you anywhere, -the CLI tool is self-documented! Two examples: - -``` -garage help -garage bucket allow --help -``` - -### Configuring the `garage` CLI - -Remember that the `garage` CLI needs to know the path of your `garage.toml` configuration file. -If it is not in the default location of `/etc/garage.toml`, you can specify it either: - -- by setting the `GARAGE_CONFIG_FILE` environment variable; -- by adding the `-c` flag to each `garage` command, for example: `garage -c ./garage.toml status`. - -If you are running Garage in a Docker container, you can set the following alias -to provide a fake `garage`command that uses the Garage binary inside your container: - -```bash -alias garage="docker exec -ti /garage" -``` - -You can test that your `garage` CLI is configured correctly by running a basic command such as `garage status`. 
- -### Creating a cluster layout - -When you first start a cluster without automatic configuration, the output of `garage status` will look as follows: +This should show something like this: ``` ==== HEALTHY NODES ==== -ID Hostname Address Tags Zone Capacity DataAvail Version -563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED v2.3.0 +ID Hostname Address Tag Zone Capacity +563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED ``` -Creating a cluster layout for a Garage deployment means informing Garage of the -disk space available on each node of the cluster using the `-c` flag, as well -as the name of the zone (e.g. datacenter) each machine is located in using the -`-z` flag. +## Creating a cluster layout + +Creating a cluster layout for a Garage deployment means informing Garage +of the disk space available on each node of the cluster, `-c`, +as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in. For our test deployment, we are have only one node with zone named `dc1` and a capacity of `1G`, though the capacity is ignored for a single node deployment @@ -359,29 +204,38 @@ garage layout apply --version 1 ``` -### Creating buckets and keys +## Creating buckets and keys + +In this section, we will suppose that we want to create a bucket named `nextcloud-bucket` +that will be accessed through a key named `nextcloud-app-key`. + +Don't forget that `help` command and `--help` subcommands can help you anywhere, +the CLI tool is self-documented! Two examples: + +``` +garage help +garage bucket allow --help +``` + +### Create a bucket Let's take an example where we want to deploy NextCloud using Garage as the -main data storage. We will suppose that we want to create a bucket named -`nextcloud-bucket` that will be accessed through a key named -`nextcloud-app-key`. +main data storage. 
-#### Create a bucket - -First, create the bucket with the following command: +First, create a bucket with the following command: ``` garage bucket create nextcloud-bucket ``` -Check that the bucket was created properly: +Check that everything went well: ``` garage bucket list garage bucket info nextcloud-bucket ``` -#### Create an API key +### Create an API key The `nextcloud-bucket` bucket now exists on the Garage server, however it cannot be accessed until we add an API key with the proper access rights. @@ -404,14 +258,14 @@ Secret key: 7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34 Authorized buckets: ``` -Check that the key was created properly: +Check that everything works as intended: ``` garage key list garage key info nextcloud-app-key ``` -#### Allow a key to access a bucket +### Allow a key to access a bucket Now that we have a bucket and a key, we need to give permissions to the key on the bucket: @@ -430,5 +284,78 @@ You can check at any time the allowed keys on your bucket with: garage bucket info nextcloud-bucket ``` -You should now be able to read and write objects to the bucket using the -credentials created above. + +## Uploading and downloading from Garage + +To download and upload files on garage, we can use a third-party tool named `awscli`. + + +### Install and configure `awscli` + +If you have python on your system, you can install it with: + +```bash +python -m pip install --user awscli +``` + +Now that `awscli` is installed, you must configure it to talk to your Garage instance, +with your key. 
There are multiple ways to do that, the simplest one is to create a file +named `~/.awsrc` with this content: + +```bash +export AWS_ACCESS_KEY_ID=xxxx # put your Key ID here +export AWS_SECRET_ACCESS_KEY=xxxx # put your Secret key here +export AWS_DEFAULT_REGION='garage' +export AWS_ENDPOINT_URL='http://localhost:3900' + +aws --version +``` + +Note you need to have at least `awscli` `>=1.29.0` or `>=2.13.0`, otherwise you +need to specify `--endpoint-url` explicitly on each `awscli` invocation. + +Now, each time you want to use `awscli` on this target, run: + +```bash +source ~/.awsrc +``` + +*You can create multiple files with different names if you +have multiple Garage clusters or different keys. +Switching from one cluster to another is as simple as +sourcing the right file.* + +### Example usage of `awscli` + +```bash +# list buckets +aws s3 ls + +# list objects of a bucket +aws s3 ls s3://nextcloud-bucket + +# copy from your filesystem to garage +aws s3 cp /proc/cpuinfo s3://nextcloud-bucket/cpuinfo.txt + +# copy from garage to your filesystem +aws s3 cp s3://nextcloud-bucket/cpuinfo.txt /tmp/cpuinfo.txt +``` + +Note that you can use `awscli` for more advanced operations like +creating a bucket, pre-signing a request or managing your website. +[Read the full documentation to know more](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html). + +Some features are however not implemented like ACL or policy. +Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibility.md). 
+ +### Other tools for interacting with Garage + +The following tools can also be used to send and receive files from/to Garage: + +- [minio-client](@/documentation/connect/cli.md#minio-client) +- [s3cmd](@/documentation/connect/cli.md#s3cmd) +- [rclone](@/documentation/connect/cli.md#rclone) +- [Cyberduck](@/documentation/connect/cli.md#cyberduck) +- [WinSCP](@/documentation/connect/cli.md#winscp) + +An exhaustive list is maintained in the ["Integrations" > "Browsing tools" section](@/documentation/connect/_index.md). diff --git a/doc/book/reference-manual/admin-api.md b/doc/book/reference-manual/admin-api.md index e96fcaff..fcf49e8c 100644 --- a/doc/book/reference-manual/admin-api.md +++ b/doc/book/reference-manual/admin-api.md @@ -6,167 +6,41 @@ weight = 40 The Garage administration API is accessible through a dedicated server whose listen address is specified in the `[admin]` section of the configuration file (see [configuration file -reference](@/documentation/reference-manual/configuration.md)). +reference](@/documentation/reference-manual/configuration.md)) -The current version of the admin API is v2. No breaking changes to the Garage -administration API will be published outside of a major release. +**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document. +We will bump the version numbers prefixed to each API endpoint each time the syntax +or semantics change, meaning that code that relies on these endpoint will break +when changes are introduced. 
+ +Versions: + - Before Garage 0.7.2 - no admin API + - Garage 0.7.2 - admin APIv0 + - Garage 0.9.0 - admin APIv1, deprecate admin APIv0 -History of previous versions: - - Before Garage v0.7.2 - no admin API - - Garage v0.7.2 - admin API v0 - - Garage v0.9.0 - admin API v1, deprecate admin API v0 - - Garage v2.0.0 - admin API v2, deprecate admin API v1 ## Access control -### Using an API token +The admin API uses two different tokens for access control, that are specified in the config file's `[admin]` section: -Administration API tokens tokens are used as simple HTTP bearer tokens. In -other words, to authenticate access to an admin API endpoint, add the following -HTTP header to your request: +- `metrics_token`: the token for accessing the Metrics endpoint (if this token + is not set in the config file, the Metrics endpoint can be accessed without + access control); + +- `admin_token`: the token for accessing all of the other administration + endpoints (if this token is not set in the config file, access to these + endpoints is disabled entirely). + +These tokens are used as simple HTTP bearer tokens. In other words, to +authenticate access to an admin API endpoint, add the following HTTP header +to your request: ``` Authorization: Bearer ``` -### User-defined API tokens - -Cluster administrators may dynamically define administration tokens using the CLI commands under `garage admin-token`. -Such tokens may be limited in scope, meaning that they may enable access to only a subset of API calls. -They may also have an expiration date to limit their use in time. 
- -Here is an example to create an administration token that is valid for 30 days -and gives access to only a subset of API calls, allowing it to create buckets -and access keys and give keys permissions on buckets: - -```bash -$ garage admin-token create --expires-in 30d \ - --scope ListBuckets,GetBucketInfo,ListKeys,GetKeyInfo,CreateBucket,CreateKey,AllowBucketKey,DenyBucketKey \ - my-token -This is your secret bearer token, it will not be shown again by Garage: - - 8ed1830b10a276ff57061950.kOSIpxWK9zSGbTO9Xadpv3YndSFWma0_snXcYHaORXk - -==== ADMINISTRATION TOKEN INFORMATION ==== -Token ID: 8ed1830b10a276ff57061950 -Token name: my-token -Created: 2025-06-15 15:12:44.160 +02:00 -Validity: valid -Expiration: 2025-07-15 15:12:44.117 +02:00 - -Scope: ListBuckets - GetBucketInfo - ListKeys - GetKeyInfo - CreateBucket - CreateKey - AllowBucketKey - DenyBucketKey -``` - -When running this command, your token will be shown only once and **will never -be shown again by Garage**, so make sure to save it directly. The token is -hashed internally, and is identified by its prefix (32 hex digits followed by a -dot) which is saved in clear. - -When running `garage admin-token list`, you might see something like this: - -``` -ID Created Name Expiration Scope -- - metrics_token (from daemon configuration) never Metrics -8ed1830b10a276ff57061950 2025-06-15 my-token 2025-07-15 15:12:44.117 +02:00 ListBuckets, ... (8) -``` - -### Master API tokens - -The admin API can also use two different master tokens for access control, -specified in the config file's `[admin]` section: - -- `metrics_token`: the token for accessing the Metrics endpoint. If this token - is not set in the config file, the Metrics endpoint can be accessed without - access control. - -- `admin_token`: the token for accessing all of the other administration - endpoints. If this token is not set in the config file, access to these - endpoints is only possible with a user-defined admin token. 
- -With the introduction of multiple user-defined admin tokens, the use of master -API tokens is now discouraged. - - -## Using the admin API - -All of the admin API endpoints are described in the OpenAPI specification: - - - APIv2 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.html) - [OpenAPI JSON](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.json) - - APIv1 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml) - - APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml) - -Making a request to the API from the command line can be as simple as running: - -```bash -curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v2/GetClusterStatus | jq -``` - -For more advanced use cases, we recommend using an SDK. -[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md) - -### Making API calls from the `garage` CLI - -Since v2.0.0, the `garage` binary provides a subcommand `garage json-api` that -allows you to invoke the API without making an HTTP request. This can be -useful for scripting Garage deployments. - -`garage json-api` proxies API calls through Garage's internal RPC protocol, -therefore it does not require any form of authentication: RPC connection -parameters are discovered automatically to contact the locally-running Garage -instance (as when running any other `garage` CLI command). 
- -For simple calls that take no parameters, usage is as follows: - -``` -$ garage json-api GetClusterHealth -{ - "connectedNodes": 3, - "knownNodes": 3, - "partitions": 256, - "partitionsAllOk": 256, - "partitionsQuorum": 256, - "status": "healthy", - "storageNodes": 3, - "storageNodesOk": 3 -} -``` - -If you need to specify a JSON body for your call, you can add it directly after -the name of the function you are calling: - -``` -$ garage json-api CreateAdminToken '{"name": "test"}' -``` - -Or you can feed it through stdin by adding a `-` as the last command parameter: - -``` -$ garage json-api CreateAdminToken - -{"name": "test"} - -``` - -For admin API calls that would have taken query parameters in their HTTP version, these parameters can be passed in the JSON body object: - -``` -$ garage json-api GetAdminTokenInfo '{"id":"b0e6e0ace2c0b2aca4cdb2de"}' -``` - -For admin API calls that take both query parameters and a JSON body, combine them in the following fashion: - -``` -$ garage json-api UpdateAdminToken '{"id":"b0e6e0ace2c0b2aca4cdb2de", "body":{"name":"not a test"}}' -``` - -## Special administration API endpoints +## Administration API endpoints ### Metrics `GET /metrics` @@ -209,7 +83,7 @@ content-length: 102 date: Tue, 08 Aug 2023 07:22:38 GMT Garage is fully operational -Consult the full health check API endpoint at /v2/GetClusterHealth for more details +Consult the full health check API endpoint at /v0/health for more details ``` ### On-demand TLS `GET /check` @@ -252,7 +126,23 @@ $ curl -so /dev/null -w "%{http_code}" http://localhost:3903/check?domain=exampl 200 ``` + **References:** - [Using On-Demand TLS](https://caddyserver.com/docs/automatic-https#using-on-demand-tls) - [Add option for a backend check to approve use of on-demand TLS](https://github.com/caddyserver/caddy/pull/1939) - [Serving tens of thousands of domains over HTTPS with Caddy](https://caddy.community/t/serving-tens-of-thousands-of-domains-over-https-with-caddy/11179) + +### 
Cluster operations + +These endpoints have a dedicated OpenAPI spec. + - APIv1 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml) + - APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml) + +Requesting the API from the command line can be as simple as running: + +```bash +curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v0/status | jq +``` + +For more advanced use cases, we recommend using a SDK. +[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md) diff --git a/doc/book/reference-manual/configuration.md b/doc/book/reference-manual/configuration.md index 312afed0..1f583fe6 100644 --- a/doc/book/reference-manual/configuration.md +++ b/doc/book/reference-manual/configuration.md @@ -51,21 +51,17 @@ allow_punycode = false [consul_discovery] api = "catalog" -consul_http_addr = "https://127.0.0.1:8500" -tls_skip_verify = false +consul_http_addr = "http://127.0.0.1:8500" service_name = "garage-daemon" - ca_cert = "/etc/consul/consul-ca.crt" -# for `agent` API mode, unset client_cert and client_key: client_cert = "/etc/consul/consul-client.crt" client_key = "/etc/consul/consul-key.crt" - -# optionally enable `token` for authentication: +# for `agent` API mode, unset client_cert and client_key, and optionally enable `token` # token = "abcdef-01234-56789" - +tls_skip_verify = false tags = [ "dns-enabled" ] meta = { dns-acl = "allow trusted" } -datacenters = ["dc1", "dc2", "dc3"] + [kubernetes_discovery] namespace = "garage" @@ -86,7 +82,6 @@ add_host_to_metrics = true [admin] api_bind_addr = "0.0.0.0:3903" metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI=" -metrics_require_token = true admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4=" trace_sink = "http://localhost:4317" ``` @@ -102,9 +97,9 @@ 
The following gives details about each available configuration option. Top-level configuration options, in alphabetical order: [`allow_punycode`](#allow_punycode), [`allow_world_readable_secrets`](#allow_world_readable_secrets), -[`block_max_concurrent_reads`](#block_max_concurrent_reads), -[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request), +[`block_max_concurrent_reads`](`block_max_concurrent_reads), [`block_ram_buffer_max`](#block_ram_buffer_max), +[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request), [`block_size`](#block_size), [`bootstrap_peers`](#bootstrap_peers), [`compression_level`](#compression_level), @@ -132,14 +127,12 @@ The `[consul_discovery]` section: [`client_cert`](#consul_client_cert_and_key), [`client_key`](#consul_client_cert_and_key), [`consul_http_addr`](#consul_http_addr), -[`datacenters`](#consul_datacenters) [`meta`](#consul_tags_and_meta), [`service_name`](#consul_service_name), [`tags`](#consul_tags_and_meta), [`tls_skip_verify`](#consul_tls_skip_verify), [`token`](#consul_token). 
- The `[kubernetes_discovery]` section: [`namespace`](#kube_namespace), [`service_name`](#kube_service_name), @@ -157,7 +150,6 @@ The `[s3_web]` section: The `[admin]` section: [`api_bind_addr`](#admin_api_bind_addr), -[`metrics_require_token`](#admin_metrics_require_token), [`metrics_token`/`metrics_token_file`](#admin_metrics_token), [`admin_token`/`admin_token_file`](#admin_token), [`trace_sink`](#admin_trace_sink), @@ -344,7 +336,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows: | --------- | ----------------- | ------------- | | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `/db.lmdb/` | | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `/db.sqlite` | -| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`/`v2.1.0`) | `"fjall"` | `/db.fjall/` | +| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `/db.fjall/` | | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `/db/` | Sled was supported until Garage v0.9.x, and was removed in Garage v1.0. @@ -353,16 +345,8 @@ old Sled metadata databases to another engine. Performance characteristics of the different DB engines are as follows: -- **LMDB:** the recommended database engine for high-performance distributed clusters - with `replication_factor` ≥ 2. - LMDB works well, but is known to have the following limitations: - - - LMDB is prone to database corruption after an unclean shutdown (e.g. a process kill - or a power outage). It is recommended to configure - [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval) to be - able to easily recover from this situation. With `replication_factor` ≥ 2, - metadata can also be reconstructed from remote nodes upon corruption - (see [Recovering from failures](@/documentation/operations/recovering.md#corrupted_meta)). 
+- LMDB: the recommended database engine for high-performance distributed clusters. +LMDB works very well, but is known to have the following limitations: - The data format of LMDB is not portable between architectures, so for instance the Garage database of an x86-64 node cannot be moved to an ARM64 @@ -372,21 +356,30 @@ Performance characteristics of the different DB engines are as follows: node to very small database sizes due to how LMDB works; it is therefore not recommended. + - Several users have reported corrupted LMDB database files after an unclean + shutdown (e.g. a power outage). This situation can generally be recovered + from if your cluster is geo-replicated (by rebuilding your metadata db from + other nodes), or if you have saved regular snapshots at the filesystem + level. + - Keys in LMDB are limited to 511 bytes. This limit translates to limits on - object keys in S3 and sort keys in K2V that are limited to 479 bytes. + object keys in S3 and sort keys in K2V that are limted to 479 bytes. -- **Sqlite:** Garage supports Sqlite as an alternative storage backend for - metadata, which does not have the issues listed above for LMDB. Sqlite is - slower than LMDB, so it is not the best choice for high-performance storage - clusters. +- Sqlite: Garage supports Sqlite as an alternative storage backend for + metadata, which does not have the issues listed above for LMDB. + On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal + performance, which was fixed with the addition of `metadata_fsync`. + Sqlite is still probably slower than LMDB due to the way we use it, + so it is not the best choice for high-performance storage clusters, + but it should work fine in many cases. -- **Fjall:** a storage engine based on LSM trees, which theoretically allow for +- Fjall: a storage engine based on LSM trees, which theoretically allow for higher write throughput than other storage engines that are based on B-trees. 
Using Fjall could potentially improve Garage's performance significantly in write-heavy workloads. **Support for Fjall is experimental at this point**, - we have added it to Garage for evaluation purposes only. **Use it only with - test data, and report any issues to our bug tracker. Do not use it for - production workloads.** + we have added it to Garage for evaluation purposes only. **Do not use it for + production-critical workloads.** + It is possible to convert Garage's metadata directory from one format to another using the `garage convert-db` command, which should be used as follows: @@ -397,7 +390,7 @@ garage convert-db -a -i \ ``` Make sure to specify the full database path as presented in the table above -(third column), and not just the path to the metadata directory. +(third colummn), and not just the path to the metadata directory. #### `metadata_fsync` {#metadata_fsync} @@ -439,14 +432,13 @@ This might reduce the risk that a data block is lost in rare situations such as simultaneous node losing power, at the cost of a moderate drop in write performance. -Similarly to `metadata_fsync`, this is likely not necessary +Similarly to `metatada_fsync`, this is likely not necessary if geographical replication is used. #### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval} If this value is set, Garage will automatically take a snapshot of the metadata -DB file at a regular interval and save it in the metadata directory, -or in [`metadata_snapshots_dir`](#metadata_snapshots_dir) if it is set. +DB file at a regular interval and save it in the metadata directory. This parameter can take any duration string that can be parsed by the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate. @@ -455,19 +447,14 @@ corrupted, for instance after an unclean shutdown. See [this page](@/documentation/operations/recovering.md#corrupted_meta) for details. 
Garage keeps only the two most recent snapshots of the metadata DB and deletes older ones automatically. -You can also create metadata snapshots manually at any point using the -`garage meta snapshot` command. - -Using snapshots created by Garage is the best option to make snapshots of your -node's metadata for potential recovery, as they are guaranteed to be clean and -consistent, contrarily to filesystem-level snapshots that may be taken while -some writes are in-flight and thus might be corrupted. Note that taking a metadata snapshot is a relatively intensive operation as the entire data file is copied. A snapshot being taken might have performance impacts on the Garage node while it is running. If the cluster is under heavy write load when a snapshot operation is running, this might also cause the database file to grow in size significantly as pages cannot be recycled easily. +For this reason, it might be better to use filesystem-level snapshots instead +if possible. #### `disable_scrub` {#disable_scrub} @@ -555,19 +542,19 @@ awaits for one of the `block_max_concurrent_reads` slots to be available slot, it reads the entire block file to RAM and frees the slot as soon as the block file is finished reading. Only after the slot is released will the block's data start being transferred over the network. If the request fails to -acquire a reading slot within 15 seconds, it fails with a timeout error. +acquire a reading slot wihtin 15 seconds, it fails with a timeout error. Timeout events can be monitored through the `block_read_semaphore_timeouts` metric in Prometheus: a non-zero number of such events indicates an I/O bottleneck on HDD read speed. 
-#### `block_max_concurrent_writes_per_request` (since `v1.3.1` / `v2.2.0`) {#block_max_concurrent_writes_per_request} +#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request} This parameter is designed to adapt to the concurrent write performance of -different storage media. Maximum number of parallel block writes per put request. -Higher values may improve throughput but increase memory usage. +different storage media.Maximum number of parallel block writes per put request +Higher values improve throughput but increase memory usage. -Default value: 3. Recommended values: 10-30 for NVMe, 3-10 for spinning HDD. +Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD #### `lmdb_map_size` {#lmdb_map_size} @@ -618,11 +605,11 @@ storing the secret as the `GARAGE_RPC_SECRET_FILE` environment variable. #### `rpc_bind_addr` {#rpc_bind_addr} -The address and port on which to bind for inter-cluster communications -(referred to as RPC for remote procedure calls). +The address and port on which to bind for inter-cluster communcations +(reffered to as RPC for remote procedure calls). The port specified here should be the same one that other nodes will used to contact the node, even in the case of a NAT: the NAT should be configured to forward the external -port number to the same internal port number. This means that if you have several nodes running +port number to the same internal port nubmer. This means that if you have several nodes running behind a NAT, they should each use a different RPC port number. #### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing} @@ -741,18 +728,6 @@ node_prefix "" { } ``` - -#### `datacenters` {#consul_datacenters} - -Optional list of datacenters that allow garage to do service discovery when Consul is configured in WAN federation. - -Example: `datacenters = ["dc1", "dc2", "dc3"]` - -In a WAN configuration, by default the Consul services API only responds with -local LAN services. 
When a list of datacenters is specified using this option, -Garage will query the consul server API by datacenter directly, allowing for -Garage to discover nodes across the Consul WAN. - #### `tags` and `meta` {#consul_tags_and_meta} Additional list of tags and map of service meta to add during service registration. @@ -785,14 +760,14 @@ manually. #### `api_bind_addr` {#s3_api_bind_addr} The IP and port on which to bind for accepting S3 API calls. -This endpoint does not support TLS: a reverse proxy should be used to provide it. +This endpoint does not suport TLS: a reverse proxy should be used to provide it. Alternatively, since `v0.8.5`, a path can be used to create a unix socket with 0222 mode. #### `s3_region` {#s3_region} -Garage will accept S3 API calls that are targeted to the S3 region defined here. -API calls targeted to other regions will fail with a AuthorizationHeaderMalformed error +Garage will accept S3 API calls that are targetted to the S3 region defined here. +API calls targetted to other regions will fail with a AuthorizationHeaderMalformed error message that redirects the client to the correct region. #### `root_domain` {#s3_root_domain} @@ -800,7 +775,7 @@ message that redirects the client to the correct region. The optional suffix to access bucket using vhost-style in addition to path-style request. Note path-style requests are always enabled, whether or not vhost-style is configured. Configuring vhost-style S3 required a wildcard DNS entry, and possibly a wildcard TLS certificate, -but might be required by software not supporting path-style requests. +but might be required by softwares not supporting path-style requests. If `root_domain` is `s3.garage.eu`, a bucket called `my-bucket` can be interacted with using the hostname `my-bucket.s3.garage.eu`. @@ -816,7 +791,7 @@ behaviour of this module. The IP and port on which to bind for accepting HTTP requests to buckets configured for website access. 
-This endpoint does not support TLS: a reverse proxy should be used to provide it. +This endpoint does not suport TLS: a reverse proxy should be used to provide it. Alternatively, since `v0.8.5`, a path can be used to create a unix socket with 0222 mode. @@ -849,34 +824,10 @@ See [administration API reference](@/documentation/reference-manual/admin-api.md Alternatively, since `v0.8.5`, a path can be used to create a unix socket. Note that for security reasons, the socket will have 0220 mode. Make sure to set user and group permissions accordingly. -#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token} - -The token for accessing all administration functions on the admin endpoint, -with the exception of the metrics endpoint (see `metrics_token`). - -You can use any random string for this value. We recommend generating a random -token with `openssl rand -base64 32`. - -For Garage version earlier than `v2.0`, if this token is not set, -access to these endpoints is disabled entirely. - -Since Garage `v2.0`, additional admin API tokens can be defined dynamically -in your Garage cluster using administration commands. This new admin token system -is more flexible since it allows admin tokens to have an expiration date, -and to have a scope restricted to certain admin API functions. If `admin_token` -is set, it behaves as an admin token without expiration and with full scope. -Otherwise, only admin API tokens defined dynamically can be used. - -`admin_token` was introduced in Garage `v0.7.2`. -`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`. - -`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`. - #### `metrics_token`, `metrics_token_file` or `GARAGE_METRICS_TOKEN`, `GARAGE_METRICS_TOKEN_FILE` (env) {#admin_metrics_token} -The token for accessing the Prometheus metrics endpoint (`/metrics`). 
-If this token is not set, and unless `metrics_require_token` is set to `true`, -the metrics endpoint can be accessed without access control. +The token for accessing the Metrics endpoint. If this token is not set, the +Metrics endpoint can be accessed without access control. You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`. @@ -885,12 +836,17 @@ You can use any random string for this value. We recommend generating a random t `GARAGE_METRICS_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`. -#### `metrics_require_token` (since `v2.0.0`) {#admin_metrics_require_token} +#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token} -If this is set to `true`, accessing the metrics endpoint will always require -an access token. Valid tokens include the `metrics_token` if it is set, -and admin API token defined dynamically in Garage which have -the `Metrics` endpoint in their scope. +The token for accessing all of the other administration endpoints. If this +token is not set, access to these endpoints is disabled entirely. + +You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`. + +`admin_token` was introduced in Garage `v0.7.2`. +`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`. + +`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`. 
#### `trace_sink` {#admin_trace_sink} diff --git a/doc/book/reference-manual/features.md b/doc/book/reference-manual/features.md index aa801704..481aef01 100644 --- a/doc/book/reference-manual/features.md +++ b/doc/book/reference-manual/features.md @@ -46,7 +46,7 @@ to select the replication mode best suited to your use case (hint: in most cases ### Compression and deduplication -All data stored in Garage is deduplicated, and optionally compressed using +All data stored in Garage is deduplicated, and optionnally compressed using Zstd. Objects uploaded to Garage are chunked in blocks of constant sizes (see [`block_size`](@/documentation/reference-manual/configuration.md#block_size)), and the hashes of individual blocks are used to dispatch them to storage nodes @@ -84,13 +84,13 @@ exposing the same content under different domain names. Garage also supports bucket aliases which are local to a single user: this allows different users to have different buckets with the same name, thus avoiding naming collisions. -This can be helpful for instance if you want to write an application that creates per-user buckets with always the same name. +This can be helpfull for instance if you want to write an application that creates per-user buckets with always the same name. This feature is totally invisible to S3 clients and does not break compatibility with AWS. ### Cluster administration API -Garage provides a fully-fledged REST API to administer your cluster programmatically. +Garage provides a fully-fledged REST API to administer your cluster programatically. Functionality included in the admin API include: setting up and monitoring cluster nodes, managing access credentials, and managing storage buckets and bucket aliases. A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md). 
@@ -100,7 +100,7 @@ A full reference of the administration API is available [here](@/documentation/r Garage makes some internal metrics available in the Prometheus data format, which allows you to build interactive dashboards to visualize the load and internal state of your storage cluster. -For developers and performance-savvy administrators, +For developpers and performance-savvy administrators, Garage also supports exporting traces of what it does internally in OpenTelemetry format. This allows to monitor the time spent at various steps of the processing of requests, in order to detect potential performance bottlenecks. @@ -129,5 +129,5 @@ related to objects stored in an S3 bucket. In the context of our research project, [Aérogramme](https://aerogramme.deuxfleurs.fr), K2V is used to provide metadata and log storage for operations on encrypted e-mail storage. -Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md) +Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md) and on how to enable it in Garage [here](@/documentation/reference-manual/k2v.md). diff --git a/doc/book/reference-manual/k2v.md b/doc/book/reference-manual/k2v.md index a0eaf064..c01f641e 100644 --- a/doc/book/reference-manual/k2v.md +++ b/doc/book/reference-manual/k2v.md @@ -16,10 +16,10 @@ the `k2v` feature flag enabled can be obtained from our download page under with `-k2v` (example: `v0.7.2-k2v`). The specification of the K2V API can be found -[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md). +[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/doc/drafts/k2v-spec.md). This document also includes a high-level overview of K2V's design. -The K2V API uses AWSv4 signatures for authentication, same as the S3 API. 
+The K2V API uses AWSv4 signatures for authentification, same as the S3 API. The AWS region used for signature calculation is always the same as the one defined for the S3 API in the config file. @@ -55,3 +55,4 @@ cargo build --features cli --bin k2v-cli The CLI utility is self-documented, run `k2v-cli --help` to learn how to use it. There is also a short README.md in the `src/k2v-client` folder with some instructions. + diff --git a/doc/book/reference-manual/known-issues.md b/doc/book/reference-manual/known-issues.md deleted file mode 100644 index 3a825db3..00000000 --- a/doc/book/reference-manual/known-issues.md +++ /dev/null @@ -1,188 +0,0 @@ -+++ -title = "Known issues" -weight = 80 -+++ - -Issues in each section are roughly sorted by order of decreasing impact, based on actual reports from users. - -## Architectural limitations - -Issues that are caused by design decisions of Garage internals, and that can't -be fixed without major architectural changes in the codebase. - -### Metadata performance issues with many objects - -**Related issues:** - -- [#851 - Performances collapse with 10 millions pictures in a bucket](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/851) -- [#1222 - Cluster Setup Write Performance Degraded After Writing 10 Million Object (200-300Kb per object)](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/1222) - -### Very big objects cause performance degradation - -For each object, there is a single metadata entry called a `Version` that -contains a list of all of the data blocks in the object. For very big objects, -this entry can contain thousands of block references. During the uploading of -an object, this metadata entry needs to be read, deserialized, reserialized and -written for each individual data block uploaded. This means that the -complexity of an upload is `O(n²)` in the number of blocks needed. - -This manifests by excessive metadata I/O and CPU usage, and uploads eventually stalling. 
- -**Mitigation:** Increase the `block_size` configuration parameter to reduce the -number of blocks. Make sure multipart uploads use chunks that are at least -`block_size` in size, and that are an exact multiple of `block_size` to avoid -the creation of smaller blocks. - -**Long-term solution:** An architectural change in the metadata system would be -required to store block lists in many independent metadata entries instead of -one single big entry per object. - -**Related issues:** - -- [#662 - Large Files fail to upload](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/662) -- [#1366 - High CPU usage and performance degradation during long multipart uploads](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/1366) - -### No conditional writes / locking / WORM support (`if-none-match`, ...) - -This is structurally impossible to implement in Garage due to the lack of a consensus algorithm, -which is one of Garage's core design choices which we cannot reconsider. - -A semi-working, *unsafe* implementation of WORM and object locking could be -implemented, with the following constraint: only after the completion of the -first write (in case of WORM) or the setting of a lock (for object lock) can we -guarantee that the object cannot be overwritten. In case where an overwrite -requests arrives at the same time as the initial request to write or to lock -the object, we cannot implement a safe and consistent way to reject it. This -means that many practical use-cases for `if-none-match` cannot be supported -(e.g. using it to implement mutual exclusion between concurrent writers). 
- -**Related issues:** - -- [#1052 - Support conditional writes](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/1052) -- [#1127 - Feature Request: WORM (Write Once Read Many) / Object Lock Support](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/1127) - -### `CreateBucket` race condition - -Also due to the lack of a consensus algorithm, there is no mutual exclusion -between concurrent `CreateBucket` requests using the same bucket name. - -**Related issues:** - -- [#649 - Race condition in CreateBucket](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/649) - -### Metadata and data have the same replication factor - -There is a single `replication_factor` in the configuration file that applies both to data blocks and metadata entries. -This makes clusters with `replication_factor = 1` particularly vulnerable in cases of metadata corruption (see below), as there -is a single copy of the metadata for each object even in multi-node clusters. - -**Mitigation:** Do not use `replication_factor = 1`. - -**Long-term solution:** We want to allow scenarios such as replicating the -metadata on 2, 3 or more nodes and the data on only 1 or 2 nodes (for example), -so that the metadata can benefit from better redundancy without increasing the -storage costs for the entire dataset. This will require some important changes -in the codebase. - -**Related issues:** - -- [#720 - Separate replication modes for metadata/data](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/720) - -### Node count limitation - -Garage will have issues in clusters with too many nodes, it will not be able to -spread data uniformly among nodes and some nodes will fill up faster than -other. This starts to manifest when the number of nodes is bigger than `10 × -replication_factor`. This is due to the fact that Garage uses only 256 -partitions internally. - -**Mitigation:** Build clusters with fewer, bigger nodes. 
- -**Potential solution:** This can be fixed by increasing the number of -partitions in Garage. The code paths exist, there is [a `const` -somewhere](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/6fd9bba0cb55062cb1725ab961b7fa8acb9dcc61/src/rpc/layout/mod.rs#L35) -that theoretically allows to increase the number of partitions up to `2^16`, -but this has not been tested so there might be bugs. - -### Buckets are not sharded - -For each bucket, the first metadata layer that contains an index of all objects -is not sharded. This index, which includes the names and all metadata (size, -headers, ...) for each object, is stored on `$replication_factor` nodes. - -For instance with `replication_factor = 3`, a given bucket will use only 3 -specific nodes for this index (chosen at random when the bucket is created) to -store this index. In a multi-zone deployments, these nodes will be spread in -different zones. Each bucket uses a different set of 3 random nodes for its -index. - -As a consequence, very large buckets might cause uneven load distribution -within a cluster. If all of the requests on a cluster are for objects in a -single bucket, then the `$replication_factor` nodes that store the index will -become a hotspot in the cluster, with more intensive metadata access patterns. -There is no way of choosing which nodes will have this role. - -Currently, we have no report of this being an issue in practice. - -**Mitigation:** This impacts in particular clusters that are used for a single -purpose with a single bucket. This can be solved by dividing your dataset among -many buckets, using a client-side sharding strategy that you will have to -design. Use at least as many buckets as you have nodes on your cluster. - - -## Bugs - -Known bugs that are complex to diagnose and fix, and therefore have not been -fixed yet. 
- -### LMDB metadata corruption - -Many users have reported situations where the LMDB metadata db becomes -corrupted, sometimes after a forced shutdown of Garage or in case of power -loss. A corrupted database file is generally not recoverable. - -**Mitigation:** Use a `replication_factor` of at least 2. Configure automatic -snapshotting using `metadata_auto_snapshot_interval` so that in case of -corruption you can rollback to a working database. - -Note that taking filesystem-level snapshots of your `metadata_dir`, although it -is much faster and less I/O intensive than Garage's built-in snapshotting, does -not ensure that the snapshot will be consistent. If the snapshot is taking -during a metadata write, the snapshot itself might be corrupted and thus not -usable as a rollback point. Therefore, prefer using -`metadata_auto_snapshot_interval` in all cases. - -### Layout updates might require manual intervention - -In case of disconnected nodes, when changing the cluster layout to remove these -nodes and add other nodes instead, Garage might not be able to properly evict -the old nodes from the system. This is a built-in security measure to avoid any -inconsistent cluster states. - -This manifests by several cluster layout versions staying active even after a -full resync. You can diagnose this situation with `garage layout history`, -which will give you instructions to fix it. - -### Tag assignment - -In the `garage layout assign` command, the `-t` argument has to be repeated -multiple times to set multiple tags on a node. Writing multiple tags separated -by commas will result in a single string. - -## General footguns - -Choices made by the developers that users must be aware of if they don't want -to run into potential issues. 
- -### Resync tranquility is conservative by default - -By default, the worker parameters `resync-tranquility` and `resync-worker-count` are set to very conservative values, to avoid overloading nodes with I/O when data needs to be resynchronized between nodes. -This can cause issues where the resync queue grows faster than it can be cleared, which in turn causes performance issues in the rest of Garage. - -This situation is indicated by a big resync queue with few resync errors (the queue is not caused by a disconnected/malfunctionning node). -To fix it, increase the number of resync workers and reduce the resync tranquility. For instance, if you want to resync as fast as possible: - -``` -garage worker set -a resync-worker-count 8 -garage worker set -a resync-tranquility 0 -``` diff --git a/doc/book/reference-manual/s3-compatibility.md b/doc/book/reference-manual/s3-compatibility.md index c44a7b1a..b869b6f4 100644 --- a/doc/book/reference-manual/s3-compatibility.md +++ b/doc/book/reference-manual/s3-compatibility.md @@ -45,7 +45,7 @@ we suppose that OpenIO supports presigned URLs. All endpoints that are missing on Garage will return a 501 Not Implemented. Some `x-amz-` headers are not implemented. -### Core endpoints +### Core endoints | Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | |------------------------------|----------------------------------|-----------------|---------------|---------|-----| @@ -135,12 +135,12 @@ If you need this feature, please [share your use case in our dedicated issue](ht **PutBucketLifecycleConfiguration:** The only actions supported are `AbortIncompleteMultipartUpload` and `Expiration` (without the `ExpiredObjectDeleteMarker` field). 
All other operations are dependent on -either bucket versioning or storage classes which Garage currently does not +either bucket versionning or storage classes which Garage currently does not implement. The deprecated `Prefix` member directly in the the `Rule` structure/XML tag is not supported, specified prefixes must be inside the `Filter` structure/XML tag. -**GetBucketVersioning:** Stub implementation which always returns "versioning not enabled", since Garage does not yet support bucket versioning. +**GetBucketVersioning:** Stub implementation which always returns "versionning not enabled", since Garage does not yet support bucket versionning. ### Replication endpoints @@ -155,7 +155,7 @@ Please open an issue if you have a use case for replication. *Note: Ceph documentation briefly says that Ceph supports [replication through the S3 API](https://docs.ceph.com/en/latest/radosgw/multisite-sync-policy/#s3-replication-api) but with some limitations. -Additionally, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.* +Additionaly, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.* ### Locking objects @@ -197,7 +197,7 @@ Please open an issue if you have a use case. ### Vendor specific endpoints -
Display Amazon specific endpoints +
Display Amazon specific endpoints | Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | @@ -234,3 +234,4 @@ Please open an issue if you have a use case. | [SelectObjectContent](https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html) | ❌ Missing | ❌| ❌| ❌| ❌|
+ diff --git a/doc/book/working-documents/compatibility-target.md b/doc/book/working-documents/compatibility-target.md index 2ed9dad4..630d15a5 100644 --- a/doc/book/working-documents/compatibility-target.md +++ b/doc/book/working-documents/compatibility-target.md @@ -3,7 +3,7 @@ title = "S3 compatibility target" weight = 5 +++ -If there is a specific S3 functionality you have a need for, feel free to open +If there is a specific S3 functionality you have a need for, feel free to open a PR to put the corresponding endpoints higher in the list. Please explain your motivations for doing so in the PR message. diff --git a/doc/book/working-documents/design-draft.md b/doc/book/working-documents/design-draft.md index de31ba0f..8d3a31f0 100644 --- a/doc/book/working-documents/design-draft.md +++ b/doc/book/working-documents/design-draft.md @@ -68,7 +68,7 @@ Workflow for DELETE: 1. Check write permission (LDAP) 2. Get current version (or versions) in object table 3. Do the deletion of those versions NOT IN A BACKGROUND JOB THIS TIME -4. Return success to the user if we were able to delete blocks from the blocks table and entries from the object table +4. 
Return success to the user if we were able to delete blocks from the blocks table and entries from the object table To delete a version: @@ -92,7 +92,7 @@ Known issue: if someone is reading from a version that we want to delete and the - file path = /meta/(first 3 hex digits of hash)/(rest of hash) - map block hash -> set of version UUIDs where it is referenced -Useful metadata: +Useful metadata: - list of versions that reference this block in the Casandra table, so that we can do GC by checking in Cassandra that the lines still exist - list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm diff --git a/doc/book/working-documents/load-balancing.md b/doc/book/working-documents/load-balancing.md index d6cbf4cc..1a65fdd2 100644 --- a/doc/book/working-documents/load-balancing.md +++ b/doc/book/working-documents/load-balancing.md @@ -49,12 +49,12 @@ The ring construction that selects `n_token` random positions for each nodes giv is not well-balanced: the space between the tokens varies a lot, and some partitions are thus bigger than others. This problem was demonstrated in the original Dynamo DB paper. -To solve this, we want to apply a better second method for partitioning our dataset: +To solve this, we want to apply a better second method for partitioning our dataset: 1. fix an initially large number of partitions (say 1024) with evenly-spaced delimiters, 2. attribute each partition randomly to a node, with a probability - proportional to its capacity (which `n_tokens` represented in the first + proportional to its capacity (which `n_tokens` represented in the first method) For now we continue using the multi-DC ring walking described above. 
@@ -66,7 +66,7 @@ I have studied two ways to do the attribution of partitions to nodes, in a way t MagLev provided significantly better balancing, as it guarantees that the exact same number of partitions is attributed to all nodes that have the same -capacity (and that this number is proportional to the node's capacity, except +capacity (and that this number is proportional to the node's capacity, except for large values), however in both cases: - the distribution is still bad, because we use the naive multi-DC ring walking diff --git a/doc/book/working-documents/migration-04.md b/doc/book/working-documents/migration-04.md index 5aae2a42..52c56737 100644 --- a/doc/book/working-documents/migration-04.md +++ b/doc/book/working-documents/migration-04.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.3 to 0.4" -weight = 80 +weight = 20 +++ **Migrating from 0.3 to 0.4 is unsupported. This document is only intended to diff --git a/doc/book/working-documents/migration-06.md b/doc/book/working-documents/migration-06.md index 5fa29120..006b036b 100644 --- a/doc/book/working-documents/migration-06.md +++ b/doc/book/working-documents/migration-06.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.5 to 0.6" -weight = 75 +weight = 15 +++ **This guide explains how to migrate to 0.6 if you have an existing 0.5 cluster. diff --git a/doc/book/working-documents/migration-07.md b/doc/book/working-documents/migration-07.md index 8631fa99..03cdfedc 100644 --- a/doc/book/working-documents/migration-07.md +++ b/doc/book/working-documents/migration-07.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.6 to 0.7" -weight = 74 +weight = 14 +++ **This guide explains how to migrate to 0.7 if you have an existing 0.6 cluster. We don't recommend trying to migrate to 0.7 directly from 0.5 or older.** @@ -19,7 +19,7 @@ The migration steps are as follows: 2. Disable API and web access. 
Garage does not support disabling these endpoints but you can change the port number or stop your reverse proxy for instance. -3. Check once again that your cluster is healthy. Run again `garage repair --all-nodes --yes tables` which is quick. +3. Check once again that your cluster is healthy. Run again `garage repair --all-nodes --yes tables` which is quick. Also check your queues are empty, run `garage stats` to query them. 4. Turn off Garage v0.6 5. Backup the metadata folder of all your nodes: `cd /var/lib/garage ; tar -acf meta-v0.6.tar.zst meta/` diff --git a/doc/book/working-documents/migration-08.md b/doc/book/working-documents/migration-08.md index 17fe078b..b7c4c783 100644 --- a/doc/book/working-documents/migration-08.md +++ b/doc/book/working-documents/migration-08.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.7 to 0.8" -weight = 73 +weight = 13 +++ **This guide explains how to migrate to 0.8 if you have an existing 0.7 cluster. diff --git a/doc/book/working-documents/migration-09.md b/doc/book/working-documents/migration-09.md index cf5f309c..ba758093 100644 --- a/doc/book/working-documents/migration-09.md +++ b/doc/book/working-documents/migration-09.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.8 to 0.9" -weight = 72 +weight = 12 +++ **This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster. diff --git a/doc/book/working-documents/migration-1.md b/doc/book/working-documents/migration-1.md index 9a04d101..b6c0bb85 100644 --- a/doc/book/working-documents/migration-1.md +++ b/doc/book/working-documents/migration-1.md @@ -1,6 +1,6 @@ +++ title = "Migrating from 0.9 to 1.0" -weight = 71 +weight = 11 +++ **This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster. 
diff --git a/doc/book/working-documents/migration-2.md b/doc/book/working-documents/migration-2.md deleted file mode 100644 index 01d984b3..00000000 --- a/doc/book/working-documents/migration-2.md +++ /dev/null @@ -1,70 +0,0 @@ -+++ -title = "Migrating from 1.0 to 2.0" -weight = 70 -+++ - -**This guide explains how to migrate to v2.x if you have an existing v1.x.x cluster. -We don't recommend trying to migrate to v2.x directly from v0.9.x or older.** - -This migration procedure has been tested on several clusters without issues. -However, it is still a *critical procedure* that might cause issues. -**Make sure to back up all your data before attempting it!** - -You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md). - -## Changes introduced in v2.0 - -The following are **breaking changes** in Garage v2.0 that require your attention when migrating: - -- The administration API has been completely reworked. - Some calls to the `/v1/` endpoints will still work but most will not. - New endpoints are prefixed by `/v2/`. **You will need to update all your code that makes use of the admin API.** - -- `replication_mode` is no longer a supported configuration parameter, - please use `replication_factor` and `consistency_mode` instead. - -## Migration procedure - -The migration to Garage v2.0 can be done with almost no downtime, -by restarting all nodes at once in the new version. - -The migration steps are as follows: - -1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that - all data seems to be synced correctly between nodes. If you have time, do - additional `garage repair` procedures (`blocks`, `versions`, `block_refs`, - etc.) - -2. 
Ensure you have a snapshot of your Garage installation that you can restore - to in case the upgrade goes wrong, with one of the following options: - - - You may use the `garage meta snapshot --all` command - to make a backup snapshot of the metadata directories of your nodes - for backup purposes. Once this command has completed, copy the following - files and directories from the `metadata_dir` of all your nodes - to somewhere safe: `snapshots`, `cluster_layout`, `data_layout`, - `node_key`, `node_key.pub`. (If you have set the `metadata_snapshots_dir` - to a different value in your config file, back up that directory instead.) - - - If you are running a filesystem such as ZFS or BTRFS that support - snapshotting, you can create a filesystem-level snapshot of the `metadata_dir` - of all your nodes to be used as a restoration point if needed. - - - You may also make a back-up manually: turn off each node - individually; back up its metadata folder (for instance, use the following - command if your metadata directory is `/var/lib/garage/meta`: `cd - /var/lib/garage ; tar -acf meta-v1.0.tar.zst meta/`); turn it back on - again. This will allow you to take a backup of all nodes without - impacting global cluster availability. You can do all nodes of a single - zone at once as this does not impact the availability of Garage. - -3. Prepare your updated binaries and configuration files for Garage v2.0. - **Remember to update your configuration file to remove `replication_mode` and replace it by `replication_factor`.** - -4. Shut down all v1.0 nodes simultaneously, and restart them all simultaneously - in v2.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to - achieve this as fast as possible. Garage v2.0 should be in a working state - as soon as enough nodes have started. - -5. Monitor your cluster in the following hours to see if it works well under - your production load. 
diff --git a/doc/book/working-documents/testing-strategy.md b/doc/book/working-documents/testing-strategy.md index 46550b81..fff706d7 100644 --- a/doc/book/working-documents/testing-strategy.md +++ b/doc/book/working-documents/testing-strategy.md @@ -1,6 +1,6 @@ +++ title = "Testing strategy" -weight = 100 +weight = 30 +++ @@ -28,11 +28,11 @@ We should try to test in least invasive ways, i.e. minimize the impact of the te - Not making `garage` a shared library (launch using `execve`, it's perfectly fine) Instead, we should focus on building a clean outer interface for the `garage` binary, -for example loading configuration using environment variables instead of the configuration file if that's helpful for writing the tests. +for example loading configuration using environment variables instead of the configuration file if that's helpful for writing the tests. There are two reasons for this: -- Keep the source code clean and focused +- Keep the source code clean and focused - Test something that is as close as possible as the true garage that will actually be running Reminder: rules of simplicity, concerning changes to Garage's source code. @@ -71,3 +71,5 @@ Interesting blog posts on the blog of the Sled database: Misc: - [mutagen](https://github.com/llogiq/mutagen) - mutation testing is a way to assert our test quality by mutating the code and see if the mutation makes the tests fail - [fuzzing](https://rust-fuzz.github.io/book/) - cargo supports fuzzing, it could be a way to test our software reliability in presence of garbage data. + + diff --git a/doc/drafts/admin-api.md b/doc/drafts/admin-api.md index 778b4fa8..3ee948cb 100644 --- a/doc/drafts/admin-api.md +++ b/doc/drafts/admin-api.md @@ -13,12 +13,8 @@ We will bump the version numbers prefixed to each API endpoint each time the syn or semantics change, meaning that code that relies on these endpoints will break when changes are introduced. 
-The Garage administration API was introduced in version 0.7.2, and was -changed several times. - -**THIS DOCUMENT IS DEPRECATED.** We now have an OpenAPI spec which is automatically generated -from Garage's source code and is always up-to-date. See `doc/api/garage-admin-v2.html`. -Text in this document is no longer kept in sync with the admin API's actual behavior. +The Garage administration API was introduced in version 0.7.2, this document +does not apply to older versions of Garage. ## Access control @@ -56,28 +52,34 @@ Returns an HTTP status 200 if the node is ready to answer user's requests, and an HTTP status 503 (Service Unavailable) if there are some partitions for which a quorum of nodes is not available. A simple textual message is also returned in a body with content-type `text/plain`. -See `/v2/GetClusterHealth` for an API that also returns JSON output. - -### Other special endpoints - -#### CheckDomain `GET /check?domain=` - -Checks whether this Garage cluster serves a website for domain ``. -Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain. +See `/v1/health` for an API that also returns JSON output. 
### Cluster operations -#### GetClusterStatus `GET /v2/GetClusterStatus` +#### GetClusterStatus `GET /v1/status` Returns the cluster's current status in JSON, including: +- ID of the node being queried and its version of the Garage daemon - Live nodes - Currently configured cluster layout +- Staged changes to the cluster layout Example response body: ```json { + "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df", + "garageVersion": "v1.3.0", + "garageFeatures": [ + "k2v", + "lmdb", + "sqlite", + "metrics", + "bundled-libs" + ], + "rustVersion": "1.68.0", + "dbEngine": "LMDB (using Heed crate)", "layoutVersion": 5, "nodes": [ { @@ -167,7 +169,7 @@ Example response body: } ``` -#### GetClusterHealth `GET /v2/GetClusterHealth` +#### GetClusterHealth `GET /v1/health` Returns the cluster's current health in JSON format, with the following variables: @@ -176,7 +178,7 @@ Returns the cluster's current health in JSON format, with the following variable - degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions - unavailable: a quorum of write nodes is not available for some partitions - `knownNodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started -- `connectedNodes`: the number of nodes this Garage node currently has an open connection to +- `connectedNodes`: the number of nodes this Garage node currently has an open connection to - `storageNodes`: the number of storage nodes currently registered in the cluster layout - `storageNodesOk`: the number of storage nodes to which a connection is currently open - `partitions`: the total number of partitions of the data (currently always 256) @@ -200,7 +202,7 @@ Example response body: } ``` -#### ConnectClusterNodes `POST /v2/ConnectClusterNodes` +#### ConnectClusterNodes `POST /v1/connect` Instructs this Garage node to connect to other Garage nodes at specified addresses. 
@@ -230,7 +232,7 @@ Example response: ] ``` -#### GetClusterLayout `GET /v2/GetClusterLayout` +#### GetClusterLayout `GET /v1/layout` Returns the cluster's current layout in JSON, including: @@ -291,7 +293,7 @@ Example response body: } ``` -#### UpdateClusterLayout `POST /v2/UpdateClusterLayout` +#### UpdateClusterLayout `POST /v1/layout` Send modifications to the cluster layout. These modifications will be included in the staged role changes, visible in subsequent calls @@ -328,7 +330,7 @@ This returns the new cluster layout with the proposed staged changes, as returned by GetClusterLayout. -#### ApplyClusterLayout `POST /v2/ApplyClusterLayout` +#### ApplyClusterLayout `POST /v1/layout/apply` Applies to the cluster the layout changes currently registered as staged layout changes. @@ -348,11 +350,23 @@ existing layout in the cluster. This returns the message describing all the calculations done to compute the new layout, as well as the description of the layout as returned by GetClusterLayout. -#### RevertClusterLayout `POST /v2/RevertClusterLayout` +#### RevertClusterLayout `POST /v1/layout/revert` Clears all of the staged layout changes. -This requests contains an empty body. +Request body format: + +```json +{ + "version": 13 +} +``` + +Reverting the staged changes is done by incrementing the version number +and clearing the contents of the staged change list. +Similarly to the CLI, the body must include the incremented +version number, which MUST be 1 + the value of the currently +existing layout in the cluster. This returns the new cluster layout with all changes reverted, as returned by GetClusterLayout. @@ -360,7 +374,7 @@ as returned by GetClusterLayout. ### Access key operations -#### ListKeys `GET /v2/ListKeys` +#### ListKeys `GET /v1/key` Returns all API access keys in the cluster. 
@@ -379,8 +393,8 @@ Example response: ] ``` -#### GetKeyInfo `GET /v2/GetKeyInfo?id=` -#### GetKeyInfo `GET /v2/GetKeyInfo?search=` +#### GetKeyInfo `GET /v1/key?id=` +#### GetKeyInfo `GET /v1/key?search=` Returns information about the requested API access key. @@ -388,7 +402,7 @@ If `id` is set, the key is looked up using its exact identifier (faster). If `search` is set, the key is looked up using its name or prefix of identifier (slower, all keys are enumerated to do this). -Optionally, the query parameter `showSecretKey=true` can be set to reveal the +Optionally, the query parameter `showSecretKey=true` can be set to reveal the associated secret access key. Example response: @@ -454,7 +468,7 @@ Example response: } ``` -#### CreateKey `POST /v2/CreateKey` +#### CreateKey `POST /v1/key` Creates a new API access key. @@ -469,7 +483,7 @@ Request body format: } ``` This returns the key info, including the created secret key, in the same format as the result of GetKeyInfo. -#### ImportKey `POST /v2/ImportKey` +#### ImportKey `POST /v1/key/import` Imports an existing API key. This will check that the imported key is in the valid format, i.e. @@ -487,7 +501,7 @@ Request body format: } ``` This returns the key info in the same format as the result of GetKeyInfo. -#### UpdateKey `POST /v2/UpdateKey?id=` +#### UpdateKey `POST /v1/key?id=` Updates information about the specified API access key. @@ -509,14 +523,14 @@ The possible flags in `allow` and `deny` are: `createBucket`. This returns the key info in the same format as the result of GetKeyInfo. -#### DeleteKey `POST /v2/DeleteKey?id=` +#### DeleteKey `DELETE /v1/key?id=` Deletes an API access key. ### Bucket operations -#### ListBuckets `GET /v2/ListBuckets` +#### ListBuckets `GET /v1/bucket` Returns all storage buckets in the cluster. 
@@ -558,8 +572,8 @@ Example response: ] ``` -#### GetBucketInfo `GET /v2/GetBucketInfo?id=` -#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=` +#### GetBucketInfo `GET /v1/bucket?id=` +#### GetBucketInfo `GET /v1/bucket?globalAlias=` Returns information about the requested storage bucket. @@ -602,7 +616,7 @@ Example response: } ``` -#### CreateBucket `POST /v2/CreateBucket` +#### CreateBucket `POST /v1/bucket` Creates a new storage bucket. @@ -642,7 +656,7 @@ or no alias at all. Technically, you can also specify both `globalAlias` and `localAlias` and that would create two aliases, but I don't see why you would want to do that. -#### UpdateBucket `POST /v2/UpdateBucket?id=` +#### UpdateBucket `PUT /v1/bucket?id=` Updates configuration of the given bucket. @@ -674,38 +688,16 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or to remove the quotas. An absent value will be considered the same as a `null`. It is not possible to change only one of the two quotas. -#### DeleteBucket `POST /v2/DeleteBucket?id=` +#### DeleteBucket `DELETE /v1/bucket?id=` Deletes a storage bucket. A bucket cannot be deleted if it is not empty. Warning: this will delete all aliases associated with the bucket! -#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads` - -Cleanup all incomplete uploads in a bucket that are older than a specified number -of seconds. - -Request body format: - -```json -{ - "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", - "olderThanSecs": 3600 -} -``` - -Response format - -```json -{ - "uploadsDeleted": 12 -} -``` - ### Operations on permissions for keys on buckets -#### AllowBucketKey `POST /v2/AllowBucketKey` +#### BucketAllowKey `POST /v1/bucket/allow` Allows a key to do read/write/owner operations on a bucket. @@ -726,7 +718,7 @@ Request body format: Flags in `permissions` which have the value `true` will be activated. Other flags will remain unchanged. 
-#### DenyBucketKey `POST /v2/DenyBucketKey` +#### BucketDenyKey `POST /v1/bucket/deny` Denies a key from doing read/write/owner operations on a bucket. @@ -750,35 +742,19 @@ Other flags will remain unchanged. ### Operations on bucket aliases -#### AddBucketAlias `POST /v2/AddBucketAlias` +#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=&alias=` -Creates an alias for a bucket in the namespace of a specific access key. -To create a global alias, specify the `globalAlias` field. -To create a local alias, specify the `localAlias` and `accessKeyId` fields. +Empty body. Creates a global alias for a bucket. -Request body format: +#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=&alias=` -```json -{ - "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", - "globalAlias": "my-bucket" -} -``` +Removes a global alias for a bucket. -or: +#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=&accessKeyId=&alias=` -```json -{ - "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", - "accessKeyId": "GK31c2f218a2e44f485b94239e", - "localAlias": "my-bucket" -} -``` +Empty body. Creates a local alias for a bucket in the namespace of a specific access key. -#### RemoveBucketAlias `POST /v2/RemoveBucketAlias` +#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=&accessKeyId&alias=` -Removes an alias for a bucket in the namespace of a specific access key. -To remove a global alias, specify the `globalAlias` field. -To remove a local alias, specify the `localAlias` and `accessKeyId` fields. +Removes a local alias for a bucket in the namespace of a specific access key. -Request body format: same as AddBucketAlias. 
diff --git a/doc/drafts/k2v-spec.md b/doc/drafts/k2v-spec.md index b16628e2..f9696717 100644 --- a/doc/drafts/k2v-spec.md +++ b/doc/drafts/k2v-spec.md @@ -35,7 +35,7 @@ Triples in K2V are constituted of three fields: partition key in which the client wants to read/delete lists of items - a sort key (`sk`), an utf8 string that defines the index of the triplet inside its - partition; triplets are uniquely identified by their partition key + sort key + partition; triplets are uniquely identified by their partition key + sort key - a value (`v`), an opaque binary blob associated to the partition key + sort key; they are transmitted as binary when possible but in most case in the JSON API @@ -74,7 +74,7 @@ are obsoleted by the new write. **Basic insertion.** To insert a new value `v4` with context `[(node1, t2), (node2, t3)]`, in a simple case where there was no insertion in-between reading the value -mentioned above and writing `v4`, and supposing that node2 receives the +mentioned above and writing `v4`, and supposing that node2 receives the InsertItem query: - `node2` generates a timestamp `t4` such that `t4 > t3`. @@ -332,7 +332,7 @@ Inserts a single item. This request does not use JSON, the body is sent directly To supersede previous values, the HTTP header `X-Garage-Causality-Token` should be set to the causality token returned by a previous read on this key. This -header can be omitted for the first writes to the key. +header can be omitted for the first writes to the key. Example query: @@ -397,7 +397,7 @@ smallest partition key that exists. It returns partition keys in increasing order, or decreasing order if `reverse` is set to `true`, and stops when either of the following conditions is met: -1. if `end` is specified, the partition key `end` is reached or surpassed (if it +1. if `end` is specified, the partition key `end` is reached or surpassed (if it is reached exactly, it is not included in the result) 2. 
if `limit` is specified, `limit` partition keys have been listed @@ -491,7 +491,7 @@ the triplet is inserted for the first time, the causality token should be set to The value is expected to be a base64-encoded binary blob. The value `null` can also be used to delete the triplet while preserving causality information: this -allows to know if a delete has happened concurrently with an insert, in which +allows to know if a delete has happened concurrently with an insert, in which case both are preserved and returned on reads (see below). Partition keys and sort keys are utf8 strings which are stored sorted by @@ -540,7 +540,7 @@ JSON struct with the following fields: For each of the searches, triplets are listed and returned separately. The semantics of `prefix`, `start`, `end`, `limit` and `reverse` are the same as for ReadIndex. The -additional parameter `singleItem` allows to get a single item, whose sort key +additional parameter `singleItem` allows to get a single item, whose sort key is the one given in `start`. Parameters `conflictsOnly` and `tombstones` control additional filters on the items that are returned. diff --git a/doc/optimal_layout_report/geodistrib.tex b/doc/optimal_layout_report/geodistrib.tex index 56d4c925..bb6f0391 100644 --- a/doc/optimal_layout_report/geodistrib.tex +++ b/doc/optimal_layout_report/geodistrib.tex @@ -59,7 +59,7 @@ To link the effective storage capacity of the cluster to partition assignment, w \end{equation} This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored blocks. -Every node $n$ will store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in the $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/p_n$. 
This remark leads us to define the optimal size that we will want to maximize: +Every node $n$ will store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in the $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/p_n$. This remark leads us to define the optimal size that we will want to maximize: \begin{equation} \label{eq:optimal} diff --git a/doc/optimal_layout_report/optimal_layout.tex b/doc/optimal_layout_report/optimal_layout.tex index 42c9d3fd..005e7b50 100644 --- a/doc/optimal_layout_report/optimal_layout.tex +++ b/doc/optimal_layout_report/optimal_layout.tex @@ -38,7 +38,7 @@ We would like to compute an assignment of nodes to partitions. We will impose so \end{equation} This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored large objects. -Every node $n$ will store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize: +Every node $n$ will store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize: \begin{equation} \label{eq:optimal} @@ -62,7 +62,7 @@ For now, in the following, we ask the following redundancy constraint: \textbf{Mode 3:} every partition needs to be assignated to three nodes. We try to spread the three nodes over different zones as much as possible. -\textbf{Warning:} This is a working document written incrementally. The last version of the algorithm is the \textbf{parametric assignment} described in the next section. +\textbf{Warning:} This is a working document written incrementally. 
The last version of the algorithm is the \textbf{parametric assignment} described in the next section. \section{Computation of a parametric assignment} @@ -318,7 +318,7 @@ $$ $$ which is the universal upper bound on $s^*$. Hence any optimal utilization $(n_v)$ can be modified to another optimal utilization such that $n_v\ge \hat{n}_v$ -Because $z_0$ cannot store more than $N$ partition occurrences, in any assignment, at least $2N$ partitions must be assignated to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define +Because $z_0$ cannot store more than $N$ partition occurrences, in any assignment, at least $2N$ partitions must be assignated to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define $$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_1}}N\right\rfloor$$ for every $v\in z_1$. @@ -351,7 +351,7 @@ Define $3N$ tokens $t_1,\ldots, t_{3N}\in V$ as follows: Then for $1\le i \le N$, define the triplet $T_i$ to be $(t_i, t_{i+N}, t_{i+2N})$. Since the same nodes of a zone appear contiguously, the three nodes of a triplet must belong to three distinct zones. -However simple, this solution to go from an utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two link, instead of spreading the bandwidth use over many other links to other nodes. To achieve this goal, we will reframe the search of an assignment as a flow problem. and in the flow algorithm, we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets. 
+However simple, this solution to go from an utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two link, instead of spreading the bandwidth use over many other links to other nodes. To achieve this goal, we will reframe the search of an assignment as a flow problem. and in the flow algorithm, we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets. \begin{figure} \centering @@ -436,7 +436,7 @@ T_3=(b,c,d'). $$ One can check that in this case, it is impossible to minimize both the number of zone and node changes. -Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to get the new utilization rate: this could lead to blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in section \ref{sec:opt_assign}, we can explore the neighbours in a random order in the graph algorithms, to spread the triplets distribution. +Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to get the new utilization rate: this could lead to blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in section \ref{sec:opt_assign}, we can explore the neighbours in a random order in the graph algorithms, to spread the triplets distribution. 
\subsubsection{Minimizing the zone discrepancy} @@ -550,8 +550,8 @@ We give some considerations of worst case complexity for these algorithms. In th Algorithm \ref{alg:util} can be implemented with complexity $O(\#V^2)$. The complexity of the function call at line \ref{lin:subutil} is $O(\#V)$. The difference between the sum of the subutilizations and $3N$ is at most the sum of the rounding errors when computing the $\hat{n}_v$. Hence it is bounded by $\#V$ and the loop at line \ref{lin:loopsub} is iterated at most $\#V$ times. Finding the minimizing $v$ at line \ref{lin:findmin} takes $O(\#V)$ operations (naively, we could also use a heap). Algorithm \ref{alg:opt} can be implemented with complexity $O(N^3\times \#Z)$. The flow graph has $O(N+\#Z)$ vertices and $O(N\times \#Z)$ edges. Dinic's algorithm has complexity $O(\#\mathrm{Vertices}^2\#\mathrm{Edges})$ hence in our case it is $O(N^3\times \#Z)$. - -Algorithm \ref{alg:mini} can be implemented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}. + +Algorithm \ref{alg:mini} can be implented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}. The graph $G_T$ has $O(N)$ vertices and $O(N\times \#Z)$ edges under assumption \eqref{hyp:A} and respectively $O(N\times \#Z)$ vertices and $O(N\times \#V)$ edges under assumption \eqref{hyp:B}. The loop at line \ref{lin:repeat} is iterated at most $N$ times since the distance between $T$ and $T'$ decreases at every iteration. Bellman-Ford algorithm has complexity $O(\#\mathrm{Vertices}\#\mathrm{Edges})$, which in our case amounts to $O(N^2\# Z)$ under \eqref{hyp:A} and $O(N^2 \#Z \#V)$ under \eqref{hyp:B}. \begin{algorithm} @@ -637,7 +637,7 @@ We try to maximize $s^*$ defined in \eqref{eq:optimal}. 
So we can compute the op \subsection{Computation of a candidate assignment} -To compute a candidate assignment (that does not optimize zone spreading nor distance to a previous assignment yet), we can use the following flow problem. +To compute a candidate assignment (that does not optimize zone spreading nor distance to a previous assignment yet), we can use the folowing flow problem. Define the oriented weighted graph $(X,E)$. The set of vertices $X$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices $\mathbf{x}_p, \mathbf{u}^+_p, \mathbf{u}^-_p$ for every partition $p$, vertices $\mathbf{y}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{z}_v$ for every node $v$. @@ -680,14 +680,14 @@ Given the flow $f$, let $G_f=(X',E_f)$ be the multi-graph where $X' = X\setminus \end{itemize} To summarize, arcs are oriented left to right if they correspond to a presence of flow in $f$, and right to left if they correspond to an absence of flow. They are positively weighted if we want them to stay at their current state, and negatively if we want them to switch. Let us compute the weight of such graph. -\begin{multiline*} +\begin{multline*} w(G_f) = \sum_{e\in E_f} w(e_f) \\ = (\alpha - \beta -\gamma) N_1 + (\alpha +\beta - \gamma) N_2 + (\alpha+\beta+\gamma) N_3 \\ + \#V\times N - 4 \sum_p 3-\#(T_p\cap T'_p) \\ =(\#V-12+\alpha-\beta-\gamma)\times N + 4Q_V + 2\beta N_2 + 2(\beta+\gamma) N_3 \\ -\end{multiline*} +\end{multline*} As for the mode 3-strict, one can check that the difference of two such graphs corresponding to the same $(n_v)$ is always eulerian. Hence we can navigate in this class with the same greedy algorithm that discovers positive cycles and flips them. 
diff --git a/doc/talks/2025-10-06-josy/.gitignore b/doc/talks/2025-10-06-josy/.gitignore deleted file mode 100644 index 9f1f00e6..00000000 --- a/doc/talks/2025-10-06-josy/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -* - -!*.txt -!*.md - -!assets - -!.gitignore -!*.svg -!*.png -!*.jpg -!*.tex -!Makefile -!.gitignore -!assets/*.drawio.pdf - -!talk.pdf diff --git a/doc/talks/2025-10-06-josy/Makefile b/doc/talks/2025-10-06-josy/Makefile deleted file mode 100644 index f0aae6a8..00000000 --- a/doc/talks/2025-10-06-josy/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -ASSETS=../assets/lattice/lattice1.pdf_tex \ - ../assets/lattice/lattice2.pdf_tex \ - ../assets/lattice/lattice3.pdf_tex \ - ../assets/lattice/lattice4.pdf_tex \ - ../assets/lattice/lattice5.pdf_tex \ - ../assets/lattice/lattice6.pdf_tex \ - ../assets/lattice/lattice7.pdf_tex \ - ../assets/lattice/lattice8.pdf_tex \ - ../assets/logos/deuxfleurs.pdf \ - ../assets/timeline-22-24.pdf - -talk.pdf: talk.tex $(ASSETS) - pdflatex talk.tex - -%.pdf: %.svg - inkscape -D -z --file=$^ --export-pdf=$@ - -%.pdf_tex: %.svg - inkscape -D -z --file=$^ --export-pdf=$@ --export-latex diff --git a/doc/talks/2025-10-06-josy/talk.pdf b/doc/talks/2025-10-06-josy/talk.pdf deleted file mode 100644 index 2194908a..00000000 Binary files a/doc/talks/2025-10-06-josy/talk.pdf and /dev/null differ diff --git a/doc/talks/2025-10-06-josy/talk.tex b/doc/talks/2025-10-06-josy/talk.tex deleted file mode 100644 index aa483766..00000000 --- a/doc/talks/2025-10-06-josy/talk.tex +++ /dev/null @@ -1,702 +0,0 @@ -\nonstopmode -\documentclass[aspectratio=169,xcolor={svgnames}]{beamer} -\usepackage[utf8]{inputenc} -% \usepackage[frenchb]{babel} -\usepackage{amsmath} -\usepackage{mathtools} -\usepackage{breqn} -\usepackage{multirow} -\usetheme{boxes} -\usepackage{graphicx} -\usepackage{import} -\usepackage{adjustbox} -\usepackage[absolute,overlay]{textpos} -%\useoutertheme[footline=authortitle,subsection=false]{miniframes} 
-%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes} -\useoutertheme{infolines} -\setbeamertemplate{headline}{} - -\beamertemplatenavigationsymbolsempty - -\definecolor{TitleOrange}{RGB}{255,137,0} -\setbeamercolor{title}{fg=TitleOrange} -\setbeamercolor{frametitle}{fg=TitleOrange} - -\definecolor{ListOrange}{RGB}{255,145,5} -\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$} - -\definecolor{verygrey}{RGB}{70,70,70} -\setbeamercolor{normal text}{fg=verygrey} - - -\usepackage{tabu} -\usepackage{multicol} -\usepackage{vwcol} -\usepackage{stmaryrd} -\usepackage{graphicx} - -\usepackage[normalem]{ulem} - -\AtBeginSection[]{ - \begin{frame} - \vfill - \centering - \begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title} - \usebeamerfont{title}\insertsectionhead\par% - \end{beamercolorbox} - \vfill - \end{frame} -} - -\title{Garage, an S3 backend as reliable as possible} -\author{Garage Authors} -\date{JoSy S3, 2025-10-08} - -\begin{document} - -\begin{frame} - \centering - \includegraphics[width=.3\linewidth]{../../sticker/Garage.png} - \vspace{1em} - - {\large\bf Garage, an S3 backend as reliable as possible} - \vspace{1em} - - \url{https://garagehq.deuxfleurs.fr/}\\ - \url{mailto:garagehq@deuxfleurs.fr}\\ - \texttt{\#garage:deuxfleurs.fr} on Matrix -\end{frame} - - -\section{Meet Garage} - -\begin{frame} - \frametitle{A non-profit initiative} - - - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf} - \end{column} - \begin{column}{.8\textwidth} - \textbf{Part of a degrowth initiative}\\ - Garage has been created at Deuxfleurs where we experiment running Internet services without datacenter on commodity and refurbished hardware. 
- \end{column} - - \end{columns} - \vspace{2em} - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/community.png} - \end{column} - \begin{column}{.8\textwidth} - \textbf{Developed by a community}\\ - {\small Some recent contributors: Arthur C, Charles H, dongdigua, Etienne L, Jonah A, Julien K, Lapineige, MagicRR, Milas B, Niklas M, RockWolf, Schwitzd, trinity-1686a, Xavier S, babykart, Baptiste J, eddster2309, James O'C, Joker9944, Maximilien R, Renjaya RZ, Yureka...} - \end{column} - - \end{columns} - \vspace{2em} - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/AGPLv3_Logo.png} - \end{column} - \begin{column}{.8\textwidth} - \textbf{Owned by nobody, open-core is impossible, zero VC money}\\ - AGPL + no Contributor License Agreement = Garage ownership spreads among hundredth of contributors. - \end{column} - - \end{columns} -\end{frame} - -\begin{frame} - \frametitle{Getting support for Garage} - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg} - \end{column} - \begin{column}{.4\textwidth} - \textbf{Alex Auvolat}\\ - PhD; co-founder of Deuxfleurs\\ - Garage maintainer, Freelance - \end{column} - \begin{column}{.3\textwidth} - \centering - \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/support.png} - \end{column} - \begin{column}{.1\textwidth} - ~ - \end{column} - \end{columns} - \vspace{2em} - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/quentin.jpg} - \end{column} - \begin{column}{.4\textwidth} - \textbf{Quentin Dufour}\\ - PhD; co-founder of Deuxfleurs\\ - Garage contributor, Freelance - \end{column} - \begin{column}{.4\textwidth} - For support requests, write at: \\ - \url{garagehq@deuxfleurs.fr} - \end{column} - \end{columns} 
- \vspace{2em} - \begin{columns}[t] - \begin{column}{.2\textwidth} - \centering - \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/armael.jpg} - \end{column} - \begin{column}{.4\textwidth} - \textbf{Armaël Guéneau}\\ - PhD; member of Deuxfleurs\\ - Garage contributor, Freelance - \end{column} - \begin{column}{.4\textwidth} - Eligible: email support, architecture design, specific feature development, etc. - \end{column} - \end{columns} - - -\end{frame} - -\begin{frame} - \frametitle{Our initial goal} - - \centering - \Large - - Being a self-sovereign community to be free of our degrowth choice - - $\big\downarrow$ - - As web citizens, datacenters are big black boxes. \\ - We want to leave them to autonoumously manage our servers. - - $\big\downarrow$ - - We want reliable services without relying on dedicated hardware or places. - -\end{frame} - -\begin{frame} - \frametitle{Building a resilient system with cheap stuff} - - \only<1,4-7>{ - \begin{itemize} - \item \textcolor<5->{gray}{Commodity hardware (e.g. old desktop PCs)\\ - \vspace{.5em} - \visible<4->{{\footnotesize (can die at any time)}}} - \vspace{1.5em} - \item<5-> \textcolor<7->{gray}{Regular Internet (e.g. 
FTTB, FTTH) and power grid connections\\ - \vspace{.5em} - \visible<6->{{\footnotesize (can be unavailable randomly)}}} - \vspace{1.5em} - \item<7-> \textbf{Geographical redundancy} (multi-site replication) - \end{itemize} - } - \only<2>{ - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/neptune.jpg} - \end{center} - } - \only<3>{ - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/atuin.jpg} - \end{center} - } - \only<8>{ - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf} - \end{center} - } -\end{frame} - -\begin{frame} - \frametitle{Object storage: a crucial component} - \begin{center} - \includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg} - \hspace{3em} - \visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}} - \hspace{3em} - \visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}} - \end{center} - \vspace{1em} - S3: a de-facto standard, many compatible applications - - \vspace{1em} - \visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments} - - \vspace{1em} - \visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}} -\end{frame} - -\begin{frame} - \frametitle{CRDTs / weak consistency instead of consensus} - - \underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types) - - \vspace{2em} - Why not Raft, Paxos, ...? Issues of consensus algorithms: - - \vspace{1em} - \begin{itemize} - \item<2-> \textbf{Software complexity} - \vspace{1em} - \item<3-> \textbf{Performance issues:} - \vspace{.5em} - \begin{itemize} - \item<4-> The leader is a \textbf{bottleneck} for all requests\\ - \vspace{.5em} - \item<5-> \textbf{Sensitive to higher latency} between nodes - \vspace{.5em} - \item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. 
node going down) - \end{itemize} - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{The data model of object storage} - Object storage is basically a \textbf{key-value store}: - \vspace{.5em} - - {\scriptsize - \begin{center} - \begin{tabular}{|l|p{7cm}|} - \hline - \textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\ - \hline - \hline - \texttt{index.html} & - \texttt{Content-Type: text/html; charset=utf-8} \newline - \texttt{Content-Length: 24929} \newline - \texttt{} \\ - \hline - \texttt{img/logo.svg} & - \texttt{Content-Type: text/svg+xml} \newline - \texttt{Content-Length: 13429} \newline - \texttt{} \\ - \hline - \texttt{download/index.html} & - \texttt{Content-Type: text/html; charset=utf-8} \newline - \texttt{Content-Length: 26563} \newline - \texttt{} \\ - \hline - \end{tabular} - \end{center} - } - - \vspace{1em} - \begin{itemize} - \item<2> Maps well to CRDT data types - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Performance gains in practice} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png} - \end{center} -\end{frame} - -% ======================================== OPERATING -% ======================================== OPERATING -% ======================================== OPERATING - - -\section{Production clusters} - -\begin{frame} - \frametitle{Deployment kinds} - - \includegraphics[width=.9\linewidth]{../assets/cluster_kind.png} - \vspace{1em} - -\end{frame} - -\begin{frame} - \frametitle{How big they are?} - - \includegraphics[width=.9\linewidth]{../assets/cluster_size.png} - \vspace{1em} - - \textit{"Petabyte storage setup for a video site. Nginx as CDN in-front using garage-s3-website feature. Each storage node has ~64TB storage with raid10, no replication within garage. 25gbit nic. haproxy to loadbalance across 5 nodes. 
mostly reads with very few writes."} - - \vspace{1em} - \textit{"We currently manage 7 Garage nodes, 28TB total storage, 6M blocks for 3M objects and 4TB of object data. We have been running Garage in production for 2.5 years."} - -\end{frame} - -\begin{frame} - \frametitle{Operating Garage} - \begin{center} - \only<1-2>{ - \includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png} - \\\vspace{1em} - \visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}} - } - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Garage's architecture} - \begin{center} - \only<1>{\includegraphics[width=.45\linewidth]{../assets/garage.drawio.pdf}}% - \only<2>{\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}}% - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Digging deeper} - \begin{center} - \only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}} - \only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}} - \only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Potential limitations and bottlenecks} - \begin{itemize} - \item Global: - \begin{itemize} - \item Max. 
$\sim$100 nodes per cluster (excluding gateways) - \end{itemize} - \vspace{1em} - \item Metadata: - \begin{itemize} - \item One big bucket = bottleneck, object list on 3 nodes only - \end{itemize} - \vspace{1em} - \item Block manager: - \begin{itemize} - \item Lots of small files on disk - \item Processing the resync queue can be slow - \end{itemize} - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Deployment advice for very large clusters} - \begin{itemize} - \item Metadata storage: - \begin{itemize} - \item ZFS mirror (x2) on fast NVMe - \item Use LMDB storage engine - \end{itemize} - \vspace{.5em} - \item Data block storage: - \begin{itemize} - \item Use Garage's native multi-HDD support - \item XFS on individual drives - \item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking) - \item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically - \end{itemize} - \vspace{.5em} - \item Other : - \begin{itemize} - \item Split data over several buckets - \item Use less than 100 storage nodes - \item Use gateway nodes - \end{itemize} - \vspace{.5em} - \end{itemize} -\end{frame} - - -\begin{frame} - \frametitle{Focus on Deuxfleurs} - - Host institutional websites, partnership with a web agency. - Matrix media backend. - - Plan to use it as an email backend for an internally developed email server. 
- -\end{frame} - - -% ======================================== TIMELINE -% ======================================== TIMELINE -% ======================================== TIMELINE - -\section{Recent developments} - -% ====================== v0.7.0 =============================== - -\begin{frame} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{April 2022 - Garage v0.7.0} - Focus on \underline{observability and ecosystem integration} - \vspace{2em} - \begin{itemize} - \item \textbf{Monitoring:} metrics and traces, using OpenTelemetry - \vspace{1em} - \item Replication modes with 1 or 2 copies / weaker consistency - \vspace{1em} - \item Kubernetes integration for node discovery - \vspace{1em} - \item Admin API (v0.7.2) - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Metrics (Prometheus + Grafana)} - \begin{center} - \includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Traces (Jaeger)} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png} - \end{center} -\end{frame} - -% ====================== v0.8.0 =============================== - -\begin{frame} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{November 2022 - Garage v0.8.0} - Focus on \underline{performance} - \vspace{2em} - \begin{itemize} - \item \textbf{Alternative metadata DB engines} (LMDB, Sqlite) - \vspace{1em} - \item \textbf{Performance improvements:} block streaming, various optimizations... - \vspace{1em} - \item Bucket quotas (max size, max \#objects) - \vspace{1em} - \item Quality of life improvements, observability, etc. 
- \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{About metadata DB engines} - \textbf{Issues with Sled:} - \vspace{1em} - \begin{itemize} - \item Huge files on disk - \vspace{.5em} - \item Unpredictable performance, especially on HDD - \vspace{.5em} - \item API limitations - \vspace{.5em} - \item Not actively maintained - \end{itemize} - - \vspace{2em} - \textbf{LMDB:} very stable, good performance, file size is reasonable\\ - \textbf{Sqlite} also available as a second choice - - \vspace{1em} - Sled will be removed in Garage v1.0 -\end{frame} - -\begin{frame} - \frametitle{DB engine performance comparison} - \begin{center} - \includegraphics[width=.6\linewidth]{../assets/perf/db_engine.png} - \end{center} - NB: Sqlite was slow due to synchronous mode, now configurable -\end{frame} - -\begin{frame} - \frametitle{Block streaming} - \begin{center} - \only<1>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-1.png}} - \only<2>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-2.png}} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{TTFB benchmark} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/perf/ttfb.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Throughput benchmark} - \begin{center} - \includegraphics[width=.7\linewidth]{../assets/perf/io-0.7-0.8-minio.png} - \end{center} -\end{frame} - -% ====================== v0.9.0 =============================== - -\begin{frame} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{October 2023 - Garage v0.9.0} - Focus on \underline{streamlining \& usability} - \vspace{2em} - \begin{itemize} - \item Support multiple HDDs per node - \vspace{1em} - \item S3 compatibility: - \vspace{1em} - \begin{itemize} - \item support basic lifecycle configurations - \vspace{.5em} - \item allow for multipart upload part retries - \end{itemize} - \vspace{1em} - \item LMDB by 
default, deprecation of Sled - \vspace{1em} - \item New layout computation algorithm - \end{itemize} -\end{frame} - - -\begin{frame} - \frametitle{Layout computation} - \begin{overprint} - \onslide<1> - \begin{center} - \includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png} - \end{center} - \onslide<2> - \begin{center} - \includegraphics[width=.7\linewidth]{../assets/map.png} - \end{center} - \end{overprint} - \vspace{1em} - Garage stores replicas on different zones when possible -\end{frame} - -\begin{frame} - \frametitle{What a "layout" is} - \textbf{A layout is a precomputed index table:} - \vspace{1em} - - {\footnotesize - \begin{center} - \begin{tabular}{|l|l|l|l|} - \hline - \textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\ - \hline - \hline - Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\ - \hline - Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\ - \hline - Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\ - \hline - \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\ - \hline - Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\ - \hline - \end{tabular} - \end{center} - } - - \vspace{2em} - \visible<2->{ - The index table is built centrally using an optimal algorithm,\\ - then propagated to all nodes - } - - \vspace{1em} - \visible<3->{ - \footnotesize - Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798. 
- } -\end{frame} - - - -% ====================== v1.0.0 =============================== - -\begin{frame} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{April 2024 - Garage v1.0.0} - Focus on \underline{consistency, security \& stability} - \vspace{2em} - \begin{itemize} - \item Fix consistency issues when reshuffling data (Jepsen testing) - \vspace{1em} - \item \textbf{Security audit} by Radically Open Security - \vspace{1em} - \item Misc. S3 features (SSE-C, checksums, ...) and compatibility fixes - \end{itemize} -\end{frame} - -% ====================== v2.0.0 =============================== - -\begin{frame} - \begin{center} - \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Garage v2.0.0} - Focus on \underline{} - \vspace{2em} - \begin{itemize} - \item TODO - \end{itemize} -\end{frame} - - -\begin{frame} - \frametitle{Currently funding...} - - \textit{...} -\end{frame} - -\begin{frame} - \frametitle{We run community surveys} - \begin{center} - \includegraphics[width=.6\linewidth]{../assets/survey_requested_features.png} - \end{center} -\end{frame} - -% ======================================== END -% ======================================== END -% ======================================== END - -\begin{frame} - \frametitle{Where to find us} - \begin{center} - \includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\ - \vspace{-1em} - \url{https://garagehq.deuxfleurs.fr/}\\ - \url{mailto:garagehq@deuxfleurs.fr}\\ - \texttt{\#garage:deuxfleurs.fr} on Matrix - - \vspace{1.5em} - \includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png} - \includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png} - \end{center} -\end{frame} - -\end{document} - -%% vim: set ts=4 sw=4 tw=0 noet spelllang=en : diff --git a/doc/talks/2026-01-31-fosdem/.gitignore 
b/doc/talks/2026-01-31-fosdem/.gitignore deleted file mode 100644 index 599774bc..00000000 --- a/doc/talks/2026-01-31-fosdem/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -* - -!*.txt -!*.md - -!assets - -!.gitignore -!*.svg -!*.png -!*.jpg -!*.tex -!Makefile -!.gitignore -!assets/*.drawio.pdf - -talk.{nav,out,snm,toc,aux,log} -!talk.pdf diff --git a/doc/talks/2026-01-31-fosdem/Makefile b/doc/talks/2026-01-31-fosdem/Makefile deleted file mode 100644 index 8df2258b..00000000 --- a/doc/talks/2026-01-31-fosdem/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -talk.pdf: talk.tex - pdflatex talk.tex - diff --git a/doc/talks/2026-01-31-fosdem/assets/AGPLv3_Logo.png b/doc/talks/2026-01-31-fosdem/assets/AGPLv3_Logo.png deleted file mode 100644 index 445284a3..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/AGPLv3_Logo.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Bucket details page@2x.png b/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Bucket details page@2x.png deleted file mode 100644 index 6075c3b0..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Bucket details page@2x.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Dashboard@2x.png b/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Dashboard@2x.png deleted file mode 100644 index fe2a9617..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/Garage Web Admin - Dashboard@2x.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/community-ui.png b/doc/talks/2026-01-31-fosdem/assets/community-ui.png deleted file mode 100644 index ad79eef0..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/community-ui.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/compatibility.png b/doc/talks/2026-01-31-fosdem/assets/compatibility.png deleted file mode 100644 index ce364a9b..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/compatibility.png 
and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/endpoint-latency-dc.png b/doc/talks/2026-01-31-fosdem/assets/endpoint-latency-dc.png deleted file mode 100644 index 7c7411cd..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/endpoint-latency-dc.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/garage-stats.png b/doc/talks/2026-01-31-fosdem/assets/garage-stats.png deleted file mode 100644 index e1e1a2f0..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/garage-stats.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/garageuses.png b/doc/talks/2026-01-31-fosdem/assets/garageuses.png deleted file mode 100644 index b66d7f30..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/garageuses.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/location-aware.png b/doc/talks/2026-01-31-fosdem/assets/location-aware.png deleted file mode 100644 index 8b55f253..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/location-aware.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/map.png b/doc/talks/2026-01-31-fosdem/assets/map.png deleted file mode 100644 index 1dff3ab6..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/map.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/minio.png b/doc/talks/2026-01-31-fosdem/assets/minio.png deleted file mode 100644 index a71e9ccc..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/minio.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/assets/rust_logo.png b/doc/talks/2026-01-31-fosdem/assets/rust_logo.png deleted file mode 100644 index 0e4809ec..00000000 Binary files a/doc/talks/2026-01-31-fosdem/assets/rust_logo.png and /dev/null differ diff --git a/doc/talks/2026-01-31-fosdem/talk.tex b/doc/talks/2026-01-31-fosdem/talk.tex deleted file mode 100644 index fa86303a..00000000 --- a/doc/talks/2026-01-31-fosdem/talk.tex +++ /dev/null @@ -1,330 +0,0 @@ 
-%\nonstopmode -\documentclass[aspectratio=169]{beamer} -\usepackage[utf8]{inputenc} -% \usepackage[frenchb]{babel} -\usepackage{amsmath} -\usepackage{mathtools} -\usepackage{breqn} -\usepackage{multirow} -\usetheme{boxes} -\usepackage{graphicx} -%\useoutertheme[footline=authortitle,subsection=false]{miniframes} - -\beamertemplatenavigationsymbolsempty - -\definecolor{TitleOrange}{RGB}{255,137,0} -\setbeamercolor{title}{fg=TitleOrange} -\setbeamercolor{frametitle}{fg=TitleOrange} - -\definecolor{ListOrange}{RGB}{255,145,5} -\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$} - -\definecolor{verygrey}{RGB}{70,70,70} -\setbeamercolor{normal text}{fg=verygrey} - - -\usepackage{tabu} -\usepackage{multicol} -\usepackage{vwcol} -\usepackage{stmaryrd} -\usepackage{graphicx} - -\usepackage[normalem]{ulem} - -\title{Garage Object Storage: 2.0 update and best practices} -\subtitle{a new storage platform for self-hosted geo-distributed clusters} -\author{Maximilien Richer, Deuxfleurs} -\date{FOSDEM '26} - -\begin{document} - -\begin{frame} - \centering - \includegraphics[width=.3\linewidth]{../../sticker/Garage.pdf} - \vspace{1em} - - {\large\bf Maximilien Richer, Deuxfleurs} - \vspace{1em} - - \url{https://garagehq.deuxfleurs.fr/} - Matrix channel: \texttt{\#garage:deuxfleurs.fr} -\end{frame} - -\begin{frame} - \frametitle{Our objective at Deuxfleurs} - - \begin{center} - French association promoting digital sovereignty and privacy\\ - through self-hosting hosting \textbf{as an alternative to large cloud providers} - \end{center} - \vspace{2em} - - \vspace{2em} - \begin{center} - \textbf{This requires \underline{resilience}}\\ - {\footnotesize (we want good uptime/availability with low supervision)} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{But what is Garage, exactly?} - \textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}\\ - \vspace{.5em} - that implements resilience through geographical redundancy on 
commodity hardware - \begin{center} - \includegraphics[width=.8\linewidth]{assets/garageuses.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{What makes Garage different?} - \textbf{Coordination-free:} - \vspace{2em} - \begin{itemize} - \item No Raft or Paxos - \vspace{1em} - \item Internal data types are CRDTs - \vspace{1em} - \item All nodes are equivalent (no master/leader/index node) - \end{itemize} - \vspace{2em} - $\to$ less sensitive to higher latencies between nodes -\end{frame} - -\begin{frame} - \frametitle{What makes Garage different?} - \begin{center} - TODO update with latest garage and minio versions - \includegraphics[width=.9\linewidth]{assets/endpoint-latency-dc.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{What makes Garage different?} - \textbf{Consistency model:} - \vspace{2em} - \begin{itemize} - \item Not ACID (not required by S3 spec) / not linearizable - \vspace{1em} - \item \textbf{Read-after-write consistency}\\ - {\footnotesize (stronger than eventual consistency)} - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{What makes Garage different?} - \textbf{Location-aware:} - \vspace{2em} - \begin{center} - \includegraphics[width=\linewidth]{assets/location-aware.png} - \end{center} - \vspace{2em} - Garage replicates data on different zones when possible -\end{frame} - -\begin{frame} - \frametitle{What makes Garage different?} - \begin{center} - \includegraphics[width=.8\linewidth]{assets/map.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{An ever-increasing compatibility list} - \begin{center} - \includegraphics[width=.7\linewidth]{assets/compatibility.png} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Version history and roadmap} - \begin{itemize} - \item v0.3: initial beta release (2021) - \item v0.7: first released version (2022) - \item v1.0: stable release (2024), will be deprecated in summer 2026 1y after v2.0 was released - \item v2.0: stable release (2025) - 
\begin{itemize} - \item new HTTP admin API - \item reworded replication configuration: \texttt{replication\_mode} changed to \texttt{replication\_factor} \& \texttt{consistency\_policy} - \end{itemize} - \item - \end{itemize} - \begin{center} - v3.0: TBA may include versionning support, tag on buckets and objets, retention policies... - \end{center} -\end{frame} - -\begin{frame} - \centering - {\large\bf Best practices for Garage deployments} -\end{frame} - -\begin{frame} - \frametitle{Things you should know} - \begin{itemize} - \item no TLS support, use your own proxy - \item no anonymous access (use website endpoint) - \item you need to assign roles to nodes manually - \item the replication factor cannot be changed easily - \item the default region is \texttt{garage} and not \texttt{us-east-1} - \item only use the \texttt{degraded} consistency policy for data recovery! - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{What hardware should I use?} - \begin{itemize} - \item do NOT use network file storage (NFS, SMB, etc.) for \texttt{\/metadata} - \item get a \textbf{write-intensive flash disk} for the \texttt{\/metadata} folder - \item set \texttt{metadata} on a RAID1 if possible, with a COW filesystem (e.g. Btrfs or ZFS) - \item get large HDDs for the \texttt{\/data} folder - \item use XFS and garage multi-hdd mode for best performance - \item you can use a RAID for data but you'll leave a lot of performance on the table - \end{itemize} - \center\textit{Garage doesn't require a powerful CPUs nor much RAM, but your performance will depend on your disks!} -\end{frame} - -\begin{frame} - \frametitle{Picking a metadata engine} - All files-to-block mappings are stored in the metadata engine, including bucket and object metadata. Files below 3KB are stored directly in the metadata engine. 
- \vspace{1em} - \begin{itemize} - \item Sled: removed in 1.x, move to SQLite or LMDB - \item \textbf{SQLite}: safer, \textbf{recommended for small clusters and single-node} - \item LMDB: faster, recommended for large clusters with metadata redundancy - \begin{itemize} - \item Warning: limited to 480 bytes per key with LMDB (not an issue in practice) - \end{itemize} - \item Fjall: experimental but promising rust-native engine, test it and let us know! - \end{itemize} - \center{Metadata engine can be set node per node, and changed later with a migration tool} -\end{frame} - -\begin{frame} - \frametitle{Single-node deployment} - \begin{itemize} - \item garage was initially designed for multi-node deployments - \item single-node deployments are possible, but you will lose resilience - \item \textbf{If you do please ensure you have backups} (especially for metadata) - \begin{itemize} - \item set up \texttt{metadata\_auto\_snapshot\_interval} - \end{itemize} - \item use sqlite to minimize data loss risks on powercuts - \item or use a UPS! - \end{itemize} - \vspace{1em} - Use \texttt{github.com/bikeshedder/garage-single-node} for an easy single-node setup! -\end{frame} - -\begin{frame} - \frametitle{Multi-node deployment} - \begin{itemize} - \item try to have geo-distributed zones - \item multiple nodes per zone to add more capacity - \item at least 3 zones for best resilience - \item keep in mind your available network and IO bandwidth - \item \textbf{Rebalancing a cluster can take multiple weeks with large HDDs and slow network links} - \item monitor your nodes with Prometheus + Grafana - \end{itemize} - \center{Deuxfleurs has been running a 9TB (3TB usable) 8-nodes cluster (3+3+2) over retail fiber (10ms site-to-site latency) for close to 5 years now. We heard there are petabyte clusters out there!} -\end{frame} - -\begin{frame} - \frametitle{Deploying and administering garage at scale} - \begin{itemize} - \item deploy with your favorite tool (eg. 
Ansible) and system manager (eg. systemd) - \item or use Docker, docker-compose, Kubernetes or Nomad - \item Kubernetes and Consul are supported for node-to-node discovery - \begin{itemize} - \item you'll still have to manage the layout manually! - \end{itemize} - \item use gateway nodes to optimize network usage - \item ajust \texttt{resync-tranquility} and \texttt{scrub-tranquility} to your ressources - \end{itemize} - \center{Kubernetes storage controller: \texttt{github.com/bmarinov/garage-storage-controller}} -\end{frame} - -\begin{frame} - \frametitle{Community UI available!} - \begin{center} - \includegraphics[width=0.9\linewidth]{assets/community-ui.png}\\ - \vspace{-1em} - \url{https://github.com/khairul169/garage-webui} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Official Embedded UI comming later this year!} - \begin{center} - \includegraphics[width=0.9\linewidth]{assets/Garage Web Admin - Dashboard@2x.png}\\ - \vspace{-1em} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{Official Embedded UI comming this year!} - \begin{center} - \includegraphics[width=0.9\linewidth]{assets/Garage Web Admin - Bucket details page@2x.png}\\ - \vspace{-1em} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{How to make sense of garage metrics?} - \begin{center} - \includegraphics[width=0.7\linewidth]{assets/garage-stats.png}\\ - \vspace{-1em} - \end{center} -\end{frame} - -\begin{frame} - \frametitle{What if things go wrong?} - \begin{itemize} - \item set logs to debug with \texttt{RUST_LOG=garage_api_common=debug,garage_api_s3=debug,garage=debug} - \item auth issues: check your reverse proxy configuration - \item slow resync: check your network and disk IO usage, and \texttt{resync-tranquility} worker configuration - \item big LMDB database: stop garage and compact with \texttt{mdb\_copy -c} - \item ask us on matrix \texttt{\#garage:deuxfleurs.fr} or open an issue on git.deuxfleurs.fr! 
- \begin{itemize} - \item provide the output of \texttt{garage status}, \texttt{garage stats} and relevant metrics and logs - \end{itemize} - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Moving from Minio} - \begin{itemize} - \item list your buckets and your keys - \item create buckets and keys on the garage cluster - \begin{itemize} - \item you cannot import non-garage keys yet, patch to come soon! - \end{itemize} - \item loop over buckets, copy with rclone - \begin{itemize} - \item see doc \url{https://garagehq.deuxfleurs.fr/documentation/connect/cli/} - \end{itemize} - \item blog post coming soon! - \end{itemize} -\end{frame} - -\begin{frame} - \frametitle{Demo time!} -\end{frame} - -\begin{frame} - \frametitle{Get Garage now!} - \begin{center} - \includegraphics[width=.3\linewidth]{../../logo/garage_hires.png}\\ - \vspace{-1em} - \url{https://garagehq.deuxfleurs.fr/}\\ - Matrix channel: \texttt{\#garage:deuxfleurs.fr} - - \vspace{2em} - \includegraphics[width=.09\linewidth]{assets/rust_logo.png} - \includegraphics[width=.2\linewidth]{assets/AGPLv3_Logo.png} - \end{center} -\end{frame} - -\end{document} - -%% vim: set ts=4 sw=4 tw=0 noet spelllang=fr : diff --git a/doc/talks/assets/armael.jpg b/doc/talks/assets/armael.jpg deleted file mode 100644 index 54b97662..00000000 Binary files a/doc/talks/assets/armael.jpg and /dev/null differ diff --git a/doc/talks/assets/cluster_kind.png b/doc/talks/assets/cluster_kind.png deleted file mode 100644 index 80f8f4b5..00000000 Binary files a/doc/talks/assets/cluster_kind.png and /dev/null differ diff --git a/doc/talks/assets/cluster_size.png b/doc/talks/assets/cluster_size.png deleted file mode 100644 index b4b0f5ce..00000000 Binary files a/doc/talks/assets/cluster_size.png and /dev/null differ diff --git a/doc/talks/assets/community.png b/doc/talks/assets/community.png deleted file mode 100644 index 06c7a1af..00000000 Binary files a/doc/talks/assets/community.png and /dev/null differ diff --git 
a/doc/talks/assets/quentin.jpg b/doc/talks/assets/quentin.jpg deleted file mode 100644 index a68d9d7b..00000000 Binary files a/doc/talks/assets/quentin.jpg and /dev/null differ diff --git a/doc/talks/assets/support.png b/doc/talks/assets/support.png deleted file mode 100644 index c20d179b..00000000 Binary files a/doc/talks/assets/support.png and /dev/null differ diff --git a/doc/talks/assets/tl.drawio.png b/doc/talks/assets/tl.drawio.png deleted file mode 100644 index c60c310a..00000000 Binary files a/doc/talks/assets/tl.drawio.png and /dev/null differ diff --git a/flake.lock b/flake.lock index e265d0c3..211b70e0 100644 --- a/flake.lock +++ b/flake.lock @@ -12,17 +12,16 @@ "original": { "owner": "ipetkov", "repo": "crane", - "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", "type": "github" } }, "flake-compat": { "locked": { - "lastModified": 1761640442, - "narHash": "sha256-AtrEP6Jmdvrqiv4x2xa5mrtaIp3OEe8uBYCDZDS+hu8=", + "lastModified": 1717312683, + "narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=", "owner": "nix-community", "repo": "flake-compat", - "rev": "4a56054d8ffc173222d09dad23adf4ba946c8884", + "rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 81d94215..01a077c4 100644 --- a/flake.nix +++ b/flake.nix @@ -11,8 +11,7 @@ "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b"; inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; - # Crane as of 2025-01-24 - inputs.crane.url = "github:ipetkov/crane/6fe74265bbb6d016d663b1091f015e2976c4a527"; + inputs.crane.url = "github:ipetkov/crane"; inputs.flake-compat.url = "github:nix-community/flake-compat"; inputs.flake-utils.url = "github:numtide/flake-utils"; @@ -67,7 +66,7 @@ clippy = lints.garage-cargo-clippy; }; - # ---- development shell, for making native builds only ---- + # ---- developpment shell, for making native builds only ---- devShells = let targets = compile { @@ -90,9 +89,6 @@ cargo-outdated 
cargo-machete nixpkgs-fmt - openssl - socat - killall ]; }; }; diff --git a/nix/build_index.nix b/nix/build_index.nix index 92931eea..7869566f 100644 --- a/nix/build_index.nix +++ b/nix/build_index.nix @@ -167,7 +167,7 @@ let

Sources:

diff --git a/script/dev-bucket.sh b/script/dev-bucket.sh index 82e73652..708c2c43 100755 --- a/script/dev-bucket.sh +++ b/script/dev-bucket.sh @@ -17,19 +17,13 @@ else fi $GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette -if [ "$GARAGE_OLDVER" = "v08" ]; then +if [ "$GARAGE_08" = "1" ]; then KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur) - ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'` - SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'` -elif [ "$GARAGE_OLDVER" = "v1" ]; then - KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur) - ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'` - SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'` else - KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml json-api CreateKey '{"name":"opérateur"}') - ACCESS_KEY=`echo $KEY_INFO|jq -r .accessKeyId` - SECRET_KEY=`echo $KEY_INFO|jq -r .secretAccessKey` + KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur) fi +ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'` +SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'` $GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3 diff --git a/script/dev-cluster.sh b/script/dev-cluster.sh index 81a37099..998ffdb9 100755 --- a/script/dev-cluster.sh +++ b/script/dev-cluster.sh @@ -30,12 +30,6 @@ for count in $(seq 1 3); do CONF_PATH="/tmp/config.$count.toml" LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m" -if [ "$GARAGE_OLDVER" == "v08" ]; then - REPLICATION_MODE="replication_mode = \"3\"" -else - REPLICATION_MODE="replication_factor = 3" -fi - cat > $CONF_PATH <&1|grep -q HEALTHY ; do sleep 1 done -if [ "$GARAGE_OLDVER" = "v08" ]; then +if [ "$GARAGE_08" = "1" ]; then $GARAGE_BIN -c /tmp/config.1.toml status \ | grep 'NO ROLE' \ | grep -Po '^[0-9a-f]+' \ diff --git a/script/dev-env-aws.sh 
b/script/dev-env-aws.sh index 808f9cf1..41f1fdde 100644 --- a/script/dev-env-aws.sh +++ b/script/dev-env-aws.sh @@ -1,6 +1,7 @@ export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1` export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2` export AWS_DEFAULT_REGION='garage' +export AWS_REQUEST_CHECKSUM_CALCULATION='when_required' # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0. function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; } diff --git a/script/helm/garage/Chart.yaml b/script/helm/garage/Chart.yaml index 110ba8c7..b3a7b921 100644 --- a/script/helm/garage/Chart.yaml +++ b/script/helm/garage/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: garage description: S3-compatible object store for small self-hosted geo-distributed deployments type: application -version: 0.9.3 -appVersion: "v2.3.0" +version: 0.7.3 +appVersion: "v1.3.1" home: https://garagehq.deuxfleurs.fr/ icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg diff --git a/script/helm/garage/README.md b/script/helm/garage/README.md index 50de3360..bdf69ec4 100644 --- a/script/helm/garage/README.md +++ b/script/helm/garage/README.md @@ -1,6 +1,6 @@ # garage -![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.3.0](https://img.shields.io/badge/AppVersion-v2.3.0-informational?style=flat-square) +![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.3.1](https://img.shields.io/badge/AppVersion-v1.3.1-informational?style=flat-square) S3-compatible object store for small self-hosted geo-distributed deployments @@ -15,7 +15,6 @@ S3-compatible object store for small self-hosted geo-distributed deployments | Key | Type | Default | 
Description | |-----|------|---------|-------------| | affinity | object | `{}` | | -| commonLabels | object | `{}` | Extra labels for all resources | | deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet | | deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) | | deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start | @@ -23,16 +22,15 @@ S3-compatible object store for small self-hosted geo-distributed deployments | extraVolumeMounts | object | `{}` | | | extraVolumes | object | `{}` | | | fullnameOverride | string | `""` | | -| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size | +| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size | | garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery | -| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level | -| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine | +| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level | +| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 | | garage.existingConfigMap | string | `""` | 
if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml | | garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ | -| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources | -| garage.replicationFactor | string | `"3"` | Default to 3 replicas, see the replication_factor section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor | -| garage.consistencyMode | string | `"consistent"` | Default to read-after-write consistency, see the consistency_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode | +| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources | | garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. 
https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval | +| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode | | garage.rpcBindAddr | string | `"[::]:3901"` | | | garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object | | garage.s3.api.region | string | `"garage"` | | @@ -76,7 +74,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments | persistence.enabled | bool | `true` | | | persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | | | persistence.meta.size | string | `"100Mi"` | | -| podAnnotations | object | `{}` | additional pod annotations | +| podAnnotations | object | `{}` | additonal pod annotations | | podSecurityContext.fsGroup | int | `1000` | | | podSecurityContext.runAsGroup | int | `1000` | | | podSecurityContext.runAsNonRoot | bool | `true` | | diff --git a/script/helm/garage/templates/_helpers.tpl b/script/helm/garage/templates/_helpers.tpl index 2ffb90c6..037a5f1c 100644 --- a/script/helm/garage/templates/_helpers.tpl +++ b/script/helm/garage/templates/_helpers.tpl @@ -27,7 +27,7 @@ If release name contains chart name it will be used as a full name. Create the name of the rpc secret */}} {{- define "garage.rpcSecretName" -}} -{{- .Values.garage.existingRpcSecret | default (printf "%s-rpc-secret" (include "garage.fullname" .)) -}} +{{- printf "%s-rpc-secret" (include "garage.fullname" .) -}} {{- end }} {{/* @@ -47,9 +47,6 @@ helm.sh/chart: {{ include "garage.chart" . }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- with .Values.commonLabels }} -{{- toYaml . 
| nindent 0 }} -{{- end }} {{- end }} {{/* diff --git a/script/helm/garage/templates/clusterrole.yaml b/script/helm/garage/templates/clusterrole.yaml index 3fb81af9..fa3e6405 100644 --- a/script/helm/garage/templates/clusterrole.yaml +++ b/script/helm/garage/templates/clusterrole.yaml @@ -5,11 +5,9 @@ metadata: labels: {{- include "garage.labels" . | nindent 4 }} rules: -{{- if eq .Values.garage.kubernetesSkipCrd false }} - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["get", "list", "watch", "create", "patch"] -{{ end }} - apiGroups: ["deuxfleurs.fr"] resources: ["garagenodes"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] @@ -27,4 +25,4 @@ subjects: roleRef: kind: ClusterRole name: manage-crds-{{ .Release.Namespace }}-{{ .Release.Name }} - apiGroup: rbac.authorization.k8s.io + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/script/helm/garage/templates/configmap.yaml b/script/helm/garage/templates/configmap.yaml index 4fc3e152..ab5b84db 100644 --- a/script/helm/garage/templates/configmap.yaml +++ b/script/helm/garage/templates/configmap.yaml @@ -13,10 +13,9 @@ data: db_engine = "{{ .Values.garage.dbEngine }}" - block_size = "{{ .Values.garage.blockSize }}" + block_size = {{ .Values.garage.blockSize }} - replication_factor = {{ .Values.garage.replicationFactor }} - consistency_mode = "{{ .Values.garage.consistencyMode }}" + replication_mode = "{{ .Values.garage.replicationMode }}" compression_level = {{ .Values.garage.compressionLevel }} @@ -28,16 +27,8 @@ data: # rpc_secret will be populated by the init container from a k8s secret object rpc_secret = "__RPC_SECRET_REPLACE__" - bootstrap_peers = [ - {{- range $index, $peer := .Values.garage.bootstrapPeers }} - {{- if $index}}, {{ end }}{{ $peer | quote }} - {{ end }} - ] + bootstrap_peers = {{ .Values.garage.bootstrapPeers }} - {{- if .Values.garage.additionalTopLevelConfig }} - {{ .Values.garage.additionalTopLevelConfig | 
nindent 4 }} - {{- end }} - [kubernetes_discovery] namespace = "{{ .Release.Namespace }}" service_name = "{{ include "garage.fullname" . }}" diff --git a/script/helm/garage/templates/secret.yaml b/script/helm/garage/templates/secret.yaml index c0c45b93..54749424 100644 --- a/script/helm/garage/templates/secret.yaml +++ b/script/helm/garage/templates/secret.yaml @@ -1,4 +1,3 @@ -{{- if not .Values.garage.existingRpcSecret }} apiVersion: v1 kind: Secret metadata: @@ -13,4 +12,3 @@ data: {{- $prevRpcSecret := $prevSecretData.rpcSecret | default "" | b64dec }} {{/* Priority is: 1. from values, 2. previous value, 3. generate random */}} rpcSecret: {{ .Values.garage.rpcSecret | default $prevRpcSecret | default (include "jupyterhub.randHex" 64) | b64enc | quote }} -{{- end }} diff --git a/script/helm/garage/templates/workload.yaml b/script/helm/garage/templates/workload.yaml index 21c6462d..d144cb41 100644 --- a/script/helm/garage/templates/workload.yaml +++ b/script/helm/garage/templates/workload.yaml @@ -21,16 +21,13 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - {{- include "garage.labels" . | nindent 8 }} + {{- include "garage.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "garage.serviceAccountName" . }} - {{- with .Values.priorityClassName }} - priorityClassName: {{ . }} - {{- end }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} initContainers: @@ -94,7 +91,7 @@ spec: volumes: - name: configmap configMap: - name: {{ if .Values.garage.existingConfigMap }}{{ .Values.garage.existingConfigMap }}{{ else }}{{ include "garage.fullname" . }}-config{{ end }} + name: {{ include "garage.fullname" . 
}}-config - name: etc emptyDir: {} {{- if .Values.persistence.enabled }} diff --git a/script/helm/garage/values.yaml b/script/helm/garage/values.yaml index d376c6a7..5e419fe2 100644 --- a/script/helm/garage/values.yaml +++ b/script/helm/garage/values.yaml @@ -2,32 +2,23 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. -# -- Additional labels to add to all resources created by this chart -commonLabels: {} -# app.kubernetes.io/part-of: storage -# team: platform - # Garage configuration. These values go to garage.toml garage: # -- Can be changed for better performance on certain systems - # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine + # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 dbEngine: "lmdb" # -- Defaults is 1MB # An increase can result in better performance in certain scenarios - # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size + # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size blockSize: "1048576" - # -- Default to 3 replicas, see the replication_factor section at - # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor - replicationFactor: "3" - - # -- By default, enable read-after-write consistency guarantees, see the consistency_mode section at - # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode - consistencyMode: "consistent" + # -- Default to 3 replicas, see the replication_mode section at + # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode + replicationMode: "3" # -- zstd compression level of stored blocks - # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level + # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level 
compressionLevel: "1" # -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. @@ -37,14 +28,10 @@ garage: rpcBindAddr: "[::]:3901" # -- If not given, a random secret will be generated and stored in a Secret object rpcSecret: "" - # -- If you want to provide an rpcSecret within an existing k8s secret, - # specify the secret name here, and store the value under the secret key `rpcSecret` - # the default secret will not be created - existingRpcSecret: "" # -- This is not required if you use the integrated kubernetes discovery bootstrapPeers: [] # -- Set to true if you want to use k8s discovery but install the CRDs manually outside - # of the helm chart, for example if you operate at namespace level without cluster resources + # of the helm chart, for example if you operate at namespace level without cluster ressources kubernetesSkipCrd: false s3: api: @@ -54,12 +41,6 @@ garage: rootDomain: ".web.garage.tld" index: "index.html" - # -- Additional configuration to append to garage.toml. Use a multi-line string for custom config. - # Example: - # additionalTopLevelConfig: |- - # data_fsync = true - additionalTopLevelConfig: "" - # -- if not empty string, allow using an existing ConfigMap for the garage.toml, # if set, ignores garage.toml existingConfigMap: "" @@ -120,14 +101,13 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: "" -# -- additional pod annotations +# -- additonal pod annotations podAnnotations: {} podSecurityContext: runAsUser: 1000 runAsGroup: 1000 fsGroup: 1000 - fsGroupChangePolicy: "OnRootMismatch" runAsNonRoot: true securityContext: @@ -209,7 +189,7 @@ ingress: # - kubernetes.docker.internal resources: {} - # The following are indicative for a small-size deployment, for anything serious double them. + # The following are indicative for a small-size deployement, for anything serious double them. 
# limits: # cpu: 100m # memory: 1024Mi @@ -238,10 +218,6 @@ tolerations: [] affinity: {} -# -- Optional priority class name to assign to the pods. -# See https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ -priorityClassName: "" - environment: {} extraVolumes: {} diff --git a/script/jepsen.garage/README.md b/script/jepsen.garage/README.md index 4a74471d..50c7eb38 100644 --- a/script/jepsen.garage/README.md +++ b/script/jepsen.garage/README.md @@ -127,7 +127,7 @@ They are due to the download being interrupted in the middle (^C during first la Add `:force?` to the `cached-wget!` call in `daemon.clj` to re-download the binary, or restar the VMs to clear temporary files. -### In `jepsen.garage`: prefix weirdness +### In `jepsen.garage`: prefix wierdness In `store/garage set1/20231019T163358.615+0200`: @@ -146,12 +146,12 @@ and passing all values that were previously in the context (creds and prefix) as The reg2 test is our custom checker for CRDT read-after-write on individual object keys, acting as registers which can be updated. The test fails without the timestamp fix, which is expected as the clock scrambler will prevent nodes from having a correct ordering of objects. -With the timestamp fix (`--patch tsfix1`), the happened-before relationship should at least be respected, meaning that when a PutObject call starts +With the timestamp fix (`--patch tsfix1`), the happenned-before relationship should at least be respected, meaning that when a PutObject call starts after another PutObject call has ended, the second call should overwrite the value of the first call, and that value should not be readable by future GetObject calls. However, we observed inconsistencies even with the timestamp fix. 
-The inconsistencies seemed to always happened after writing a nil value, which translates to a DeleteObject call +The inconsistencies seemed to always happenned after writing a nil value, which translates to a DeleteObject call instead of a PutObject. By removing the possibility of writing nil values, therefore only doing PutObject calls, the issue disappears. There is therefore an issue to fix in DeleteObject. diff --git a/script/jepsen.garage/src/jepsen/garage/daemon.clj b/script/jepsen.garage/src/jepsen/garage/daemon.clj index 9267a03a..0ea773fb 100644 --- a/script/jepsen.garage/src/jepsen/garage/daemon.clj +++ b/script/jepsen.garage/src/jepsen/garage/daemon.clj @@ -43,7 +43,7 @@ "rpc_bind_addr = \"0.0.0.0:3901\"\n" "rpc_public_addr = \"" node ":3901\"\n" "db_engine = \"lmdb\"\n" - "replication_factor = 3\n" + "replication_mode = \"3\"\n" "data_dir = \"" data-dir "\"\n" "metadata_dir = \"" meta-dir "\"\n" "[s3_api]\n" diff --git a/script/k8s/config.yaml b/script/k8s/config.yaml index bfefd999..8cf40fc2 100644 --- a/script/k8s/config.yaml +++ b/script/k8s/config.yaml @@ -8,7 +8,7 @@ data: metadata_dir = "/tmp/meta" data_dir = "/tmp/data" - replication_factor = 3 + replication_mode = "3" rpc_bind_addr = "[::]:3901" rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec" diff --git a/script/telemetry/grafana-garage-dashboard-prometheus.json b/script/telemetry/grafana-garage-dashboard-prometheus.json index 1e127f8a..28ef1ec0 100644 --- a/script/telemetry/grafana-garage-dashboard-prometheus.json +++ b/script/telemetry/grafana-garage-dashboard-prometheus.json @@ -694,7 +694,32 @@ ] } }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "10.83.2.3:3903" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] 
+ } + ] }, "gridPos": { "h": 8, diff --git a/script/test-skip-part.sh b/script/test-skip-part.sh index bb9d5616..20ae017d 100644 --- a/script/test-skip-part.sh +++ b/script/test-skip-part.sh @@ -2,7 +2,7 @@ : ' This script tests whether uploaded parts can be skipped in a - CompleteMultipartUpload + CompleteMultipartUpoad On Minio: yes, parts can be skipped @@ -52,7 +52,7 @@ Conclusions: - - Skipping a part in a CompleteMultipartUpload call is OK + - Skipping a part in a CompleteMultipartUpoad call is OK - The part is simply not included in the stored object - Sequential part renumbering counts only non-skipped parts ' diff --git a/script/test-smoke.sh b/script/test-smoke.sh index eee206ba..acf56a90 100755 --- a/script/test-smoke.sh +++ b/script/test-smoke.sh @@ -112,23 +112,6 @@ if [ -z "$SKIP_S3CMD" ]; then done fi -# BOTO3 -if [ -z "$SKIP_BOTO3" ]; then - echo "🛠️ Testing with boto3 for STREAMING-UNSIGNED-PAYLOAD-TRAILER" - source ${SCRIPT_FOLDER}/dev-env-aws.sh - AWS_ENDPOINT_URL=https://localhost:4443 python <> /tmp/garage.log 2>&1 & sleep 3 echo "🛠️ Retrieving data from old cluster" -rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit \ - --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list +rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then echo "TEST FAILURE: directories are different" @@ -93,23 +68,6 @@ if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' fi rm -r /tmp/test_dotgit -if [ "$DO_SSEC_TEST" = "1" ]; then - rclone copy garage:eprouvette/test-ssec /tmp/test_ssec_out \ - --s3-sse-customer-algorithm AES256 \ - --s3-sse-customer-key-base64 "$SSEC_KEY" \ - --stats=1s --stats-log-level=NOTICE --stats-one-line - if ! 
diff "/tmp/test_ssec_out/test-upgrade.sh" "${SCRIPT_FOLDER}/test-upgrade.sh"; then - echo "SSEC-FAILURE (small file)" - exit 1 - fi - if ! diff "/tmp/test_ssec_out/randfile-for-upgrade" "/tmp/randfile-for-upgrade"; then - echo "SSEC-FAILURE (big file)" - exit 1 - fi - rm -r /tmp/test_ssec_out - rm /tmp/randfile-for-upgrade -fi - echo "🏁 Teardown" rm -rf /tmp/garage-{data,meta}-* rm -rf /tmp/config.*.toml diff --git a/shell.nix b/shell.nix index 4bbfedc7..c3dedca8 100644 --- a/shell.nix +++ b/shell.nix @@ -26,21 +26,17 @@ in s3cmd minio-client rclone - (python313.withPackages (ps: [ ps.boto3 ])) - socat psmisc which openssl curl jq - typos ]; shellHook = '' export AWS_REQUEST_CHECKSUM_CALCULATION='when_required' function to_s3 { - AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \ aws \ --endpoint-url https://garage.deuxfleurs.fr \ --region garage \ @@ -52,7 +48,7 @@ in function to_docker { executor \ --force \ - --custom-platform="$(echo "''${DOCKER_PLATFORM}" | sed 's/i386/386/')" \ + --customPlatform="$(echo "''${DOCKER_PLATFORM}" | sed 's/i386/386/')" \ --destination "$(echo "''${CONTAINER_NAME}" | sed 's/i386/386/'):''${CONTAINER_TAG}" \ --context dir://`pwd` \ --verbosity=debug @@ -97,7 +93,6 @@ in nix-build nix/build_index.nix - AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \ aws \ --endpoint-url https://garage.deuxfleurs.fr \ --region garage \ @@ -105,7 +100,6 @@ in result/share/_releases.json \ s3://garagehq.deuxfleurs.fr/ - AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \ aws \ --endpoint-url https://garage.deuxfleurs.fr \ --region garage \ diff --git a/src/api/admin/Cargo.toml b/src/api/admin/Cargo.toml index ab9a1f47..656c6825 100644 --- a/src/api/admin/Cargo.toml +++ b/src/api/admin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api_admin" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] 
edition = "2018" license = "AGPL-3.0" @@ -14,9 +14,7 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -format_table.workspace = true garage_model.workspace = true -garage_block.workspace = true garage_table.workspace = true garage_util.workspace = true garage_rpc.workspace = true @@ -24,11 +22,8 @@ garage_api_common.workspace = true argon2.workspace = true async-trait.workspace = true -bytesize.workspace = true -chrono.workspace = true thiserror.workspace = true hex.workspace = true -paste.workspace = true tracing.workspace = true futures.workspace = true @@ -39,15 +34,10 @@ url.workspace = true serde.workspace = true serde_json.workspace = true -utoipa.workspace = true opentelemetry.workspace = true opentelemetry-prometheus = { workspace = true, optional = true } prometheus = { workspace = true, optional = true } [features] -metrics = ["opentelemetry-prometheus", "prometheus"] -k2v = ["garage_model/k2v"] - -[lints] -workspace = true +metrics = [ "opentelemetry-prometheus", "prometheus" ] diff --git a/src/api/admin/admin_token.rs b/src/api/admin/admin_token.rs deleted file mode 100644 index 242c9958..00000000 --- a/src/api/admin/admin_token.rs +++ /dev/null @@ -1,292 +0,0 @@ -use std::sync::Arc; - -use chrono::{DateTime, Utc}; - -use garage_table::*; -use garage_util::time::now_msec; - -use garage_model::admin_token_table::*; -use garage_model::garage::Garage; - -use crate::api::*; -use crate::error::*; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for ListAdminTokensRequest { - type Response = ListAdminTokensResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let now = now_msec(); - - let mut res = garage - .admin_token_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), - 10000, - EnumerationOrder::Forward, - ) - .await? 
- .iter() - .map(|t| admin_token_info_results(t, now)) - .collect::>(); - - if garage.config.admin.metrics_token.is_some() { - res.insert( - 0, - GetAdminTokenInfoResponse { - id: None, - created: None, - name: "metrics_token (from daemon configuration)".into(), - expiration: None, - expired: false, - scope: vec!["Metrics".into()], - }, - ); - } - - if garage.config.admin.admin_token.is_some() { - res.insert( - 0, - GetAdminTokenInfoResponse { - id: None, - created: None, - name: "admin_token (from daemon configuration)".into(), - expiration: None, - expired: false, - scope: vec!["*".into()], - }, - ); - } - - Ok(ListAdminTokensResponse(res)) - } -} - -impl RequestHandler for GetAdminTokenInfoRequest { - type Response = GetAdminTokenInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let token = match (self.id, self.search) { - (Some(id), None) => get_existing_admin_token(garage, &id).await?, - (None, Some(search)) => { - let candidates = garage - .admin_token_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::MatchesAndNotDeleted(search.to_string())), - 10, - EnumerationOrder::Forward, - ) - .await? 
- .into_iter() - .collect::>(); - if candidates.len() != 1 { - return Err(Error::bad_request(format!( - "{} matching admin tokens", - candidates.len() - ))); - } - candidates.into_iter().next().unwrap() - } - _ => { - return Err(Error::bad_request( - "Either id or search must be provided (but not both)", - )); - } - }; - - Ok(admin_token_info_results(&token, now_msec())) - } -} - -impl RequestHandler for CreateAdminTokenRequest { - type Response = CreateAdminTokenResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let (mut token, secret) = if self.0.name.is_some() { - AdminApiToken::new("") - } else { - AdminApiToken::new(&format!("token_{}", Utc::now().format("%Y%m%d_%H%M"))) - }; - - apply_token_updates(&mut token, self.0)?; - - garage.admin_token_table.insert(&token).await?; - - Ok(CreateAdminTokenResponse { - secret_token: secret, - info: admin_token_info_results(&token, now_msec()), - }) - } -} - -impl RequestHandler for UpdateAdminTokenRequest { - type Response = UpdateAdminTokenResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut token = get_existing_admin_token(garage, &self.id).await?; - - apply_token_updates(&mut token, self.body)?; - - garage.admin_token_table.insert(&token).await?; - - Ok(UpdateAdminTokenResponse(admin_token_info_results( - &token, - now_msec(), - ))) - } -} - -impl RequestHandler for DeleteAdminTokenRequest { - type Response = DeleteAdminTokenResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let token = get_existing_admin_token(garage, &self.id).await?; - - garage - .admin_token_table - .insert(&AdminApiToken::delete(token.prefix)) - .await?; - - Ok(DeleteAdminTokenResponse) - } -} - -impl RequestHandler for GetCurrentAdminTokenInfoRequest { - type Response = GetCurrentAdminTokenInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let now = now_msec(); - - if garage - 
.config - .admin - .metrics_token - .as_ref() - .is_some_and(|s| s == &self.admin_token) - { - return Ok(GetCurrentAdminTokenInfoResponse( - GetAdminTokenInfoResponse { - id: None, - created: None, - name: "metrics_token (from daemon configuration)".into(), - expiration: None, - expired: false, - scope: vec!["Metrics".into()], - }, - )); - } - - if garage - .config - .admin - .admin_token - .as_ref() - .is_some_and(|s| s == &self.admin_token) - { - return Ok(GetCurrentAdminTokenInfoResponse( - GetAdminTokenInfoResponse { - id: None, - created: None, - name: "admin_token (from daemon configuration)".into(), - expiration: None, - expired: false, - scope: vec!["*".into()], - }, - )); - } - - let (prefix, _) = self.admin_token.split_once('.').unwrap(); - let token = get_existing_admin_token(garage, &prefix.to_string()).await?; - - Ok(GetCurrentAdminTokenInfoResponse(admin_token_info_results( - &token, now, - ))) - } -} - -// ---- helpers ---- - -fn admin_token_info_results(token: &AdminApiToken, now: u64) -> GetAdminTokenInfoResponse { - let params = token.params().unwrap(); - - GetAdminTokenInfoResponse { - id: Some(token.prefix.clone()), - created: Some( - DateTime::from_timestamp_millis(params.created as i64) - .expect("invalid timestamp stored in db"), - ), - name: params.name.get().to_string(), - expiration: params.expiration.get().map(|x| { - DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db") - }), - expired: params.is_expired(now), - scope: params.scope.get().0.clone(), - } -} - -async fn get_existing_admin_token(garage: &Garage, id: &String) -> Result { - garage - .admin_token_table - .get(&EmptyKey, id) - .await? 
- .filter(|k| !k.state.is_deleted()) - .ok_or_else(|| Error::NoSuchAdminToken(id.to_string())) -} - -fn apply_token_updates( - token: &mut AdminApiToken, - updates: UpdateAdminTokenRequestBody, -) -> Result<(), Error> { - if updates.never_expires && updates.expiration.is_some() { - return Err(Error::bad_request( - "cannot specify `expiration` and `never_expires`", - )); - } - - let params = token.params_mut().unwrap(); - - if let Some(name) = updates.name { - params.name.update(name); - } - if let Some(expiration) = updates.expiration { - params - .expiration - .update(Some(expiration.timestamp_millis() as u64)); - } - if updates.never_expires { - params.expiration.update(None); - } - if let Some(scope) = updates.scope { - params.scope.update(AdminApiTokenScope(scope)); - } - - Ok(()) -} diff --git a/src/api/admin/api.rs b/src/api/admin/api.rs deleted file mode 100644 index ed145ea7..00000000 --- a/src/api/admin/api.rs +++ /dev/null @@ -1,1443 +0,0 @@ -use std::collections::HashMap; -use std::convert::TryFrom; -use std::net::SocketAddr; -use std::sync::Arc; - -use chrono::{DateTime, Utc}; -use paste::paste; -use serde::{Deserialize, Serialize}; -use utoipa::{IntoParams, ToSchema}; - -use garage_rpc::*; - -use garage_model::garage::Garage; - -use garage_api_common::{common_error::CommonError, helpers::is_default, xml}; - -use crate::api_server::{find_matching_nodes, AdminRpc, AdminRpcResponse}; -use crate::error::Error; -use crate::macros::*; -use crate::{Admin, RequestHandler}; - -// This generates the following: -// -// - An enum AdminApiRequest that contains a variant for all endpoints -// -// - An enum AdminApiResponse that contains a variant for all non-special endpoints. -// This enum is serialized in api_server.rs, without the enum tag, -// which gives directly the JSON response corresponding to the API call. -// This enum does not implement Deserialize as its meaning can be ambiguous. 
-// -// - An enum TaggedAdminApiResponse that contains the same variants, but -// serializes as a tagged enum. This allows it to be transmitted through -// Garage RPC and deserialized correctly upon receival. -// Conversion from untagged to tagged can be done using the `.tagged()` method. -// -// - AdminApiRequest::name() that returns the name of the endpoint -// -// - impl EndpointHandler for AdminApiHandler, that uses the impl EndpointHandler -// of each request type below for non-special endpoints -admin_endpoints![ - // Special endpoints of the Admin API - @special Options, - @special CheckDomain, - @special Health, - @special Metrics, - - // Cluster operations - GetClusterStatus, - GetClusterHealth, - GetClusterStatistics, - ConnectClusterNodes, - - // Admin tokens operations - ListAdminTokens, - GetAdminTokenInfo, - CreateAdminToken, - UpdateAdminToken, - DeleteAdminToken, - GetCurrentAdminTokenInfo, - - // Layout operations - GetClusterLayout, - GetClusterLayoutHistory, - UpdateClusterLayout, - PreviewClusterLayoutChanges, - ApplyClusterLayout, - RevertClusterLayout, - ClusterLayoutSkipDeadNodes, - - // Access key operations - ListKeys, - GetKeyInfo, - CreateKey, - ImportKey, - UpdateKey, - DeleteKey, - - // Bucket operations - ListBuckets, - GetBucketInfo, - CreateBucket, - UpdateBucket, - DeleteBucket, - CleanupIncompleteUploads, - InspectObject, - - // Operations on permissions for keys on buckets - AllowBucketKey, - DenyBucketKey, - - // Operations on bucket aliases - AddBucketAlias, - RemoveBucketAlias, - - // Node operations - GetNodeInfo, - GetNodeStatistics, - CreateMetadataSnapshot, - LaunchRepairOperation, - - // Worker operations - ListWorkers, - GetWorkerInfo, - GetWorkerVariable, - SetWorkerVariable, - - // Block operations - ListBlockErrors, - GetBlockInfo, - RetryBlockResync, - PurgeBlocks, -]; - -local_admin_endpoints![ - // Node operations - GetNodeInfo, - GetNodeStatistics, - CreateMetadataSnapshot, - LaunchRepairOperation, - // Background 
workers - ListWorkers, - GetWorkerInfo, - GetWorkerVariable, - SetWorkerVariable, - // Block operations - ListBlockErrors, - GetBlockInfo, - RetryBlockResync, - PurgeBlocks, -]; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MultiRequest { - pub node: String, - pub body: RB, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct MultiResponse { - /// Map of node id to response returned by this node, for nodes that were able to - /// successfully complete the API call - pub success: HashMap, - /// Map of node id to error message, for nodes that were unable to complete the API - /// call - pub error: HashMap, -} - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct MultiRequestQueryParams { - /// Node ID to query, or `*` for all nodes, or `self` for the node responding to the request - pub node: String, -} - -// ********************************************** -// Special endpoints -// -// These endpoints don't have associated *Response structs -// because they directly produce an http::Response -// ********************************************** - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct OptionsRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct CheckDomainRequest { - /// The domain name to check for - pub domain: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct HealthRequest; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MetricsRequest; - -// ********************************************** -// Cluster operations -// ********************************************** - -// ---- GetClusterStatus ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetClusterStatusRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetClusterStatusResponse { - /// Current version number of 
the cluster layout - pub layout_version: u64, - /// List of nodes that are either currently connected, part of the - /// current cluster layout, or part of an older cluster layout that - /// is still active in the cluster (being drained). - pub nodes: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct NodeResp { - /// Full-length node identifier - pub id: String, - /// Garage version - pub garage_version: Option, - /// Socket address used by other nodes to connect to this node for RPC - #[schema(value_type = Option)] - pub addr: Option, - /// Hostname of the node - pub hostname: Option, - /// Whether this node is connected in the cluster - pub is_up: bool, - /// For disconnected nodes, the number of seconds since last contact, - /// or `null` if no contact was established since Garage restarted. - pub last_seen_secs_ago: Option, - /// Role assigned to this node in the current cluster layout - pub role: Option, - /// Whether this node is part of an older layout version and is draining data. 
- pub draining: bool, - /// Total and available space on the disk partition(s) containing the data - /// directory(ies) - #[serde(default, skip_serializing_if = "Option::is_none")] - pub data_partition: Option, - /// Total and available space on the disk partition containing the - /// metadata directory - #[serde(default, skip_serializing_if = "Option::is_none")] - pub metadata_partition: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct NodeAssignedRole { - /// Zone name assigned by the cluster administrator - pub zone: String, - /// List of tags assigned by the cluster administrator - pub tags: Vec, - /// Capacity (in bytes) assigned by the cluster administrator, - /// absent for gateway nodes - pub capacity: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct FreeSpaceResp { - /// Number of bytes available - pub available: u64, - /// Total number of bytes - pub total: u64, -} - -// ---- GetClusterHealth ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetClusterHealthRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetClusterHealthResponse { - /// One of `healthy`, `degraded` or `unavailable`: - /// - `healthy`: Garage node is connected to all storage nodes - /// - `degraded`: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions - /// - `unavailable`: a quorum of write nodes is not available for some partitions - pub status: String, - /// the number of nodes this Garage node has had a TCP connection to since the daemon started - pub known_nodes: usize, - /// the number of nodes this Garage node currently has an open connection to - pub connected_nodes: usize, - /// the number of storage nodes currently registered in the cluster layout - pub storage_nodes: usize, - /// the number of 
storage nodes to which a connection is currently open - pub storage_nodes_up: usize, - /// the total number of partitions of the data (currently always 256) - pub partitions: usize, - /// the number of partitions for which a quorum of write nodes is available - pub partitions_quorum: usize, - /// the number of partitions for which we are connected to all storage nodes responsible of storing it - pub partitions_all_ok: usize, -} - -// ---- GetClusterStatistics ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct GetClusterStatisticsRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetClusterStatisticsResponse { - // FIXME for v3: remove freeform field and move display logic to garage crate - /// cluster statistics as a free-form string, kept for compatibility with nodes - /// running older v2.x versions of garage - pub freeform: String, - // FIXME for v3: remove Option<> and serde(default) for all fields below - /// available storage space for object data in the entire cluster, in bytes - #[serde(default, skip_serializing_if = "Option::is_none")] - pub data_avail: Option, - /// available storage space for object metadata in the entire cluster, in bytes - #[serde(default, skip_serializing_if = "Option::is_none")] - pub metadata_avail: Option, - /// true if the available storage space statistics are imprecise due to missing - /// information of disconnected nodes. When this is the case, the actual - /// space available in the cluster might be lower than the reported values. 
- #[serde(default, skip_serializing_if = "Option::is_none")] - pub incomplete_avail_info: Option, - /// number of buckets in the cluster - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bucket_count: Option, - /// total number of objects stored in all buckets - #[serde(default, skip_serializing_if = "Option::is_none")] - pub total_object_count: Option, - /// total size of objects stored in all buckets, before compression, deduplication and - /// replication (this is NOT equivalent to actual disk usage in the cluster) - #[serde(default, skip_serializing_if = "Option::is_none")] - pub total_object_bytes: Option, -} - -// ---- ConnectClusterNodes ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ConnectClusterNodesRequest(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ConnectClusterNodesResponse(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ConnectNodeResponse { - /// `true` if Garage managed to connect to this node - pub success: bool, - /// An error message if Garage did not manage to connect to this node - pub error: Option, -} - -// ********************************************** -// Admin token operations -// ********************************************** - -// ---- ListAdminTokens ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ListAdminTokensRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ListAdminTokensResponse(pub Vec); - -// ---- GetAdminTokenInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -#[serde(rename_all = "camelCase")] -pub struct GetAdminTokenInfoRequest { - /// Admin API token ID - pub id: Option, - /// Partial token ID or name to search for - pub search: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct 
GetAdminTokenInfoResponse { - /// Identifier of the admin token (which is also a prefix of the full bearer token) - pub id: Option, - /// Creation date - pub created: Option>, - /// Name of the admin API token - pub name: String, - /// Expiration time and date, formatted according to RFC 3339 - pub expiration: Option>, - /// Whether this admin token is expired already - pub expired: bool, - /// Scope of the admin API token, a list of admin endpoint names (such as - /// `GetClusterStatus`, etc), or the special value `*` to allow all - /// admin endpoints - pub scope: Vec, -} - -// ---- CreateAdminToken ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CreateAdminTokenRequest(pub UpdateAdminTokenRequestBody); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateAdminTokenResponse { - /// The secret bearer token. **CAUTION:** This token will be shown only - /// ONCE, so this value MUST be remembered somewhere, or the token - /// will be unusable. - pub secret_token: String, - #[serde(flatten)] - pub info: GetAdminTokenInfoResponse, -} - -// ---- UpdateAdminToken ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct UpdateAdminTokenRequest { - /// Admin API token ID - pub id: String, - #[param(ignore = true)] - pub body: UpdateAdminTokenRequestBody, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct UpdateAdminTokenRequestBody { - /// Name of the admin API token - pub name: Option, - /// Expiration time and date, formatted according to RFC 3339 - pub expiration: Option>, - /// Set the admin token to never expire - #[serde(default)] - pub never_expires: bool, - /// Scope of the admin API token, a list of admin endpoint names (such as - /// `GetClusterStatus`, etc), or the special value `*` to allow all - /// admin endpoints. 
**WARNING:** Granting a scope of `CreateAdminToken` or - /// `UpdateAdminToken` trivially allows for privilege escalation, and is thus - /// functionally equivalent to granting a scope of `*`. - pub scope: Option>, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct UpdateAdminTokenResponse(pub GetAdminTokenInfoResponse); - -// ---- DeleteAdminToken ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct DeleteAdminTokenRequest { - /// Admin API token ID - pub id: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DeleteAdminTokenResponse; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetCurrentAdminTokenInfoRequest { - pub admin_token: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetCurrentAdminTokenInfoResponse(pub GetAdminTokenInfoResponse); - -// ********************************************** -// Layout operations -// ********************************************** - -// ---- GetClusterLayout ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetClusterLayoutRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetClusterLayoutResponse { - /// The current version number of the cluster layout - pub version: u64, - /// List of nodes that currently have a role in the cluster layout - pub roles: Vec, - /// Layout parameters used when the current layout was computed - pub parameters: LayoutParameters, - /// The size, in bytes, of one Garage partition (= a shard) - pub partition_size: u64, - /// List of nodes that will have a new role or whose role will be - /// removed in the next version of the cluster layout - pub staged_role_changes: Vec, - /// Layout parameters to use when computing the next version of - /// the cluster layout - pub staged_parameters: Option, -} - -#[derive(Debug, 
Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LayoutNodeRole { - /// Identifier of the node - pub id: String, - /// Zone name assigned by the cluster administrator - pub zone: String, - /// List of tags assigned by the cluster administrator - pub tags: Vec, - /// Capacity (in bytes) assigned by the cluster administrator, - /// absent for gateway nodes - pub capacity: Option, - /// Number of partitions stored on this node - /// (a result of the layout computation) - pub stored_partitions: Option, - /// Capacity (in bytes) that is actually usable on this node in the current - /// layout, which is equal to `stored_partitions` × `partition_size` - pub usable_capacity: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct NodeRoleChange { - /// ID of the node for which this change applies - pub id: String, - #[serde(flatten)] - pub action: NodeRoleChangeEnum, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(untagged)] -pub enum NodeRoleChangeEnum { - #[serde(rename_all = "camelCase")] - Remove { - /// Set `remove` to `true` to remove the node from the layout - remove: bool, - }, - #[serde(rename_all = "camelCase")] - Update(NodeAssignedRole), -} - -#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LayoutParameters { - /// Minimum number of zones in which a data partition must be replicated - pub zone_redundancy: ZoneRedundancy, -} - -#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub enum ZoneRedundancy { - /// Partitions must be replicated in at least this number of - /// distinct zones. - AtLeast(usize), - /// Partitions must be replicated in as many zones as possible: - /// as many zones as there are replicas, if there are enough distinct - /// zones, or at least one in each zone otherwise. 
- Maximum, -} - -// ---- GetClusterLayoutHistory ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetClusterLayoutHistoryRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetClusterLayoutHistoryResponse { - /// The current version number of the cluster layout - pub current_version: u64, - /// All nodes in the cluster are aware of layout versions up to - /// this version number (at least) - pub min_ack: u64, - /// Layout version history - pub versions: Vec, - /// Detailed update trackers for nodes (see - /// `https://garagehq.deuxfleurs.fr/blog/2023-12-preserving-read-after-write-consistency/`) - pub update_trackers: Option>, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ClusterLayoutVersion { - /// Version number of this layout version - pub version: u64, - /// Status of this layout version - pub status: ClusterLayoutVersionStatus, - /// Number of nodes with an assigned storage capacity in this layout version - pub storage_nodes: u64, - /// Number of nodes with a gateway role in this layout version - pub gateway_nodes: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub enum ClusterLayoutVersionStatus { - /// This is the most up-to-date layout version - Current, - /// This version is still active in the cluster because metadata - /// is being rebalanced or migrated from old nodes - Draining, - /// This version is no longer active in the cluster for metadata - /// reads and writes. Note that there is still the possibility - /// that data blocks are being migrated away from nodes in this - /// layout version. 
- Historical, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct NodeUpdateTrackers { - pub ack: u64, - pub sync: u64, - pub sync_ack: u64, -} - -// ---- UpdateClusterLayout ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct UpdateClusterLayoutRequest { - /// New node roles to assign or remove in the cluster layout - #[serde(default)] - pub roles: Vec, - /// New layout computation parameters to use - #[serde(default)] - pub parameters: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse); - -// ---- PreviewClusterLayoutChanges ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PreviewClusterLayoutChangesRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(untagged)] -pub enum PreviewClusterLayoutChangesResponse { - #[serde(rename_all = "camelCase")] - Error { - /// Error message indicating that the layout could not be computed - /// with the provided configuration - error: String, - }, - #[serde(rename_all = "camelCase")] - Success { - /// Plain-text information about the layout computation - /// (do not try to parse this) - message: Vec, - /// Details about the new cluster layout - new_layout: GetClusterLayoutResponse, - }, -} - -// ---- ApplyClusterLayout ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ApplyClusterLayoutRequest { - /// As a safety measure, the new version number of the layout must - /// be specified here - pub version: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ApplyClusterLayoutResponse { - /// Plain-text information about the layout computation - /// (do not try to parse this) - pub message: Vec, - /// Details about the new cluster layout - pub layout: GetClusterLayoutResponse, -} - -// ---- 
RevertClusterLayout ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RevertClusterLayoutRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct RevertClusterLayoutResponse(pub GetClusterLayoutResponse); - -// ---- ClusterLayoutSkipDeadNodes ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ClusterLayoutSkipDeadNodesRequest { - /// Version number of the layout to assume is currently up-to-date. - /// This will generally be the current layout version. - pub version: u64, - /// Allow the skip even if a quorum of nodes could not be found for - /// the data among the remaining nodes - pub allow_missing_data: bool, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ClusterLayoutSkipDeadNodesResponse { - /// Nodes for which the ACK update tracker has been updated to `version` - pub ack_updated: Vec, - /// If `allow_missing_data` is set, - /// nodes for which the SYNC update tracker has been updated to `version` - pub sync_updated: Vec, -} - -// ********************************************** -// Access key operations -// ********************************************** - -// ---- ListKeys ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ListKeysRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ListKeysResponse(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ListKeysResponseItem { - pub id: String, - pub name: String, - pub created: Option>, - pub expiration: Option>, - pub expired: bool, -} - -// ---- GetKeyInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -#[serde(rename_all = "camelCase")] -pub struct GetKeyInfoRequest { - /// Access key ID - pub id: Option, - /// Partial key ID or name to search for - pub search: Option, - 
/// Whether to return the secret access key - #[serde(default)] - pub show_secret_key: bool, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetKeyInfoResponse { - pub access_key_id: String, - pub created: Option>, - pub name: String, - pub expiration: Option>, - pub expired: bool, - #[serde(default, skip_serializing_if = "is_default")] - pub secret_access_key: Option, - pub permissions: KeyPerm, - pub buckets: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct KeyPerm { - #[serde(default)] - pub create_bucket: bool, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct KeyInfoBucketResponse { - pub id: String, - pub global_aliases: Vec, - pub local_aliases: Vec, - pub permissions: ApiBucketKeyPerm, -} - -#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ApiBucketKeyPerm { - #[serde(default)] - pub read: bool, - #[serde(default)] - pub write: bool, - #[serde(default)] - pub owner: bool, -} - -// ---- CreateKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateKeyRequest(pub UpdateKeyRequestBody); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct CreateKeyResponse(pub GetKeyInfoResponse); - -// ---- ImportKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ImportKeyRequest { - pub access_key_id: String, - pub secret_access_key: String, - pub name: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ImportKeyResponse(pub GetKeyInfoResponse); - -// ---- UpdateKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct UpdateKeyRequest { - /// Access key ID - pub id: 
String, - #[param(ignore = true)] - pub body: UpdateKeyRequestBody, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct UpdateKeyResponse(pub GetKeyInfoResponse); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct UpdateKeyRequestBody { - /// Name of the API key - pub name: Option, - /// Expiration time and date, formatted according to RFC 3339 - pub expiration: Option>, - /// Set the access key to never expire - #[serde(default)] - pub never_expires: bool, - /// Permissions to allow for the key - pub allow: Option, - /// Permissions to deny for the key - pub deny: Option, -} - -// ---- DeleteKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct DeleteKeyRequest { - /// Access key ID - pub id: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DeleteKeyResponse; - -// ********************************************** -// Bucket operations -// ********************************************** - -// ---- ListBuckets ---- - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ListBucketsRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct ListBucketsResponse(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ListBucketsResponseItem { - pub id: String, - pub created: DateTime, - pub global_aliases: Vec, - pub local_aliases: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct BucketLocalAlias { - pub access_key_id: String, - pub alias: String, -} - -// ---- GetBucketInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -#[serde(rename_all = "camelCase")] -pub struct GetBucketInfoRequest { - /// Exact bucket ID to look up - pub id: Option, - /// Global alias of bucket to look up - pub 
global_alias: Option, - /// Partial ID or alias to search for - pub search: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetBucketInfoResponse { - /// Identifier of the bucket - pub id: String, - /// Bucket creation date - pub created: DateTime, - /// List of global aliases for this bucket - pub global_aliases: Vec, - /// Whether website access is enabled for this bucket - pub website_access: bool, - /// Website configuration for this bucket - #[serde(default, skip_serializing_if = "Option::is_none")] - pub website_config: Option, - // FIXME for v3: remove serde(default) for the two fields below - /// CORS rules for this bucket - #[serde(default, skip_serializing_if = "Option::is_none")] - pub cors_rules: Option>, - /// Object lifecycle rules for this bucket - #[serde(default, skip_serializing_if = "Option::is_none")] - pub lifecycle_rules: Option>, - /// List of access keys that have permissions granted on this bucket - pub keys: Vec, - /// Number of objects in this bucket - pub objects: i64, - /// Total number of bytes used by objects in this bucket - pub bytes: i64, - /// Number of unfinished uploads in this bucket - pub unfinished_uploads: i64, - /// Number of unfinished multipart uploads in this bucket - pub unfinished_multipart_uploads: i64, - /// Number of parts in unfinished multipart uploads in this bucket - pub unfinished_multipart_upload_parts: i64, - /// Total number of bytes used by unfinished multipart uploads in this bucket - pub unfinished_multipart_upload_bytes: i64, - /// Quotas that apply to this bucket - pub quotas: ApiBucketQuotas, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetBucketInfoWebsiteResponse { - pub index_document: String, - pub error_document: Option, - // FIXME for v3: remove serde(default) for field below - #[serde(default, skip_serializing_if = "Option::is_none")] - pub routing_rules: 
Option>, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct GetBucketInfoKey { - pub access_key_id: String, - pub name: String, - pub permissions: ApiBucketKeyPerm, - pub bucket_local_aliases: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct ApiBucketQuotas { - pub max_size: Option, - pub max_objects: Option, -} - -// ---- CreateBucket ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateBucketRequest { - pub global_alias: Option, - pub local_alias: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct CreateBucketResponse(pub GetBucketInfoResponse); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CreateBucketLocalAlias { - pub access_key_id: String, - pub alias: String, - #[serde(default)] - pub allow: ApiBucketKeyPerm, -} - -// ---- UpdateBucket ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct UpdateBucketRequest { - /// ID of the bucket to update - pub id: String, - #[param(ignore = true)] - pub body: UpdateBucketRequestBody, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct UpdateBucketResponse(pub GetBucketInfoResponse); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct UpdateBucketRequestBody { - pub website_access: Option, - pub quotas: Option, - // FIXME for v3: remove serde(default) for the two fields below - #[serde(default)] - pub cors_rules: Option>, - #[serde(default)] - pub lifecycle_rules: Option>, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct UpdateBucketWebsiteAccess { - pub enabled: bool, - pub index_document: Option, - pub error_document: Option, - 
// FIXME for v3: remove serde(default) for field below - #[serde(default)] - pub routing_rules: Option>, -} - -// ---- DeleteBucket ---- - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -pub struct DeleteBucketRequest { - /// ID of the bucket to delete - pub id: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DeleteBucketResponse; - -// ---- CleanupIncompleteUploads ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CleanupIncompleteUploadsRequest { - pub bucket_id: String, - pub older_than_secs: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct CleanupIncompleteUploadsResponse { - pub uploads_deleted: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)] -#[into_params(parameter_in = Query)] -#[serde(rename_all = "camelCase")] -pub struct InspectObjectRequest { - pub bucket_id: String, - pub key: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct InspectObjectResponse { - /// ID of the bucket containing the inspected object - pub bucket_id: String, - /// Key of the inspected object - pub key: String, - /// List of versions currently stored for this object - pub versions: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Default)] -#[serde(rename_all = "camelCase")] -pub struct InspectObjectVersion { - /// Version ID - pub uuid: String, - /// Creation timestamp of this object version - pub timestamp: DateTime, - /// Whether this object version was created with SSE-C encryption - pub encrypted: bool, - /// Whether this object version is still uploading - pub uploading: bool, - /// Whether this is an aborted upload - pub aborted: bool, - /// Whether this version is a delete marker (a tombstone indicating that a previous version of - /// the object has been 
deleted) - pub delete_marker: bool, - /// Whether the object's data is stored inline (for small objects) - pub inline: bool, - /// Size of the object, in bytes - pub size: Option, - /// Etag of this object version - pub etag: Option, - /// Metadata (HTTP headers) associated with this object version - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub headers: Vec<(String, String)>, - /// List of data blocks for this object version - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub blocks: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct InspectObjectBlock { - /// Part number of the part containing this block, for multipart uploads - pub part_number: u64, - /// Offset of this block within the part - pub offset: u64, - /// Hash (blake2 sum) of the block's data - pub hash: String, - /// Length of the blocks's data - pub size: u64, -} - -// ********************************************** -// Operations on permissions for keys on buckets -// ********************************************** - -// ---- AllowBucketKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct AllowBucketKeyResponse(pub GetBucketInfoResponse); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct BucketKeyPermChangeRequest { - pub bucket_id: String, - pub access_key_id: String, - pub permissions: ApiBucketKeyPerm, -} - -// ---- DenyBucketKey ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct DenyBucketKeyResponse(pub GetBucketInfoResponse); - -// ********************************************** -// Operations on bucket aliases -// 
********************************************** - -// ---- AddBucketAlias ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct AddBucketAliasRequest { - pub bucket_id: String, - #[serde(flatten)] - pub alias: BucketAliasEnum, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct AddBucketAliasResponse(pub GetBucketInfoResponse); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(untagged)] -pub enum BucketAliasEnum { - #[serde(rename_all = "camelCase")] - Global { global_alias: String }, - #[serde(rename_all = "camelCase")] - Local { - local_alias: String, - access_key_id: String, - }, -} - -// ---- RemoveBucketAlias ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct RemoveBucketAliasRequest { - pub bucket_id: String, - #[serde(flatten)] - pub alias: BucketAliasEnum, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct RemoveBucketAliasResponse(pub GetBucketInfoResponse); - -// ********************************************** -// Node operations -// ********************************************** - -// ---- GetNodeInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct LocalGetNodeInfoRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalGetNodeInfoResponse { - pub node_id: String, - // FIXME for v3: remove Option<> and serde(default) for field below - /// hostname of this node - #[serde(default, skip_serializing_if = "Option::is_none")] - pub hostname: Option, - /// garage version running on this node - pub garage_version: String, - /// build-time features enabled for this garage release - pub garage_features: Option>, - /// rustc version with which this garage release was compiled - pub rust_version: String, - /// database engine used for metadata - pub db_engine: String, -} - -// 
---- GetNodeStatistics ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct LocalGetNodeStatisticsRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalGetNodeStatisticsResponse { - // FIXME for v3: remove freeform field and move display logic to garage crate - /// node statistics as a free-form string, kept for compatibility with nodes - /// running older v2.x versions of garage - pub freeform: String, - // FIXME for v3: remove serde(default) for fields below - /// metadata table statistics - #[serde(default, skip_serializing_if = "Option::is_none")] - pub table_stats: Option>, - /// block manager statistics - #[serde(default, skip_serializing_if = "Option::is_none")] - pub block_manager_stats: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct NodeTableStats { - /// name of metadata table - pub table_name: String, - /// number of items stored in metadata table - pub items: u64, - /// size of the merkle tree representing all items in the table - pub merkle_items: u64, - /// number of items in the merkle tree update queue - pub merkle_queue_len: u64, - /// number of items in the remote insert queue - pub insert_queue_len: u64, - /// number of items in the garbage collection queue - pub gc_queue_len: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Default)] -#[serde(rename_all = "camelCase")] -pub struct NodeBlockManagerStats { - /// number of reference counter entries - pub rc_entries: u64, - /// number of blocks in the resync queue - pub resync_queue_len: u64, - /// number of blocks with resync errors - pub resync_errors: u64, -} - -// ---- CreateMetadataSnapshot ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct LocalCreateMetadataSnapshotRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalCreateMetadataSnapshotResponse; - 
-// ---- LaunchRepairOperation ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalLaunchRepairOperationRequest { - pub repair_type: RepairType, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub enum RepairType { - Tables, - Blocks, - Versions, - MultipartUploads, - BlockRefs, - BlockRc, - Rebalance, - Scrub(ScrubCommand), - Aliases, - ClearResyncQueue, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub enum ScrubCommand { - Start, - Pause, - Resume, - Cancel, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalLaunchRepairOperationResponse; - -// ********************************************** -// Worker operations -// ********************************************** - -// ---- ListWorkers ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalListWorkersRequest { - #[serde(default)] - pub busy_only: bool, - #[serde(default)] - pub error_only: bool, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalListWorkersResponse(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct WorkerInfoResp { - pub id: u64, - pub name: String, - pub state: WorkerStateResp, - pub errors: u64, - pub consecutive_errors: u64, - pub last_error: Option, - pub tranquility: Option, - pub progress: Option, - pub queue_length: Option, - pub persistent_errors: Option, - pub freeform: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub enum WorkerStateResp { - Busy, - #[serde(rename_all = "camelCase")] - Throttled { - duration_secs: f32, - }, - Idle, - Done, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct 
WorkerLastError { - pub message: String, - pub secs_ago: u64, -} - -// ---- GetWorkerInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalGetWorkerInfoRequest { - pub id: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalGetWorkerInfoResponse(pub WorkerInfoResp); - -// ---- GetWorkerVariable ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalGetWorkerVariableRequest { - pub variable: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalGetWorkerVariableResponse(pub HashMap); - -// ---- SetWorkerVariable ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalSetWorkerVariableRequest { - pub variable: String, - pub value: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalSetWorkerVariableResponse { - pub variable: String, - pub value: String, -} - -// ********************************************** -// Block operations -// ********************************************** - -// ---- ListBlockErrors ---- - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct LocalListBlockErrorsRequest; - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -pub struct LocalListBlockErrorsResponse(pub Vec); - -#[derive(Serialize, Deserialize, Clone, Debug, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct BlockError { - pub block_hash: String, - pub refcount: u64, - pub error_count: u64, - pub last_try_secs_ago: u64, - pub next_try_in_secs: u64, -} - -// ---- GetBlockInfo ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalGetBlockInfoRequest { - pub block_hash: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalGetBlockInfoResponse { - pub block_hash: String, - pub refcount: u64, - pub versions: Vec, -} - 
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct BlockVersion { - pub version_id: String, - pub ref_deleted: bool, - pub version_deleted: bool, - pub garbage_collected: bool, - pub backlink: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub enum BlockVersionBacklink { - #[serde(rename_all = "camelCase")] - Object { bucket_id: String, key: String }, - #[serde(rename_all = "camelCase")] - Upload { - upload_id: String, - upload_deleted: bool, - upload_garbage_collected: bool, - bucket_id: Option, - key: Option, - }, -} - -// ---- RetryBlockResync ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(untagged)] -pub enum LocalRetryBlockResyncRequest { - #[serde(rename_all = "camelCase")] - All { all: bool }, - #[serde(rename_all = "camelCase")] - Blocks { block_hashes: Vec }, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalRetryBlockResyncResponse { - pub count: u64, -} - -// ---- PurgeBlocks ---- - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalPurgeBlocksRequest(pub Vec); - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(rename_all = "camelCase")] -pub struct LocalPurgeBlocksResponse { - pub blocks_purged: u64, - pub objects_deleted: u64, - pub uploads_deleted: u64, - pub versions_deleted: u64, - pub block_refs_purged: u64, -} diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs index aa8d8e96..6f0c474f 100644 --- a/src/api/admin/api_server.rs +++ b/src/api/admin/api_server.rs @@ -1,237 +1,333 @@ -use std::borrow::Cow; +use std::collections::HashMap; use std::sync::Arc; -use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION}; -use hyper::{body::Incoming as IncomingBody, Request, Response}; -use serde::{Deserialize, Serialize}; +use 
argon2::password_hash::PasswordHash; + +use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW}; +use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode}; use tokio::sync::watch; use opentelemetry::trace::SpanRef; #[cfg(feature = "metrics")] use opentelemetry_prometheus::PrometheusExporter; +#[cfg(feature = "metrics")] +use prometheus::{Encoder, TextEncoder}; use garage_model::garage::Garage; -use garage_rpc::{Endpoint as RpcEndpoint, *}; -use garage_table::EmptyKey; -use garage_util::background::BackgroundRunner; -use garage_util::data::Uuid; +use garage_rpc::system::ClusterHealthStatus; use garage_util::error::Error as GarageError; use garage_util::socket_address::UnixOrTCPSocketAddress; -use garage_util::time::now_msec; use garage_api_common::generic_server::*; use garage_api_common::helpers::*; -use crate::api::*; +use crate::bucket::*; +use crate::cluster::*; use crate::error::*; +use crate::key::*; use crate::router_v0; -use crate::router_v1; -use crate::Authorization; -use crate::RequestHandler; - -// ---- FOR RPC ---- - -pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc"; - -#[derive(Debug, Serialize, Deserialize)] -pub enum AdminRpc { - Proxy(AdminApiRequest), - Internal(LocalAdminApiRequest), -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum AdminRpcResponse { - ProxyApiOkResponse(TaggedAdminApiResponse), - InternalApiOkResponse(LocalAdminApiResponse), - ApiErrorResponse { - http_code: u16, - error_code: String, - message: String, - }, -} - -impl Rpc for AdminRpc { - type Response = Result; -} - -impl EndpointHandler for AdminApiServer { - async fn handle( - self: &Arc, - message: &AdminRpc, - _from: NodeID, - ) -> Result { - match message { - AdminRpc::Proxy(req) => { - info!("Proxied admin API request: {}", req.name()); - let res = req.clone().handle(&self.garage, self).await; - match res { - Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())), - Err(e) => 
Ok(AdminRpcResponse::ApiErrorResponse { - http_code: e.http_status_code().as_u16(), - error_code: e.code().to_string(), - message: e.to_string(), - }), - } - } - AdminRpc::Internal(req) => { - info!("Internal admin API request: {}", req.name()); - let res = req.clone().handle(&self.garage, self).await; - match res { - Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)), - Err(e) => Ok(AdminRpcResponse::ApiErrorResponse { - http_code: e.http_status_code().as_u16(), - error_code: e.code().to_string(), - message: e.to_string(), - }), - } - } - } - } -} - -// ---- FOR HTTP ---- +use crate::router_v1::{Authorization, Endpoint}; pub type ResBody = BoxBody; pub struct AdminApiServer { garage: Arc, #[cfg(feature = "metrics")] - pub(crate) exporter: PrometheusExporter, + exporter: PrometheusExporter, metrics_token: Option, - metrics_require_token: bool, admin_token: Option, - pub(crate) background: Arc, - pub(crate) endpoint: Arc>, -} - -pub enum HttpEndpoint { - Old(router_v1::Endpoint), - New(String), } impl AdminApiServer { pub fn new( garage: Arc, - background: Arc, #[cfg(feature = "metrics")] exporter: PrometheusExporter, - ) -> Arc { + ) -> Self { let cfg = &garage.config.admin; let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token); let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token); - let metrics_require_token = cfg.metrics_require_token; - - let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into()); - let admin = Arc::new(Self { + Self { garage, #[cfg(feature = "metrics")] exporter, metrics_token, - metrics_require_token, admin_token, - background, - endpoint, - }); - admin.endpoint.set_handler(admin.clone()); - admin + } } pub async fn run( - self: Arc, + self, bind_addr: UnixOrTCPSocketAddress, must_exit: watch::Receiver, ) -> Result<(), GarageError> { let region = self.garage.config.s3_api.s3_region.clone(); - ApiServer::new(region, ArcAdminApiServer(self)) + ApiServer::new(region, self) .run_server(bind_addr, 
Some(0o220), must_exit) .await } - async fn handle_http_api( + fn handle_options(&self, _req: &Request) -> Result, Error> { + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .header(ALLOW, "OPTIONS, GET, POST") + .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST") + .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .body(empty_body())?) + } + + async fn handle_check_domain( &self, req: Request, - endpoint: HttpEndpoint, ) -> Result, Error> { - let auth_header = req.headers().get(AUTHORIZATION).cloned(); + let query_params: HashMap = req + .uri() + .query() + .map(|v| { + url::form_urlencoded::parse(v.as_bytes()) + .into_owned() + .collect() + }) + .unwrap_or_else(HashMap::new); - let request = match endpoint { - HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?, - HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?, + let has_domain_key = query_params.contains_key("domain"); + + if !has_domain_key { + return Err(Error::bad_request("No domain query string found")); + } + + let domain = query_params + .get("domain") + .ok_or_internal_error("Could not parse domain query string")?; + + if self.check_domain(domain).await? { + Ok(Response::builder() + .status(StatusCode::OK) + .body(string_body(format!( + "Domain '{domain}' is managed by Garage" + )))?) + } else { + Err(Error::bad_request(format!( + "Domain '{domain}' is not managed by Garage" + ))) + } + } + + async fn check_domain(&self, domain: &str) -> Result { + // Resolve bucket from domain name, inferring if the website must be activated for the + // domain to be valid. 
+ let (bucket_name, must_check_website) = if let Some(bname) = self + .garage + .config + .s3_api + .root_domain + .as_ref() + .and_then(|rd| host_to_bucket(domain, rd)) + { + (bname.to_string(), false) + } else if let Some(bname) = self + .garage + .config + .s3_web + .as_ref() + .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str())) + { + (bname.to_string(), true) + } else { + (domain.to_string(), true) }; - let (global_token_hash, token_required) = match request.authorization_type() { - Authorization::None => (None, false), - Authorization::MetricsToken => ( - self.metrics_token.as_deref(), - self.metrics_token.is_some() || self.metrics_require_token, + let bucket_id = match self + .garage + .bucket_helper() + .resolve_global_bucket_name(&bucket_name) + .await? + { + Some(bucket_id) => bucket_id, + None => return Ok(false), + }; + + if !must_check_website { + return Ok(true); + } + + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let bucket_state = bucket.state.as_option().unwrap(); + let bucket_website_config = bucket_state.website_config.get(); + + match bucket_website_config { + Some(_v) => Ok(true), + None => Ok(false), + } + } + + fn handle_health(&self) -> Result, Error> { + let health = self.garage.system.health(); + + let (status, status_str) = match health.status { + ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"), + ClusterHealthStatus::Degraded => ( + StatusCode::OK, + "Garage is operational but some storage nodes are unavailable", + ), + ClusterHealthStatus::Unavailable => ( + StatusCode::SERVICE_UNAVAILABLE, + "Quorum is not available for some/all partitions, reads and writes will fail", ), - Authorization::AdminToken => (self.admin_token.as_deref(), true), }; + let status_str = format!( + "{}\nConsult the full health check API endpoint at /v1/health for more details\n", + status_str + ); - if token_required { - verify_authorization(&self.garage, 
global_token_hash, auth_header, request.name())?; - } + Ok(Response::builder() + .status(status) + .header(http::header::CONTENT_TYPE, "text/plain") + .body(string_body(status_str))?) + } - match request { - AdminApiRequest::Options(req) => req.handle(&self.garage, self).await, - AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, self).await, - AdminApiRequest::Health(req) => req.handle(&self.garage, self).await, - AdminApiRequest::Metrics(req) => req.handle(&self.garage, self).await, - req => { - let res = req.handle(&self.garage, self).await?; - let mut res = json_ok_response(&res)?; - res.headers_mut() - .insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*")); - Ok(res) - } + fn handle_metrics(&self) -> Result, Error> { + #[cfg(feature = "metrics")] + { + use opentelemetry::trace::Tracer; + + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + let tracer = opentelemetry::global::tracer("garage"); + let metric_families = tracer.in_span("admin/gather_metrics", |_| { + self.exporter.registry().gather() + }); + + encoder + .encode(&metric_families, &mut buffer) + .ok_or_internal_error("Could not serialize metrics")?; + + Ok(Response::builder() + .status(StatusCode::OK) + .header(http::header::CONTENT_TYPE, encoder.format_type()) + .body(bytes_body(buffer.into()))?) 
} + #[cfg(not(feature = "metrics"))] + Err(Error::bad_request( + "Garage was built without the metrics feature".to_string(), + )) } } -struct ArcAdminApiServer(Arc); - -impl ApiHandler for ArcAdminApiServer { +impl ApiHandler for AdminApiServer { const API_NAME: &'static str = "admin"; const API_NAME_DISPLAY: &'static str = "Admin"; - type Endpoint = HttpEndpoint; + type Endpoint = Endpoint; type Error = Error; - fn parse_endpoint(&self, req: &Request) -> Result { + fn parse_endpoint(&self, req: &Request) -> Result { if req.uri().path().starts_with("/v0/") { let endpoint_v0 = router_v0::Endpoint::from_request(req)?; - let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?; - Ok(HttpEndpoint::Old(endpoint_v1)) - } else if req.uri().path().starts_with("/v1/") { - let endpoint_v1 = router_v1::Endpoint::from_request(req)?; - Ok(HttpEndpoint::Old(endpoint_v1)) + Endpoint::from_v0(endpoint_v0) } else { - Ok(HttpEndpoint::New(req.uri().path().to_string())) + Endpoint::from_request(req) } } async fn handle( &self, req: Request, - endpoint: HttpEndpoint, + endpoint: Endpoint, ) -> Result, Error> { - self.0.handle_http_api(req, endpoint).await - } + let required_auth_hash = + match endpoint.authorization_type() { + Authorization::None => None, + Authorization::MetricsToken => self.metrics_token.as_deref(), + Authorization::AdminToken => match self.admin_token.as_deref() { + None => return Err(Error::forbidden( + "Admin token isn't configured, admin API access is disabled for security.", + )), + Some(t) => Some(t), + }, + }; - fn key_id_from_request(&self, req: &Request) -> Option { - let auth_header = req.headers().get(AUTHORIZATION)?; - let token = parse_authorization(auth_header).ok()?; - let key_id = token.split_once('.')?.0; - Some(key_id.to_string()) + if let Some(password_hash) = required_auth_hash { + match req.headers().get("Authorization") { + None => return Err(Error::forbidden("Authorization token must be provided")), + Some(authorization) => { + 
verify_bearer_token(&authorization, password_hash)?; + } + } + } + + match endpoint { + Endpoint::Options => self.handle_options(&req), + Endpoint::CheckDomain => self.handle_check_domain(req).await, + Endpoint::Health => self.handle_health(), + Endpoint::Metrics => self.handle_metrics(), + Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await, + Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await, + Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await, + // Layout + Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await, + Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await, + Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await, + Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await, + // Keys + Endpoint::ListKeys => handle_list_keys(&self.garage).await, + Endpoint::GetKeyInfo { + id, + search, + show_secret_key, + } => { + let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false); + handle_get_key_info(&self.garage, id, search, show_secret_key).await + } + Endpoint::CreateKey => handle_create_key(&self.garage, req).await, + Endpoint::ImportKey => handle_import_key(&self.garage, req).await, + Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await, + Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await, + // Buckets + Endpoint::ListBuckets => handle_list_buckets(&self.garage).await, + Endpoint::GetBucketInfo { id, global_alias } => { + handle_get_bucket_info(&self.garage, id, global_alias).await + } + Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await, + Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await, + Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await, + // Bucket-key permissions + Endpoint::BucketAllowKey => { + 
handle_bucket_change_key_perm(&self.garage, req, true).await + } + Endpoint::BucketDenyKey => { + handle_bucket_change_key_perm(&self.garage, req, false).await + } + // Bucket aliasing + Endpoint::GlobalAliasBucket { id, alias } => { + handle_global_alias_bucket(&self.garage, id, alias).await + } + Endpoint::GlobalUnaliasBucket { id, alias } => { + handle_global_unalias_bucket(&self.garage, id, alias).await + } + Endpoint::LocalAliasBucket { + id, + access_key_id, + alias, + } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await, + Endpoint::LocalUnaliasBucket { + id, + access_key_id, + alias, + } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await, + } } } -impl ApiEndpoint for HttpEndpoint { - fn name(&self) -> Cow<'static, str> { - match self { - Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()), - Self::New(path) => Cow::Owned(path.clone()), - } +impl ApiEndpoint for Endpoint { + fn name(&self) -> &'static str { + Endpoint::name(self) } fn add_span_attributes(&self, _span: SpanRef<'_>) {} @@ -251,91 +347,20 @@ fn hash_bearer_token(token: &str) -> String { .to_string() } -fn parse_authorization(auth_header: &hyper::http::HeaderValue) -> Result<&str, Error> { - let token = auth_header +fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> { + use argon2::{password_hash::PasswordVerifier, Argon2}; + + let parsed_hash = PasswordHash::new(&password_hash).unwrap(); + + token .to_str()? .strip_prefix("Bearer ") - .ok_or_else(|| Error::forbidden("Invalid Authorization header"))? 
- .trim(); - Ok(token) -} - -fn verify_authorization( - garage: &Garage, - global_token_hash: Option<&str>, - auth_header: Option, - endpoint_name: &str, -) -> Result<(), Error> { - use argon2::{password_hash::PasswordHash, password_hash::PasswordVerifier, Argon2}; - - let invalid_msg = "Invalid bearer token"; - - let token = match &auth_header { - None => { - return Err(Error::forbidden( - "Bearer token must be provided in Authorization header", - )) - } - Some(authorization) => parse_authorization(authorization)?, - }; - - let token_hash_string = if let Some((prefix, _)) = token.split_once('.') { - garage - .admin_token_table - .get_local(&EmptyKey, &prefix.to_string())? - .and_then(|k| k.state.into_option()) - .filter(|p| !p.is_expired(now_msec())) - // GetCurrentAdminTokenInfo endpoint must be accessible even if it is not in the token scopes - .filter(|p| p.has_scope(endpoint_name) || endpoint_name == "GetCurrentAdminTokenInfo") - .ok_or_else(|| Error::forbidden(invalid_msg))? - .token_hash - } else { - global_token_hash - .ok_or_else(|| Error::forbidden(invalid_msg))? 
- .to_string() - }; - - let token_hash = - PasswordHash::new(&token_hash_string).ok_or_internal_error("Could not parse token hash")?; - - Argon2::default() - .verify_password(token.as_bytes(), &token_hash) - .map_err(|_| Error::forbidden(invalid_msg))?; + .and_then(|token| { + Argon2::default() + .verify_password(token.trim().as_bytes(), &parsed_hash) + .ok() + }) + .ok_or_else(|| Error::forbidden("Invalid authorization token"))?; Ok(()) } - -pub(crate) fn find_matching_nodes(garage: &Garage, spec: &str) -> Result, Error> { - if spec == "self" { - Ok(vec![garage.system.id]) - } else { - // Collect all nodes currently up and/or in cluster layout - let mut res = vec![]; - if let Ok(all_nodes) = garage.system.cluster_layout().all_nodes() { - res = all_nodes.to_vec(); - } - for node in garage.system.get_known_nodes() { - if node.is_up && !res.contains(&node.id) { - res.push(node.id); - } - } - - if spec == "*" { - // match all nodes - Ok(res) - } else { - // filter nodes that match spec - res.retain(|node| hex::encode(node).starts_with(spec)); - if res.is_empty() { - Err(Error::bad_request(format!("No nodes matching {}", spec))) - } else if res.len() > 1 { - Err(Error::bad_request(format!( - "Multiple nodes matching {}: {:?}", - spec, res - ))) - } else { - Ok(res) - } - } - } -} diff --git a/src/api/admin/block.rs b/src/api/admin/block.rs deleted file mode 100644 index 30729866..00000000 --- a/src/api/admin/block.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::sync::Arc; - -use garage_util::data::*; -use garage_util::error::Error as GarageError; -use garage_util::time::now_msec; - -use garage_table::EmptyKey; - -use garage_model::garage::Garage; -use garage_model::s3::object_table::*; -use garage_model::s3::version_table::*; - -use garage_api_common::common_error::CommonErrorDerivative; - -use crate::api::*; -use crate::error::*; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for LocalListBlockErrorsRequest { - type Response = LocalListBlockErrorsResponse; - 
- async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let errors = garage.block_manager.list_resync_errors()?; - let now = now_msec(); - let errors = errors - .into_iter() - .map(|e| BlockError { - block_hash: hex::encode(e.hash), - refcount: e.refcount, - error_count: e.error_count, - last_try_secs_ago: now.saturating_sub(e.last_try) / 1000, - next_try_in_secs: e.next_try.saturating_sub(now) / 1000, - }) - .collect(); - Ok(LocalListBlockErrorsResponse(errors)) - } -} - -impl RequestHandler for LocalGetBlockInfoRequest { - type Response = LocalGetBlockInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let hash = find_block_hash_by_prefix(garage, &self.block_hash)?; - let refcount = garage.block_manager.get_block_rc(&hash)?; - let block_refs = garage - .block_ref_table - .get_range(&hash, None, None, 10000, Default::default()) - .await?; - let mut versions = vec![]; - for br in block_refs { - if let Some(v) = garage.version_table.get(&br.version, &EmptyKey).await? { - let bl = match &v.backlink { - VersionBacklink::MultipartUpload { upload_id } => { - if let Some(u) = garage.mpu_table.get(upload_id, &EmptyKey).await? 
{ - BlockVersionBacklink::Upload { - upload_id: hex::encode(upload_id), - upload_deleted: u.deleted.get(), - upload_garbage_collected: false, - bucket_id: Some(hex::encode(u.bucket_id)), - key: Some(u.key.to_string()), - } - } else { - BlockVersionBacklink::Upload { - upload_id: hex::encode(upload_id), - upload_deleted: true, - upload_garbage_collected: true, - bucket_id: None, - key: None, - } - } - } - VersionBacklink::Object { bucket_id, key } => BlockVersionBacklink::Object { - bucket_id: hex::encode(bucket_id), - key: key.to_string(), - }, - }; - versions.push(BlockVersion { - version_id: hex::encode(br.version), - ref_deleted: br.deleted.get(), - version_deleted: v.deleted.get(), - garbage_collected: false, - backlink: Some(bl), - }); - } else { - versions.push(BlockVersion { - version_id: hex::encode(br.version), - ref_deleted: br.deleted.get(), - version_deleted: true, - garbage_collected: true, - backlink: None, - }); - } - } - Ok(LocalGetBlockInfoResponse { - block_hash: hex::encode(hash), - refcount, - versions, - }) - } -} - -impl RequestHandler for LocalRetryBlockResyncRequest { - type Response = LocalRetryBlockResyncResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - match self { - Self::All { all: true } => { - let blocks = garage.block_manager.list_resync_errors()?; - for b in blocks.iter() { - garage.block_manager.resync.clear_backoff(&b.hash)?; - } - Ok(LocalRetryBlockResyncResponse { - count: blocks.len() as u64, - }) - } - Self::All { all: false } => Err(Error::bad_request("nonsense")), - Self::Blocks { block_hashes } => { - for hash in block_hashes.iter() { - let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; - let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; - garage.block_manager.resync.clear_backoff(&hash)?; - } - Ok(LocalRetryBlockResyncResponse { - count: block_hashes.len() as u64, - }) - } - } - } -} - -impl RequestHandler for LocalPurgeBlocksRequest { - type 
Response = LocalPurgeBlocksResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut obj_dels = 0; - let mut mpu_dels = 0; - let mut ver_dels = 0; - let mut br_dels = 0; - - for hash in self.0.iter() { - let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; - let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; - let block_refs = garage - .block_ref_table - .get_range(&hash, None, None, 10000, Default::default()) - .await?; - - for br in block_refs { - if let Some(version) = garage.version_table.get(&br.version, &EmptyKey).await? { - handle_block_purge_version_backlink( - garage, - &version, - &mut obj_dels, - &mut mpu_dels, - ) - .await?; - - if !version.deleted.get() { - let deleted_version = Version::new(version.uuid, version.backlink, true); - garage.version_table.insert(&deleted_version).await?; - ver_dels += 1; - } - } - if !br.deleted.get() { - let mut br = br; - br.deleted.set(); - garage.block_ref_table.insert(&br).await?; - br_dels += 1; - } - } - } - - Ok(LocalPurgeBlocksResponse { - blocks_purged: self.0.len() as u64, - block_refs_purged: br_dels, - versions_deleted: ver_dels, - objects_deleted: obj_dels, - uploads_deleted: mpu_dels, - }) - } -} - -fn find_block_hash_by_prefix(garage: &Arc, prefix: &str) -> Result { - if prefix.len() < 4 { - return Err(Error::bad_request( - "Please specify at least 4 characters of the block hash", - )); - } - - let prefix_bin = hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?; - - let iter = garage - .block_ref_table - .data - .store - .range(&prefix_bin[..]..) 
- .map_err(GarageError::from)?; - let mut found = None; - for item in iter { - let (k, _v) = item.map_err(GarageError::from)?; - let hash = Hash::try_from(&k[..32]).unwrap(); - if hash.as_slice()[..prefix_bin.len()] != prefix_bin { - break; - } - if hex::encode(hash.as_slice()).starts_with(prefix) { - match &found { - Some(x) if *x == hash => (), - Some(_) => { - return Err(Error::bad_request(format!( - "Several blocks match prefix `{}`", - prefix - ))); - } - None => { - found = Some(hash); - } - } - } - } - - found.ok_or_else(|| Error::NoSuchBlock(prefix.to_string())) -} - -async fn handle_block_purge_version_backlink( - garage: &Arc, - version: &Version, - obj_dels: &mut u64, - mpu_dels: &mut u64, -) -> Result<(), Error> { - let (bucket_id, key, ov_id) = match &version.backlink { - VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid), - VersionBacklink::MultipartUpload { upload_id } => { - if let Some(mut mpu) = garage.mpu_table.get(upload_id, &EmptyKey).await? { - if !mpu.deleted.get() { - mpu.parts.clear(); - mpu.deleted.set(); - garage.mpu_table.insert(&mpu).await?; - *mpu_dels += 1; - } - (mpu.bucket_id, mpu.key.clone(), *upload_id) - } else { - return Ok(()); - } - } - }; - - if let Some(object) = garage.object_table.get(&bucket_id, &key).await? 
{ - let ov = object.versions().iter().rev().find(|v| v.is_complete()); - if let Some(ov) = ov { - if ov.uuid == ov_id { - let del_uuid = gen_uuid(); - let deleted_object = Object::new( - bucket_id, - key, - vec![ObjectVersion { - uuid: del_uuid, - timestamp: ov.timestamp + 1, - state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), - }], - ); - garage.object_table.insert(&deleted_object).await?; - *obj_dels += 1; - } - } - } - - Ok(()) -} diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs index e723d3b4..207693b6 100644 --- a/src/api/admin/bucket.rs +++ b/src/api/admin/bucket.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use std::sync::Arc; -use std::time::Duration; -use chrono::DateTime; +use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; use garage_util::crdt::*; use garage_util::data::*; @@ -18,654 +18,102 @@ use garage_model::s3::mpu_table; use garage_model::s3::object_table::*; use garage_api_common::common_error::CommonError; -use garage_api_common::xml; +use garage_api_common::helpers::*; -use crate::api::*; +use crate::api_server::ResBody; use crate::error::*; -use crate::{Admin, RequestHandler}; +use crate::key::ApiBucketKeyPerm; -impl RequestHandler for ListBucketsRequest { - type Response = ListBucketsResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let buckets = garage - .bucket_table - .get_range( - &EmptyKey, - None, - Some(DeletedFilter::NotDeleted), - 1_000_000, - EnumerationOrder::Forward, - ) - .await?; - - let res = buckets - .into_iter() - .map(|b| { - let state = b.state.as_option().unwrap(); - ListBucketsResponseItem { - id: hex::encode(b.id), - created: DateTime::from_timestamp_millis(state.creation_date as i64) - .expect("invalid timestamp stored in db"), - global_aliases: state - .aliases - .items() - .iter() - .filter(|(_, _, a)| *a) - .map(|(n, _, _)| n.to_string()) - .collect::>(), - local_aliases: state 
- .local_aliases - .items() - .iter() - .filter(|(_, _, a)| *a) - .map(|((k, n), _, _)| BucketLocalAlias { - access_key_id: k.to_string(), - alias: n.to_string(), - }) - .collect::>(), - } - }) - .collect::>(); - - Ok(ListBucketsResponse(res)) - } -} - -impl RequestHandler for GetBucketInfoRequest { - type Response = GetBucketInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let bucket_id = match (self.id, self.global_alias, self.search) { - (Some(id), None, None) => parse_bucket_id(&id)?, - (None, Some(ga), None) => garage - .bucket_alias_table - .get(&EmptyKey, &ga) - .await? - .and_then(|x| *x.state.get()) - .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?, - (None, None, Some(search)) => { - let helper = garage.bucket_helper(); - if let Some(bucket) = helper.resolve_global_bucket(&search).await? { - bucket.id - } else { - let hexdec = if search.len() >= 2 { - search - .get(..search.len() & !1) - .and_then(|x| hex::decode(x).ok()) - } else { - None - }; - let hex = hexdec - .ok_or_else(|| Error::Common(CommonError::NoSuchBucket(search.clone())))?; - - let mut start = [0u8; 32]; - start - .as_mut_slice() - .get_mut(..hex.len()) - .ok_or_bad_request("invalid length")? - .copy_from_slice(&hex); - let mut candidates = garage - .bucket_table - .get_range( - &EmptyKey, - Some(start.into()), - Some(DeletedFilter::NotDeleted), - 10, - EnumerationOrder::Forward, - ) - .await? 
- .into_iter() - .collect::>(); - candidates.retain(|x| hex::encode(x.id).starts_with(&search)); - if candidates.is_empty() { - return Err(Error::Common(CommonError::NoSuchBucket(search.clone()))); - } else if candidates.len() == 1 { - candidates.into_iter().next().unwrap().id - } else { - return Err(Error::bad_request(format!( - "Several matching buckets: {}", - search - ))); - } - } - } - _ => { - return Err(Error::bad_request( - "Either id, globalAlias or search must be provided (but not several of them)", - )); - } - }; - - bucket_info_results(garage, bucket_id).await - } -} - -impl RequestHandler for CreateBucketRequest { - type Response = CreateBucketResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let helper = garage.locked_helper().await; - - if let Some(ga) = &self.global_alias { - if !is_valid_bucket_name(ga, garage.config.allow_punycode) { - return Err(Error::bad_request(format!( - "{}: {}", - ga, INVALID_BUCKET_NAME_MESSAGE - ))); - } - - if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? 
{ - if alias.state.get().is_some() { - return Err(CommonError::BucketAlreadyExists.into()); - } - } - } - - if let Some(la) = &self.local_alias { - if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) { - return Err(Error::bad_request(format!( - "{}: {}", - la.alias, INVALID_BUCKET_NAME_MESSAGE - ))); - } - - let key = helper.key().get_existing_key(&la.access_key_id).await?; - let state = key.state.as_option().unwrap(); - if state.local_aliases.get(&la.alias).is_some() { - return Err(Error::bad_request("Local alias already exists")); - } - } - - let bucket = Bucket::new(); - garage.bucket_table.insert(&bucket).await?; - - if let Some(ga) = &self.global_alias { - helper.set_global_bucket_alias(bucket.id, ga).await?; - } - - if let Some(la) = &self.local_alias { - helper - .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias) - .await?; - - if la.allow.read || la.allow.write || la.allow.owner { - helper - .set_bucket_key_permissions( - bucket.id, - &la.access_key_id, - BucketKeyPerm { - timestamp: now_msec(), - allow_read: la.allow.read, - allow_write: la.allow.write, - allow_owner: la.allow.owner, - }, - ) - .await?; - } - } - - Ok(CreateBucketResponse( - bucket_info_results(garage, bucket.id).await?, - )) - } -} - -impl RequestHandler for DeleteBucketRequest { - type Response = DeleteBucketResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let helper = garage.locked_helper().await; - - let bucket_id = parse_bucket_id(&self.id)?; - - let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?; - let state = bucket.state.as_option().unwrap(); - - // Check bucket is empty - if !helper.bucket().is_bucket_empty(bucket_id).await? { - return Err(CommonError::BucketNotEmpty.into()); - } - - // --- done checking, now commit --- - // 1. 
delete authorization from keys that had access - for (key_id, perm) in bucket.authorized_keys() { - if perm.is_any() { - helper - .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) - .await?; - } - } - // 2. delete all local aliases - for ((key_id, alias), _, active) in state.local_aliases.items().iter() { - if *active { - helper - .purge_local_bucket_alias(bucket.id, key_id, alias) - .await?; - } - } - // 3. delete all global aliases - for (alias, _, active) in state.aliases.items().iter() { - if *active { - helper.purge_global_bucket_alias(bucket.id, alias).await?; - } - } - - // 4. delete bucket - bucket.state = Deletable::delete(); - garage.bucket_table.insert(&bucket).await?; - - Ok(DeleteBucketResponse) - } -} - -impl RequestHandler for UpdateBucketRequest { - type Response = UpdateBucketResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let bucket_id = parse_bucket_id(&self.id)?; - - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let state = bucket.state.as_option_mut().unwrap(); - - if let Some(wa) = self.body.website_access { - if wa.enabled { - let redirect_all = state - .website_config - .get() - .as_ref() - .and_then(|wc| wc.redirect_all.clone()); - - let routing_rules = if let Some(rr) = wa.routing_rules { - for r in rr.iter() { - r.validate()?; - } - rr.into_iter() - .map(xml::website::RoutingRule::into_garage_routing_rule) - .collect::>() - } else { - state - .website_config - .get() - .as_ref() - .map(|wc| wc.routing_rules.clone()) - .unwrap_or_default() - }; - - state.website_config.update(Some(WebsiteConfig { - index_document: wa.index_document.ok_or_bad_request( - "Please specify indexDocument when enabling website access.", - )?, - error_document: wa.error_document, - redirect_all, - routing_rules, - })); - } else { - if wa.index_document.is_some() || wa.error_document.is_some() { - return Err(Error::bad_request( - "Cannot specify 
indexDocument or errorDocument when disabling website access.", - )); - } - state.website_config.update(None); - } - } - - if let Some(q) = self.body.quotas { - state.quotas.update(BucketQuotas { - max_size: q.max_size, - max_objects: q.max_objects, - }); - } - - if let Some(cr) = self.body.cors_rules { - let cors_config = if cr.is_empty() { - None - } else { - let cc = xml::cors::CorsConfiguration { - xmlns: (), - cors_rules: cr, - }; - cc.validate()?; - Some(cc.into_garage_cors_config()?) - }; - - state.cors_config.update(cors_config); - } - - if let Some(lr) = self.body.lifecycle_rules { - let lifecycle_config = if lr.is_empty() { - None - } else { - let lc = xml::lifecycle::LifecycleConfiguration { - xmlns: (), - lifecycle_rules: lr, - }; - Some( - lc.validate_into_garage_lifecycle_config() - .ok_or_bad_request("Invalid lifecycle configuration")?, - ) - }; - - state.lifecycle_config.update(lifecycle_config); - } - - garage.bucket_table.insert(&bucket).await?; - - Ok(UpdateBucketResponse( - bucket_info_results(garage, bucket.id).await?, - )) - } -} - -impl RequestHandler for CleanupIncompleteUploadsRequest { - type Response = CleanupIncompleteUploadsResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let duration = Duration::from_secs(self.older_than_secs); - - let bucket_id = parse_bucket_id(&self.bucket_id)?; - - let count = garage - .bucket_helper() - .cleanup_incomplete_uploads(&bucket_id, duration) - .await?; - - Ok(CleanupIncompleteUploadsResponse { - uploads_deleted: count as u64, - }) - } -} - -impl RequestHandler for InspectObjectRequest { - type Response = InspectObjectResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let bucket_id = parse_bucket_id(&self.bucket_id)?; - - let object = garage - .object_table - .get(&bucket_id, &self.key) - .await? 
- .ok_or_else(|| Error::NoSuchKey)?; - - let mut versions = vec![]; - for obj_ver in object.versions().iter() { - let ver = garage.version_table.get(&obj_ver.uuid, &EmptyKey).await?; - let blocks = ver - .map(|v| { - v.blocks - .items() - .iter() - .map(|(vk, vb)| InspectObjectBlock { - part_number: vk.part_number, - offset: vk.offset, - hash: hex::encode(vb.hash), - size: vb.size, - }) - .collect::>() - }) - .unwrap_or_default(); - let uuid = hex::encode(obj_ver.uuid); - let timestamp = DateTime::from_timestamp_millis(obj_ver.timestamp as i64) - .expect("invalid timestamp in db"); - match &obj_ver.state { - ObjectVersionState::Uploading { encryption, .. } => { - versions.push(InspectObjectVersion { - uuid, - timestamp, - encrypted: !matches!(encryption, ObjectVersionEncryption::Plaintext { .. }), - uploading: true, - headers: match encryption { - ObjectVersionEncryption::Plaintext { inner } => inner.headers.clone(), - _ => vec![], - }, - blocks, - ..Default::default() - }); - } - ObjectVersionState::Complete(data) => match data { - ObjectVersionData::DeleteMarker => { - versions.push(InspectObjectVersion { - uuid, - timestamp, - delete_marker: true, - ..Default::default() - }); - } - ObjectVersionData::Inline(meta, _) => { - versions.push(InspectObjectVersion { - uuid, - timestamp, - inline: true, - size: Some(meta.size), - etag: Some(meta.etag.clone()), - encrypted: !matches!( - meta.encryption, - ObjectVersionEncryption::Plaintext { .. } - ), - headers: match &meta.encryption { - ObjectVersionEncryption::Plaintext { inner } => { - inner.headers.clone() - } - _ => vec![], - }, - ..Default::default() - }); - } - ObjectVersionData::FirstBlock(meta, _) => { - versions.push(InspectObjectVersion { - uuid, - timestamp, - size: Some(meta.size), - etag: Some(meta.etag.clone()), - encrypted: !matches!( - meta.encryption, - ObjectVersionEncryption::Plaintext { .. 
} - ), - headers: match &meta.encryption { - ObjectVersionEncryption::Plaintext { inner } => { - inner.headers.clone() - } - _ => vec![], - }, - blocks, - ..Default::default() - }); - } - }, - ObjectVersionState::Aborted => { - versions.push(InspectObjectVersion { - uuid, - timestamp, - aborted: true, - blocks, - ..Default::default() - }); - } - } - } - - Ok(InspectObjectResponse { - bucket_id: hex::encode(object.bucket_id), - key: object.key, - versions, - }) - } -} - -// ---- BUCKET/KEY PERMISSIONS ---- - -impl RequestHandler for AllowBucketKeyRequest { - type Response = AllowBucketKeyResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let res = handle_bucket_change_key_perm(garage, self.0, true).await?; - Ok(AllowBucketKeyResponse(res)) - } -} - -impl RequestHandler for DenyBucketKeyRequest { - type Response = DenyBucketKeyResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let res = handle_bucket_change_key_perm(garage, self.0, false).await?; - Ok(DenyBucketKeyResponse(res)) - } -} - -pub async fn handle_bucket_change_key_perm( - garage: &Arc, - req: BucketKeyPermChangeRequest, - new_perm_flag: bool, -) -> Result { - let helper = garage.locked_helper().await; - - let bucket_id = parse_bucket_id(&req.bucket_id)?; - - let bucket = helper.bucket().get_existing_bucket(bucket_id).await?; - let state = bucket.state.as_option().unwrap(); - - let key = helper.key().get_existing_key(&req.access_key_id).await?; - - let mut perm = state - .authorized_keys - .get(&key.key_id) - .cloned() - .unwrap_or(BucketKeyPerm::NO_PERMISSIONS); - - if req.permissions.read { - perm.allow_read = new_perm_flag; - } - if req.permissions.write { - perm.allow_write = new_perm_flag; - } - if req.permissions.owner { - perm.allow_owner = new_perm_flag; - } - - helper - .set_bucket_key_permissions(bucket.id, &key.key_id, perm) +pub async fn handle_list_buckets(garage: &Arc) -> Result, Error> { + let buckets = garage + 
.bucket_table + .get_range( + &EmptyKey, + None, + Some(DeletedFilter::NotDeleted), + 10000, + EnumerationOrder::Forward, + ) .await?; - bucket_info_results(garage, bucket.id).await + let res = buckets + .into_iter() + .map(|b| { + let state = b.state.as_option().unwrap(); + ListBucketResultItem { + id: hex::encode(b.id), + global_aliases: state + .aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + local_aliases: state + .local_aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|((k, n), _, _)| BucketLocalAlias { + access_key_id: k.to_string(), + alias: n.to_string(), + }) + .collect::>(), + } + }) + .collect::>(); + + Ok(json_ok_response(&res)?) } -// ---- BUCKET ALIASES ---- +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ListBucketResultItem { + id: String, + global_aliases: Vec, + local_aliases: Vec, +} -impl RequestHandler for AddBucketAliasRequest { - type Response = AddBucketAliasResponse; +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct BucketLocalAlias { + access_key_id: String, + alias: String, +} - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let bucket_id = parse_bucket_id(&self.bucket_id)?; +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct ApiBucketQuotas { + max_size: Option, + max_objects: Option, +} - let helper = garage.locked_helper().await; - - match self.alias { - BucketAliasEnum::Global { global_alias } => { - helper - .set_global_bucket_alias(bucket_id, &global_alias) - .await?; - } - BucketAliasEnum::Local { - local_alias, - access_key_id, - } => { - helper - .set_local_bucket_alias(bucket_id, &access_key_id, &local_alias) - .await?; - } +pub async fn handle_get_bucket_info( + garage: &Arc, + id: Option, + global_alias: Option, +) -> Result, Error> { + let bucket_id = match (id, global_alias) { + (Some(id), None) => parse_bucket_id(&id)?, + (None, Some(ga)) => garage + 
.bucket_helper() + .resolve_global_bucket_name(&ga) + .await? + .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?, + _ => { + return Err(Error::bad_request( + "Either id or globalAlias must be provided (but not both)", + )); } + }; - Ok(AddBucketAliasResponse( - bucket_info_results(garage, bucket_id).await?, - )) - } + bucket_info_results(garage, bucket_id).await } -impl RequestHandler for RemoveBucketAliasRequest { - type Response = RemoveBucketAliasResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let bucket_id = parse_bucket_id(&self.bucket_id)?; - - let helper = garage.locked_helper().await; - - match self.alias { - BucketAliasEnum::Global { global_alias } => { - helper - .unset_global_bucket_alias(bucket_id, &global_alias) - .await?; - } - BucketAliasEnum::Local { - local_alias, - access_key_id, - } => { - helper - .unset_local_bucket_alias(bucket_id, &access_key_id, &local_alias) - .await?; - } - } - - Ok(RemoveBucketAliasResponse( - bucket_info_results(garage, bucket_id).await?, - )) - } -} - -// ---- HELPER ---- - async fn bucket_info_results( garage: &Arc, bucket_id: Uuid, -) -> Result { +) -> Result, Error> { let bucket = garage .bucket_helper() .get_existing_bucket(bucket_id) @@ -674,7 +122,7 @@ async fn bucket_info_results( let counters = garage .object_counter_table .table - .get(&bucket.id, &EmptyKey) + .get(&bucket_id, &EmptyKey) .await? .map(|x| x.filtered_values(&garage.system.cluster_layout())) .unwrap_or_default(); @@ -682,7 +130,7 @@ async fn bucket_info_results( let mpu_counters = garage .mpu_counter_table .table - .get(&bucket.id, &EmptyKey) + .get(&bucket_id, &EmptyKey) .await? 
.map(|x| x.filtered_values(&garage.system.cluster_layout())) .unwrap_or_default(); @@ -728,83 +176,407 @@ async fn bucket_info_results( let state = bucket.state.as_option().unwrap(); let quotas = state.quotas.get(); - let res = GetBucketInfoResponse { - id: hex::encode(bucket.id), - created: DateTime::from_timestamp_millis(state.creation_date as i64) - .expect("invalid timestamp stored in db"), - global_aliases: state - .aliases - .items() - .iter() - .filter(|(_, _, a)| *a) - .map(|(n, _, _)| n.to_string()) - .collect::>(), - website_access: state.website_config.get().is_some(), - website_config: state.website_config.get().clone().map(|wsc| { - GetBucketInfoWebsiteResponse { - index_document: wsc.index_document, - error_document: wsc.error_document, - routing_rules: Some( - wsc.routing_rules - .into_iter() - .map(xml::website::RoutingRule::from_garage_routing_rule) - .collect::>(), - ), - } - }), - cors_rules: state.cors_config.get().as_ref().map(|rules| { - rules + let res = + GetBucketInfoResult { + id: hex::encode(bucket.id), + global_aliases: state + .aliases + .items() .iter() - .map(xml::cors::CorsRule::from_garage_cors_rule) - .collect::>() - }), - lifecycle_rules: state.lifecycle_config.get().as_ref().map(|lc| { - lc.iter() - .map(xml::lifecycle::LifecycleRule::from_garage_lifecycle_rule) - .collect::>() - }), - keys: relevant_keys - .into_values() - .filter_map(|key| { - let p = key.state.as_option().unwrap(); - let permissions = p - .authorized_buckets - .get(&bucket.id) - .filter(|p| p.is_any()) - .map(|p| ApiBucketKeyPerm { - read: p.allow_read, - write: p.allow_write, - owner: p.allow_owner, - })?; - Some(GetBucketInfoKey { - access_key_id: key.key_id, - name: p.name.get().to_string(), - permissions, - bucket_local_aliases: p - .local_aliases - .items() - .iter() - .filter(|(_, _, b)| *b == Some(bucket.id)) - .map(|(n, _, _)| n.to_string()) - .collect::>(), + .filter(|(_, _, a)| *a) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + website_access: 
state.website_config.get().is_some(), + website_config: state.website_config.get().clone().map(|wsc| { + GetBucketInfoWebsiteResult { + index_document: wsc.index_document, + error_document: wsc.error_document, + } + }), + keys: relevant_keys + .into_values() + .map(|key| { + let p = key.state.as_option().unwrap(); + GetBucketInfoKey { + access_key_id: key.key_id, + name: p.name.get().to_string(), + permissions: p + .authorized_buckets + .get(&bucket.id) + .map(|p| ApiBucketKeyPerm { + read: p.allow_read, + write: p.allow_write, + owner: p.allow_owner, + }) + .unwrap_or_default(), + bucket_local_aliases: p + .local_aliases + .items() + .iter() + .filter(|(_, _, b)| *b == Some(bucket.id)) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + } }) - }) - .collect::>(), - objects: *counters.get(OBJECTS).unwrap_or(&0), - bytes: *counters.get(BYTES).unwrap_or(&0), - unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0), - unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0), - unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0), - unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0), - quotas: ApiBucketQuotas { - max_size: quotas.max_size, - max_objects: quotas.max_objects, - }, - }; + .collect::>(), + objects: *counters.get(OBJECTS).unwrap_or(&0), + bytes: *counters.get(BYTES).unwrap_or(&0), + unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0), + unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0), + unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0), + unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0), + quotas: ApiBucketQuotas { + max_size: quotas.max_size, + max_objects: quotas.max_objects, + }, + }; - Ok(res) + Ok(json_ok_response(&res)?) 
} +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoResult { + id: String, + global_aliases: Vec, + website_access: bool, + #[serde(default)] + website_config: Option, + keys: Vec, + objects: i64, + bytes: i64, + unfinished_uploads: i64, + unfinished_multipart_uploads: i64, + unfinished_multipart_upload_parts: i64, + unfinished_multipart_upload_bytes: i64, + quotas: ApiBucketQuotas, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoWebsiteResult { + index_document: String, + error_document: Option, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoKey { + access_key_id: String, + name: String, + permissions: ApiBucketKeyPerm, + bucket_local_aliases: Vec, +} + +pub async fn handle_create_bucket( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let helper = garage.locked_helper().await; + + if let Some(ga) = &req.global_alias { + if !is_valid_bucket_name(ga, garage.config.allow_punycode) { + return Err(Error::bad_request(format!( + "{}: {}", + ga, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? 
{ + if alias.state.get().is_some() { + return Err(CommonError::BucketAlreadyExists.into()); + } + } + } + + if let Some(la) = &req.local_alias { + if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) { + return Err(Error::bad_request(format!( + "{}: {}", + la.alias, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let key = helper.key().get_existing_key(&la.access_key_id).await?; + let state = key.state.as_option().unwrap(); + if matches!(state.local_aliases.get(&la.alias), Some(_)) { + return Err(Error::bad_request("Local alias already exists")); + } + } + + let bucket = Bucket::new(); + garage.bucket_table.insert(&bucket).await?; + + if let Some(ga) = &req.global_alias { + helper.set_global_bucket_alias(bucket.id, ga).await?; + } + + if let Some(la) = &req.local_alias { + helper + .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias) + .await?; + + if la.allow.read || la.allow.write || la.allow.owner { + helper + .set_bucket_key_permissions( + bucket.id, + &la.access_key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read: la.allow.read, + allow_write: la.allow.write, + allow_owner: la.allow.owner, + }, + ) + .await?; + } + } + + bucket_info_results(garage, bucket.id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateBucketRequest { + global_alias: Option, + local_alias: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateBucketLocalAlias { + access_key_id: String, + alias: String, + #[serde(default)] + allow: ApiBucketKeyPerm, +} + +pub async fn handle_delete_bucket( + garage: &Arc, + id: String, +) -> Result, Error> { + let helper = garage.locked_helper().await; + + let bucket_id = parse_bucket_id(&id)?; + + let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?; + let state = bucket.state.as_option().unwrap(); + + // Check bucket is empty + if !helper.bucket().is_bucket_empty(bucket_id).await? 
{ + return Err(CommonError::BucketNotEmpty.into()); + } + + // --- done checking, now commit --- + // 1. delete authorization from keys that had access + for (key_id, perm) in bucket.authorized_keys() { + if perm.is_any() { + helper + .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) + .await?; + } + } + // 2. delete all local aliases + for ((key_id, alias), _, active) in state.local_aliases.items().iter() { + if *active { + helper + .purge_local_bucket_alias(bucket.id, key_id, alias) + .await?; + } + } + // 3. delete all global aliases + for (alias, _, active) in state.aliases.items().iter() { + if *active { + helper.purge_global_bucket_alias(bucket.id, alias).await?; + } + } + + // 4. delete bucket + bucket.state = Deletable::delete(); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(empty_body())?) +} + +pub async fn handle_update_bucket( + garage: &Arc, + id: String, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + let bucket_id = parse_bucket_id(&id)?; + + let mut bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let state = bucket.state.as_option_mut().unwrap(); + + if let Some(wa) = req.website_access { + if wa.enabled { + state.website_config.update(Some(WebsiteConfig { + index_document: wa.index_document.ok_or_bad_request( + "Please specify indexDocument when enabling website access.", + )?, + error_document: wa.error_document, + })); + } else { + if wa.index_document.is_some() || wa.error_document.is_some() { + return Err(Error::bad_request( + "Cannot specify indexDocument or errorDocument when disabling website access.", + )); + } + state.website_config.update(None); + } + } + + if let Some(q) = req.quotas { + state.quotas.update(BucketQuotas { + max_size: q.max_size, + max_objects: q.max_objects, + }); + } + + garage.bucket_table.insert(&bucket).await?; + + bucket_info_results(garage, 
bucket_id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct UpdateBucketRequest { + website_access: Option, + quotas: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct UpdateBucketWebsiteAccess { + enabled: bool, + index_document: Option, + error_document: Option, +} + +// ---- BUCKET/KEY PERMISSIONS ---- + +pub async fn handle_bucket_change_key_perm( + garage: &Arc, + req: Request, + new_perm_flag: bool, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let helper = garage.locked_helper().await; + + let bucket_id = parse_bucket_id(&req.bucket_id)?; + + let bucket = helper.bucket().get_existing_bucket(bucket_id).await?; + let state = bucket.state.as_option().unwrap(); + + let key = helper.key().get_existing_key(&req.access_key_id).await?; + + let mut perm = state + .authorized_keys + .get(&key.key_id) + .cloned() + .unwrap_or(BucketKeyPerm::NO_PERMISSIONS); + + if req.permissions.read { + perm.allow_read = new_perm_flag; + } + if req.permissions.write { + perm.allow_write = new_perm_flag; + } + if req.permissions.owner { + perm.allow_owner = new_perm_flag; + } + + helper + .set_bucket_key_permissions(bucket.id, &key.key_id, perm) + .await?; + + bucket_info_results(garage, bucket.id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct BucketKeyPermChangeRequest { + bucket_id: String, + access_key_id: String, + permissions: ApiBucketKeyPerm, +} + +// ---- BUCKET ALIASES ---- + +pub async fn handle_global_alias_bucket( + garage: &Arc, + bucket_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + let helper = garage.locked_helper().await; + + helper.set_global_bucket_alias(bucket_id, &alias).await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_global_unalias_bucket( + garage: &Arc, + bucket_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = 
parse_bucket_id(&bucket_id)?; + + let helper = garage.locked_helper().await; + + helper.unset_global_bucket_alias(bucket_id, &alias).await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_local_alias_bucket( + garage: &Arc, + bucket_id: String, + access_key_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + let helper = garage.locked_helper().await; + + helper + .set_local_bucket_alias(bucket_id, &access_key_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_local_unalias_bucket( + garage: &Arc, + bucket_id: String, + access_key_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + let helper = garage.locked_helper().await; + + helper + .unset_local_bucket_alias(bucket_id, &access_key_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +// ---- HELPER ---- + fn parse_bucket_id(id: &str) -> Result { let id_hex = hex::decode(id).ok_or_bad_request("Invalid bucket id")?; Ok(Uuid::try_from(&id_hex).ok_or_bad_request("Invalid bucket id")?) 
diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs index 15fe370a..ffa0fa71 100644 --- a/src/api/admin/cluster.rs +++ b/src/api/admin/cluster.rs @@ -1,365 +1,411 @@ use std::collections::HashMap; -use std::fmt::Write; +use std::net::SocketAddr; use std::sync::Arc; -use format_table::format_table_to_string; +use hyper::{body::Incoming as IncomingBody, Request, Response}; +use serde::{Deserialize, Serialize}; +use garage_util::crdt::*; use garage_util::data::*; use garage_rpc::layout; -use garage_rpc::layout::PARTITION_BITS; -use garage_table::*; use garage_model::garage::Garage; -use garage_model::s3::object_table; -use crate::api::*; +use garage_api_common::helpers::{json_ok_response, parse_json_body}; + +use crate::api_server::ResBody; use crate::error::*; -use crate::{Admin, RequestHandler}; -impl RequestHandler for GetClusterStatusRequest { - type Response = GetClusterStatusResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let layout = garage.system.cluster_layout(); - let mut nodes = garage - .system - .get_known_nodes() - .into_iter() - .map(|i| { - ( - i.id, - NodeResp { - id: hex::encode(i.id), - garage_version: i.status.garage_version, - addr: i.addr, - hostname: i.status.hostname, - is_up: i.is_up, - last_seen_secs_ago: i.last_seen_secs_ago, - data_partition: i.status.data_disk_avail.map(|(avail, total)| { - FreeSpaceResp { - available: avail, - total, - } +pub async fn handle_get_cluster_status(garage: &Arc) -> Result, Error> { + let layout = garage.system.cluster_layout(); + let mut nodes = garage + .system + .get_known_nodes() + .into_iter() + .map(|i| { + ( + i.id, + NodeResp { + id: hex::encode(i.id), + addr: i.addr, + hostname: i.status.hostname, + is_up: i.is_up, + last_seen_secs_ago: i.last_seen_secs_ago, + data_partition: i + .status + .data_disk_avail + .map(|(avail, total)| FreeSpaceResp { + available: avail, + total, }), - metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| { 
- FreeSpaceResp { - available: avail, - total, - } - }), - ..Default::default() - }, - ) - }) - .collect::>(); - - if let Ok(current_layout) = layout.current() { - for (id, _, role) in current_layout.roles.items().iter() { - if let layout::NodeRoleV(Some(r)) = role { - let role = NodeAssignedRole { - zone: r.zone.to_string(), - capacity: r.capacity, - tags: r.tags.clone(), - }; - match nodes.get_mut(id) { - None => { - nodes.insert( - *id, - NodeResp { - id: hex::encode(id), - role: Some(role), - ..Default::default() - }, - ); + metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| { + FreeSpaceResp { + available: avail, + total, } - Some(n) => { - n.role = Some(role); - } - } - } - } - } - - if let Ok(layout_versions) = layout.versions() { - for ver in layout_versions.iter().rev().skip(1) { - for (id, _, role) in ver.roles.items().iter() { - if let layout::NodeRoleV(Some(r)) = role { - if r.capacity.is_some() { - if let Some(n) = nodes.get_mut(id) { - if n.role.is_none() { - n.draining = true; - } - } else { - nodes.insert( - *id, - NodeResp { - id: hex::encode(id), - draining: true, - ..Default::default() - }, - ); - } - } - } - } - } - } - - let mut nodes = nodes.into_values().collect::>(); - nodes.sort_by(|x, y| x.id.cmp(&y.id)); - - Ok(GetClusterStatusResponse { - layout_version: layout.inner().current().version, - nodes, + }), + ..Default::default() + }, + ) }) - } -} + .collect::>(); -impl RequestHandler for GetClusterHealthRequest { - type Response = GetClusterHealthResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - use garage_rpc::system::ClusterHealthStatus; - let health = garage.system.health(); - let health = GetClusterHealthResponse { - status: match health.status { - ClusterHealthStatus::Healthy => "healthy", - ClusterHealthStatus::Degraded => "degraded", - ClusterHealthStatus::Unavailable => "unavailable", - } - .to_string(), - known_nodes: health.known_nodes, - connected_nodes: 
health.connected_nodes, - storage_nodes: health.storage_nodes, - // Translating storage_nodes_up (admin API context) to storage_nodes_ok (metrics context) - // TODO: when releasing major release, consider renaming all the fields in the metrics to storage_nodes_up - storage_nodes_up: health.storage_nodes_ok, - partitions: health.partitions, - partitions_quorum: health.partitions_quorum, - partitions_all_ok: health.partitions_all_ok, - }; - Ok(health) - } -} - -impl RequestHandler for GetClusterStatisticsRequest { - type Response = GetClusterStatisticsResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut ret = String::new(); - - // Gather info on number of buckets, objects and object size - let buckets = garage - .bucket_table - .get_range( - &EmptyKey, - None, - Some(DeletedFilter::NotDeleted), - 1_000_000, - EnumerationOrder::Forward, - ) - .await?; - - let bucket_stats_opt = if buckets.len() < 1000 { - futures::future::try_join_all( - buckets - .iter() - .map(|b| garage.object_counter_table.table.get(&b.id, &EmptyKey)), - ) - .await - .ok() - } else { - None - }; - - let layout = &garage.system.cluster_layout(); - - let bucket_count = buckets.len() as u64; - let (total_object_count, total_object_bytes); - if let Some(bucket_stats) = bucket_stats_opt { - let bucket_stats = bucket_stats - .into_iter() - .filter_map(|cnt| cnt.map(|x| x.filtered_values(layout))) - .collect::>(); - - total_object_count = Some( - bucket_stats - .iter() - .clone() - .map(|cnt| *cnt.get(object_table::OBJECTS).unwrap_or(&0) as u64) - .sum(), - ); - total_object_bytes = Some( - bucket_stats - .iter() - .clone() - .map(|cnt| *cnt.get(object_table::BYTES).unwrap_or(&0) as u64) - .sum(), - ); - } else { - total_object_count = None; - total_object_bytes = None; - } - - // Gather storage node and free space statistics for current nodes - let mut node_partition_count = HashMap::::new(); - if let Ok(current_layout) = layout.current() { - for short_id in 
current_layout.ring_assignment_data.iter() { - let id = current_layout.node_id_vec[*short_id as usize]; - *node_partition_count.entry(id).or_default() += 1; - } - } - let node_info = garage - .system - .get_known_nodes() - .into_iter() - .map(|n| (n.id, n)) - .collect::>(); - - let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()]; - for (id, parts) in node_partition_count.iter() { - let info = node_info.get(id); - let status = info.map(|x| &x.status); - let role = layout - .current() - .ok() - .and_then(|l| l.roles.get(id)) - .and_then(|x| x.0.as_ref()); - let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?"); - let zone = role.map(|x| x.zone.as_str()).unwrap_or("?"); - let capacity = role - .map(|x| x.capacity_string()) - .unwrap_or_else(|| "?".into()); - let avail_str = |x| match x { - Some((avail, total)) => { - let pct = (avail as f64) / (total as f64) * 100.; - let avail = bytesize::ByteSize::b(avail); - let total = bytesize::ByteSize::b(total); - format!("{}/{} ({:.1}%)", avail, total, pct) - } - None => "?".into(), + for (id, _, role) in layout.current().roles.items().iter() { + if let layout::NodeRoleV(Some(r)) = role { + let role = NodeRoleResp { + id: hex::encode(id), + zone: r.zone.to_string(), + capacity: r.capacity, + tags: r.tags.clone(), }; - let data_avail = avail_str(status.and_then(|x| x.data_disk_avail)); - let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail)); - table.push(format!( - " {:?}\t{}\t{}\t{}\t{}\t{}\t{}", - id, hostname, zone, capacity, parts, data_avail, meta_avail - )); + match nodes.get_mut(id) { + None => { + nodes.insert( + *id, + NodeResp { + id: hex::encode(id), + role: Some(role), + ..Default::default() + }, + ); + } + Some(n) => { + n.role = Some(role); + } + } } - write!( - &mut ret, - "Storage nodes:\n{}", - format_table_to_string(table) - ) - .unwrap(); + } - let meta_part_avail = node_partition_count - .iter() - .filter_map(|(id, parts)| { - node_info - 
.get(id) - .and_then(|x| x.status.meta_disk_avail) - .map(|c| c.0 / *parts) - }) - .collect::>(); - let data_part_avail = node_partition_count - .iter() - .filter_map(|(id, parts)| { - node_info - .get(id) - .and_then(|x| x.status.data_disk_avail) - .map(|c| c.0 / *parts) - }) - .collect::>(); - - let metadata_avail: u64 = - meta_part_avail.iter().min().unwrap_or(&0) * (1 << PARTITION_BITS); - let data_avail: u64 = data_part_avail.iter().min().unwrap_or(&0) * (1 << PARTITION_BITS); - - let metadata_avail_str = bytesize::ByteSize(metadata_avail); - let data_avail_str = bytesize::ByteSize(data_avail); - - let incomplete_info = meta_part_avail.len() < node_partition_count.len() - || data_part_avail.len() < node_partition_count.len(); - - // Display bucket statistics - let mut bucket_stats = vec![format!("Number of buckets:\t{}", bucket_count)]; - if let Some(toc) = total_object_count { - bucket_stats.push(format!("Total number of objects:\t{}", toc)); + for ver in layout.versions().iter().rev().skip(1) { + for (id, _, role) in ver.roles.items().iter() { + if let layout::NodeRoleV(Some(r)) = role { + if r.capacity.is_some() { + if let Some(n) = nodes.get_mut(id) { + if n.role.is_none() { + n.draining = true; + } + } else { + nodes.insert( + *id, + NodeResp { + id: hex::encode(id), + draining: true, + ..Default::default() + }, + ); + } + } + } } - if let Some(tob) = total_object_bytes { - bucket_stats.push(format!( - "Total size of objects:\t{}", - bytesize::ByteSize(tob) - )); - } - writeln!(&mut ret, "\n{}", format_table_to_string(bucket_stats)).unwrap(); + } - writeln!( - &mut ret, - "Estimated available storage space cluster-wide (might be lower in practice):" - ) - .unwrap(); - if incomplete_info { - ret += &format_table_to_string(vec![ - format!(" data: < {}", data_avail_str), - format!(" metadata: < {}", metadata_avail_str), - ]); - writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap(); - } else { - 
ret += &format_table_to_string(vec![ - format!(" data: {}", data_avail_str), - format!(" metadata: {}", metadata_avail_str), - ]); - } + let mut nodes = nodes.into_values().collect::>(); + nodes.sort_by(|x, y| x.id.cmp(&y.id)); - Ok(GetClusterStatisticsResponse { - freeform: ret, - metadata_avail: Some(metadata_avail), - data_avail: Some(data_avail), - incomplete_avail_info: Some(incomplete_info), - bucket_count: Some(bucket_count), - total_object_count, - total_object_bytes, + let res = GetClusterStatusResponse { + node: hex::encode(garage.system.id), + garage_version: garage_util::version::garage_version(), + garage_features: garage_util::version::garage_features(), + rust_version: garage_util::version::rust_version(), + db_engine: garage.db.engine(), + layout_version: layout.current().version, + nodes, + }; + + Ok(json_ok_response(&res)?) +} + +pub async fn handle_get_cluster_health(garage: &Arc) -> Result, Error> { + use garage_rpc::system::ClusterHealthStatus; + let health = garage.system.health(); + let health = ClusterHealth { + status: match health.status { + ClusterHealthStatus::Healthy => "healthy", + ClusterHealthStatus::Degraded => "degraded", + ClusterHealthStatus::Unavailable => "unavailable", + }, + known_nodes: health.known_nodes, + connected_nodes: health.connected_nodes, + storage_nodes: health.storage_nodes, + storage_nodes_ok: health.storage_nodes_ok, + partitions: health.partitions, + partitions_quorum: health.partitions_quorum, + partitions_all_ok: health.partitions_all_ok, + }; + Ok(json_ok_response(&health)?) 
+} + +pub async fn handle_connect_cluster_nodes( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::, _, Error>(req).await?; + + let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node))) + .await + .into_iter() + .map(|r| match r { + Ok(()) => ConnectClusterNodesResponse { + success: true, + error: None, + }, + Err(e) => ConnectClusterNodesResponse { + success: false, + error: Some(format!("{}", e)), + }, }) + .collect::>(); + + Ok(json_ok_response(&res)?) +} + +pub async fn handle_get_cluster_layout(garage: &Arc) -> Result, Error> { + let res = format_cluster_layout(garage.system.cluster_layout().inner()); + + Ok(json_ok_response(&res)?) +} + +fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse { + let roles = layout + .current() + .roles + .items() + .iter() + .filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x))) + .map(|(k, v)| NodeRoleResp { + id: hex::encode(k), + zone: v.zone.clone(), + capacity: v.capacity, + tags: v.tags.clone(), + }) + .collect::>(); + + let staged_role_changes = layout + .staging + .get() + .roles + .items() + .iter() + .filter(|(k, _, v)| layout.current().roles.get(k) != Some(v)) + .map(|(k, _, v)| match &v.0 { + None => NodeRoleChange { + id: hex::encode(k), + action: NodeRoleChangeEnum::Remove { remove: true }, + }, + Some(r) => NodeRoleChange { + id: hex::encode(k), + action: NodeRoleChangeEnum::Update { + zone: r.zone.clone(), + capacity: r.capacity, + tags: r.tags.clone(), + }, + }, + }) + .collect::>(); + + GetClusterLayoutResponse { + version: layout.current().version, + roles, + staged_role_changes, } } -impl RequestHandler for ConnectClusterNodesRequest { - type Response = ConnectClusterNodesResponse; +// ---- - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node))) - .await - .into_iter() - .map(|r| match r { - Ok(()) => 
ConnectNodeResponse { - success: true, - error: None, - }, - Err(e) => ConnectNodeResponse { - success: false, - error: Some(format!("{}", e)), - }, - }) - .collect::>(); - Ok(ConnectClusterNodesResponse(res)) - } +#[derive(Debug, Clone, Copy, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ClusterHealth { + status: &'static str, + known_nodes: usize, + connected_nodes: usize, + storage_nodes: usize, + storage_nodes_ok: usize, + partitions: usize, + partitions_quorum: usize, + partitions_all_ok: usize, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetClusterStatusResponse { + node: String, + garage_version: &'static str, + garage_features: Option<&'static [&'static str]>, + rust_version: &'static str, + db_engine: String, + layout_version: u64, + nodes: Vec, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ApplyClusterLayoutResponse { + message: Vec, + layout: GetClusterLayoutResponse, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ConnectClusterNodesResponse { + success: bool, + error: Option, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetClusterLayoutResponse { + version: u64, + roles: Vec, + staged_role_changes: Vec, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct NodeRoleResp { + id: String, + zone: String, + capacity: Option, + tags: Vec, +} + +#[derive(Serialize, Default)] +#[serde(rename_all = "camelCase")] +struct FreeSpaceResp { + available: u64, + total: u64, +} + +#[derive(Serialize, Default)] +#[serde(rename_all = "camelCase")] +struct NodeResp { + id: String, + role: Option, + addr: Option, + hostname: Option, + is_up: bool, + last_seen_secs_ago: Option, + draining: bool, + #[serde(skip_serializing_if = "Option::is_none")] + data_partition: Option, + #[serde(skip_serializing_if = "Option::is_none")] + metadata_partition: Option, +} + +// ---- update functions ---- + +pub async fn handle_update_cluster_layout( + garage: 
&Arc, + req: Request, +) -> Result, Error> { + let updates = parse_json_body::(req).await?; + + let mut layout = garage.system.cluster_layout().inner().clone(); + + let mut roles = layout.current().roles.clone(); + roles.merge(&layout.staging.get().roles); + + for change in updates { + let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?; + let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?; + + let new_role = match change.action { + NodeRoleChangeEnum::Remove { remove: true } => None, + NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + } => Some(layout::NodeRole { + zone, + capacity, + tags, + }), + _ => return Err(Error::bad_request("Invalid layout change")), + }; + + layout + .staging + .get_mut() + .roles + .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role))); + } + + garage + .system + .layout_manager + .update_cluster_layout(&layout) + .await?; + + let res = format_cluster_layout(&layout); + Ok(json_ok_response(&res)?) +} + +pub async fn handle_apply_cluster_layout( + garage: &Arc, + req: Request, +) -> Result, Error> { + let param = parse_json_body::(req).await?; + + let layout = garage.system.cluster_layout().inner().clone(); + let (layout, msg) = layout.apply_staged_changes(Some(param.version))?; + + garage + .system + .layout_manager + .update_cluster_layout(&layout) + .await?; + + let res = ApplyClusterLayoutResponse { + message: msg, + layout: format_cluster_layout(&layout), + }; + Ok(json_ok_response(&res)?) +} + +pub async fn handle_revert_cluster_layout( + garage: &Arc, +) -> Result, Error> { + let layout = garage.system.cluster_layout().inner().clone(); + let layout = layout.revert_staged_changes()?; + garage + .system + .layout_manager + .update_cluster_layout(&layout) + .await?; + + let res = format_cluster_layout(&layout); + Ok(json_ok_response(&res)?) 
+} + +// ---- + +type UpdateClusterLayoutRequest = Vec; + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct ApplyLayoutRequest { + version: u64, +} + +// ---- + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct NodeRoleChange { + id: String, + #[serde(flatten)] + action: NodeRoleChangeEnum, +} + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum NodeRoleChangeEnum { + #[serde(rename_all = "camelCase")] + Remove { remove: bool }, + #[serde(rename_all = "camelCase")] + Update { + zone: String, + capacity: Option, + tags: Vec, + }, } diff --git a/src/api/admin/error.rs b/src/api/admin/error.rs index b8be278e..17d4c200 100644 --- a/src/api/admin/error.rs +++ b/src/api/admin/error.rs @@ -21,26 +21,10 @@ pub enum Error { Common(#[from] CommonError), // Category: cannot process - /// The admin API token does not exist - #[error("Admin token not found: {0}")] - NoSuchAdminToken(String), - /// The API access key does not exist #[error("Access key not found: {0}")] NoSuchAccessKey(String), - /// The requested block does not exist - #[error("Block not found: {0}")] - NoSuchBlock(String), - - /// The requested worker does not exist - #[error("Worker not found: {0}")] - NoSuchWorker(u64), - - /// The object requested don't exists - #[error("Key not found")] - NoSuchKey, - /// In Import key, the key already exists #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. 
Sorry.")] KeyAlreadyExists(String), @@ -62,15 +46,11 @@ impl From for Error { } impl Error { - pub fn code(&self) -> &'static str { + fn code(&self) -> &'static str { match self { Error::Common(c) => c.aws_code(), - Error::NoSuchAdminToken(_) => "NoSuchAdminToken", Error::NoSuchAccessKey(_) => "NoSuchAccessKey", - Error::NoSuchWorker(_) => "NoSuchWorker", - Error::NoSuchBlock(_) => "NoSuchBlock", Error::KeyAlreadyExists(_) => "KeyAlreadyExists", - Error::NoSuchKey => "NoSuchKey", } } } @@ -80,11 +60,7 @@ impl ApiError for Error { fn http_status_code(&self) -> StatusCode { match self { Error::Common(c) => c.http_status_code(), - Error::NoSuchAdminToken(_) - | Error::NoSuchAccessKey(_) - | Error::NoSuchWorker(_) - | Error::NoSuchBlock(_) - | Error::NoSuchKey => StatusCode::NOT_FOUND, + Error::NoSuchAccessKey(_) => StatusCode::NOT_FOUND, Error::KeyAlreadyExists(_) => StatusCode::CONFLICT, } } @@ -92,7 +68,6 @@ impl ApiError for Error { fn add_http_headers(&self, header_map: &mut HeaderMap) { use hyper::header; header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap()); - header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap()); } fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody { diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs index e8c8ad95..bebf3063 100644 --- a/src/api/admin/key.rs +++ b/src/api/admin/key.rs @@ -1,192 +1,173 @@ use std::collections::HashMap; use std::sync::Arc; -use chrono::DateTime; +use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; use garage_table::*; -use garage_util::time::now_msec; use garage_model::garage::Garage; use garage_model::key_table::*; -use crate::api::*; +use garage_api_common::helpers::*; + +use crate::api_server::ResBody; use crate::error::*; -use crate::{Admin, RequestHandler}; -impl RequestHandler for ListKeysRequest { - type Response = ListKeysResponse; - - async fn handle(self, garage: &Arc, _admin: 
&Admin) -> Result { - let now = now_msec(); - - let res = garage - .key_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), - 10000, - EnumerationOrder::Forward, - ) - .await? - .iter() - .map(|k| { - let p = k.params().unwrap(); - - ListKeysResponseItem { - id: k.key_id.to_string(), - name: p.name.get().clone(), - created: p.created.map(|x| { - DateTime::from_timestamp_millis(x as i64) - .expect("invalid timestamp stored in db") - }), - expiration: p.expiration.get().map(|x| { - DateTime::from_timestamp_millis(x as i64) - .expect("invalid timestamp stored in db") - }), - expired: p.is_expired(now), - } - }) - .collect::>(); - - Ok(ListKeysResponse(res)) - } -} - -impl RequestHandler for GetKeyInfoRequest { - type Response = GetKeyInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let key = match (self.id, self.search) { - (Some(id), None) => garage.key_helper().get_existing_key(&id).await?, - (None, Some(search)) => { - let candidates = garage - .key_table - .get_range( - &EmptyKey, - None, - Some(KeyFilter::MatchesAndNotDeleted(search.to_string())), - 10, - EnumerationOrder::Forward, - ) - .await? 
- .into_iter() - .collect::>(); - if candidates.is_empty() { - return Err(Error::NoSuchAccessKey(search.clone())); - } else if candidates.len() != 1 { - return Err(Error::bad_request(format!( - "{} matching keys", - candidates.len() - ))); - } - candidates.into_iter().next().unwrap() - } - _ => { - return Err(Error::bad_request( - "Either id or search must be provided (but not both)", - )); - } - }; - - key_info_results(garage, key, self.show_secret_key).await - } -} - -impl RequestHandler for CreateKeyRequest { - type Response = CreateKeyResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut key = Key::new("Unnamed key"); - - apply_key_updates(&mut key, self.0)?; - - garage.key_table.insert(&key).await?; - - Ok(CreateKeyResponse( - key_info_results(garage, key, true).await?, - )) - } -} - -impl RequestHandler for ImportKeyRequest { - type Response = ImportKeyResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?; - if prev_key.is_some() { - return Err(Error::KeyAlreadyExists(self.access_key_id.to_string())); - } - - let imported_key = Key::import( - &self.access_key_id, - &self.secret_access_key, - self.name.as_deref().unwrap_or("Imported key"), +pub async fn handle_list_keys(garage: &Arc) -> Result, Error> { + let res = garage + .key_table + .get_range( + &EmptyKey, + None, + Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), + 10000, + EnumerationOrder::Forward, ) - .ok_or_bad_request("Invalid key format")?; - garage.key_table.insert(&imported_key).await?; + .await? + .iter() + .map(|k| ListKeyResultItem { + id: k.key_id.to_string(), + name: k.params().unwrap().name.get().clone(), + }) + .collect::>(); - Ok(ImportKeyResponse( - key_info_results(garage, imported_key, false).await?, - )) - } + Ok(json_ok_response(&res)?) 
} -impl RequestHandler for UpdateKeyRequest { - type Response = UpdateKeyResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut key = garage.key_helper().get_existing_key(&self.id).await?; - - apply_key_updates(&mut key, self.body)?; - - garage.key_table.insert(&key).await?; - - Ok(UpdateKeyResponse( - key_info_results(garage, key, false).await?, - )) - } +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ListKeyResultItem { + id: String, + name: String, } -impl RequestHandler for DeleteKeyRequest { - type Response = DeleteKeyResponse; +pub async fn handle_get_key_info( + garage: &Arc, + id: Option, + search: Option, + show_secret_key: bool, +) -> Result, Error> { + let key = if let Some(id) = id { + garage.key_helper().get_existing_key(&id).await? + } else if let Some(search) = search { + garage + .key_helper() + .get_existing_matching_key(&search) + .await? + } else { + unreachable!(); + }; - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let helper = garage.locked_helper().await; + key_info_results(garage, key, show_secret_key).await +} - let mut key = helper.key().get_existing_key(&self.id).await?; +pub async fn handle_create_key( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; - helper.delete_key(&mut key).await?; + let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key")); + garage.key_table.insert(&key).await?; - Ok(DeleteKeyResponse) + key_info_results(garage, key, true).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateKeyRequest { + name: Option, +} + +pub async fn handle_import_key( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?; + if prev_key.is_some() { + return Err(Error::KeyAlreadyExists(req.access_key_id.to_string())); } + + let imported_key = 
Key::import( + &req.access_key_id, + &req.secret_access_key, + req.name.as_deref().unwrap_or("Imported key"), + ) + .ok_or_bad_request("Invalid key format")?; + garage.key_table.insert(&imported_key).await?; + + key_info_results(garage, imported_key, false).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct ImportKeyRequest { + access_key_id: String, + secret_access_key: String, + name: Option, +} + +pub async fn handle_update_key( + garage: &Arc, + id: String, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let mut key = garage.key_helper().get_existing_key(&id).await?; + + let key_state = key.state.as_option_mut().unwrap(); + + if let Some(new_name) = req.name { + key_state.name.update(new_name); + } + if let Some(allow) = req.allow { + if allow.create_bucket { + key_state.allow_create_bucket.update(true); + } + } + if let Some(deny) = req.deny { + if deny.create_bucket { + key_state.allow_create_bucket.update(false); + } + } + + garage.key_table.insert(&key).await?; + + key_info_results(garage, key, false).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct UpdateKeyRequest { + name: Option, + allow: Option, + deny: Option, +} + +pub async fn handle_delete_key( + garage: &Arc, + id: String, +) -> Result, Error> { + let helper = garage.locked_helper().await; + + let mut key = helper.key().get_existing_key(&id).await?; + + helper.delete_key(&mut key).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(empty_body())?) 
} async fn key_info_results( garage: &Arc, key: Key, show_secret: bool, -) -> Result { +) -> Result, Error> { let mut relevant_buckets = HashMap::new(); let key_state = key.state.as_option().unwrap(); @@ -212,15 +193,8 @@ async fn key_info_results( } } - let res = GetKeyInfoResponse { + let res = GetKeyInfoResult { name: key_state.name.get().clone(), - created: key_state.created.map(|x| { - DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db") - }), - expiration: key_state.expiration.get().map(|x| { - DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db") - }), - expired: key_state.is_expired(now_msec()), access_key_id: key.key_id.clone(), secret_access_key: if show_secret { Some(key_state.secret_key.clone()) @@ -232,18 +206,9 @@ async fn key_info_results( }, buckets: relevant_buckets .into_values() - .filter_map(|bucket| { + .map(|bucket| { let state = bucket.state.as_option().unwrap(); - let permissions = key_state - .authorized_buckets - .get(&bucket.id) - .filter(|p| p.is_any()) - .map(|p| ApiBucketKeyPerm { - read: p.allow_read, - write: p.allow_write, - owner: p.allow_owner, - })?; - Some(KeyInfoBucketResponse { + KeyInfoBucketResult { id: hex::encode(bucket.id), global_aliases: state .aliases @@ -259,45 +224,57 @@ async fn key_info_results( .filter(|((k, _), _, a)| *a && *k == key.key_id) .map(|((_, n), _, _)| n.to_string()) .collect::>(), - permissions, - }) + permissions: key_state + .authorized_buckets + .get(&bucket.id) + .map(|p| ApiBucketKeyPerm { + read: p.allow_read, + write: p.allow_write, + owner: p.allow_owner, + }) + .unwrap_or_default(), + } }) .collect::>(), }; - Ok(res) + Ok(json_ok_response(&res)?) 
} -fn apply_key_updates(key: &mut Key, updates: UpdateKeyRequestBody) -> Result<(), Error> { - if updates.never_expires && updates.expiration.is_some() { - return Err(Error::bad_request( - "cannot specify `expiration` and `never_expires`", - )); - } - - let key_state = key.state.as_option_mut().unwrap(); - - if let Some(new_name) = updates.name { - key_state.name.update(new_name); - } - if let Some(expiration) = updates.expiration { - key_state - .expiration - .update(Some(expiration.timestamp_millis() as u64)); - } - if updates.never_expires { - key_state.expiration.update(None); - } - if let Some(allow) = updates.allow { - if allow.create_bucket { - key_state.allow_create_bucket.update(true); - } - } - if let Some(deny) = updates.deny { - if deny.create_bucket { - key_state.allow_create_bucket.update(false); - } - } - - Ok(()) +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetKeyInfoResult { + name: String, + access_key_id: String, + #[serde(skip_serializing_if = "is_default")] + secret_access_key: Option, + permissions: KeyPerm, + buckets: Vec, +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct KeyPerm { + #[serde(default)] + create_bucket: bool, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct KeyInfoBucketResult { + id: String, + global_aliases: Vec, + local_aliases: Vec, + permissions: ApiBucketKeyPerm, +} + +#[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub(crate) struct ApiBucketKeyPerm { + #[serde(default)] + pub(crate) read: bool, + #[serde(default)] + pub(crate) write: bool, + #[serde(default)] + pub(crate) owner: bool, } diff --git a/src/api/admin/layout.rs b/src/api/admin/layout.rs deleted file mode 100644 index 1979c11a..00000000 --- a/src/api/admin/layout.rs +++ /dev/null @@ -1,408 +0,0 @@ -use std::sync::Arc; - -use garage_util::crdt::*; -use garage_util::data::*; -use garage_util::error::Error as GarageError; - -use garage_rpc::layout; - -use 
garage_model::garage::Garage; - -use crate::api::*; -use crate::error::*; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for GetClusterLayoutRequest { - type Response = GetClusterLayoutResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - Ok(format_cluster_layout( - garage.system.cluster_layout().inner(), - )) - } -} - -fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse { - let current = layout.current(); - - let roles = current - .roles - .items() - .iter() - .filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x))) - .map(|(k, v)| { - let stored_partitions = current.get_node_usage(k).ok().map(|x| x as u64); - LayoutNodeRole { - id: hex::encode(k), - zone: v.zone.clone(), - capacity: v.capacity, - stored_partitions, - usable_capacity: stored_partitions.map(|x| x * current.partition_size), - tags: v.tags.clone(), - } - }) - .collect::>(); - - let staged_role_changes = layout - .staging - .get() - .roles - .items() - .iter() - .filter(|(k, _, v)| current.roles.get(k) != Some(v)) - .map(|(k, _, v)| match &v.0 { - None => NodeRoleChange { - id: hex::encode(k), - action: NodeRoleChangeEnum::Remove { remove: true }, - }, - Some(r) => NodeRoleChange { - id: hex::encode(k), - action: NodeRoleChangeEnum::Update(NodeAssignedRole { - zone: r.zone.clone(), - capacity: r.capacity, - tags: r.tags.clone(), - }), - }, - }) - .collect::>(); - - let staged_parameters = if *layout.staging.get().parameters.get() != current.parameters { - Some((*layout.staging.get().parameters.get()).into()) - } else { - None - }; - - GetClusterLayoutResponse { - version: current.version, - roles, - partition_size: current.partition_size, - parameters: current.parameters.into(), - staged_role_changes, - staged_parameters, - } -} - -impl RequestHandler for GetClusterLayoutHistoryRequest { - type Response = GetClusterLayoutHistoryResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let 
layout_helper = garage.system.cluster_layout(); - let layout = layout_helper.inner(); - let min_stored = layout.min_stored(); - - let versions = layout - .versions - .iter() - .rev() - .chain(layout.old_versions.iter().rev()) - .map(|ver| { - let status = if ver.version == layout.current().version { - ClusterLayoutVersionStatus::Current - } else if ver.version >= min_stored { - ClusterLayoutVersionStatus::Draining - } else { - ClusterLayoutVersionStatus::Historical - }; - ClusterLayoutVersion { - version: ver.version, - status, - storage_nodes: ver - .roles - .items() - .iter() - .filter( - |(_, _, x)| matches!(x, layout::NodeRoleV(Some(c)) if c.capacity.is_some()), - ) - .count() as u64, - gateway_nodes: ver - .roles - .items() - .iter() - .filter( - |(_, _, x)| matches!(x, layout::NodeRoleV(Some(c)) if c.capacity.is_none()), - ) - .count() as u64, - } - }) - .collect::>(); - - let all_nodes = layout.get_all_nodes(); - let min_ack = layout_helper.ack_map_min(); - - let update_trackers = if layout.versions.len() > 1 { - Some( - all_nodes - .iter() - .map(|node| { - ( - hex::encode(node), - NodeUpdateTrackers { - ack: layout.update_trackers.ack_map.get(node, min_stored), - sync: layout.update_trackers.sync_map.get(node, min_stored), - sync_ack: layout.update_trackers.sync_ack_map.get(node, min_stored), - }, - ) - }) - .collect(), - ) - } else { - None - }; - - Ok(GetClusterLayoutHistoryResponse { - current_version: layout.current().version, - min_ack, - versions, - update_trackers, - }) - } -} - -// ---- - -// ---- update functions ---- - -impl RequestHandler for UpdateClusterLayoutRequest { - type Response = UpdateClusterLayoutResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut layout = garage.system.cluster_layout().inner().clone(); - - let mut roles = layout.current().roles.clone(); - roles.merge(&layout.staging.get().roles); - - for change in self.roles { - let node = 
hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?; - let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?; - - let new_role = match change.action { - NodeRoleChangeEnum::Remove { remove: true } => None, - NodeRoleChangeEnum::Update(NodeAssignedRole { - zone, - capacity, - tags, - }) => { - if matches!(capacity, Some(cap) if cap < 1024) { - return Err(Error::bad_request("Capacity should be at least 1K (1024)")); - } - Some(layout::NodeRole { - zone, - capacity, - tags, - }) - } - _ => return Err(Error::bad_request("Invalid layout change")), - }; - - layout - .staging - .get_mut() - .roles - .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role))); - } - - if let Some(param) = self.parameters { - if let ZoneRedundancy::AtLeast(r_int) = param.zone_redundancy { - if r_int > layout.current().replication_factor { - return Err(Error::bad_request(format!( - "The zone redundancy must be smaller or equal to the replication factor ({}).", - layout.current().replication_factor - ))); - } else if r_int < 1 { - return Err(Error::bad_request( - "The zone redundancy must be at least 1.", - )); - } - } - layout.staging.get_mut().parameters.update(param.into()); - } - - garage - .system - .layout_manager - .update_cluster_layout(&layout) - .await?; - - let res = format_cluster_layout(&layout); - Ok(UpdateClusterLayoutResponse(res)) - } -} - -impl RequestHandler for PreviewClusterLayoutChangesRequest { - type Response = PreviewClusterLayoutChangesResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let layout = garage.system.cluster_layout().inner().clone(); - let new_ver = layout.current().version + 1; - match layout.apply_staged_changes(new_ver) { - Err(GarageError::Message(error)) => { - Ok(PreviewClusterLayoutChangesResponse::Error { error }) - } - Err(e) => Err(e.into()), - Ok((new_layout, msg)) => Ok(PreviewClusterLayoutChangesResponse::Success { - message: msg, - new_layout: 
format_cluster_layout(&new_layout), - }), - } - } -} - -impl RequestHandler for ApplyClusterLayoutRequest { - type Response = ApplyClusterLayoutResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let layout = garage.system.cluster_layout().inner().clone(); - let (layout, msg) = layout.apply_staged_changes(self.version)?; - - garage - .system - .layout_manager - .update_cluster_layout(&layout) - .await?; - - Ok(ApplyClusterLayoutResponse { - message: msg, - layout: format_cluster_layout(&layout), - }) - } -} - -impl RequestHandler for RevertClusterLayoutRequest { - type Response = RevertClusterLayoutResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let layout = garage.system.cluster_layout().inner().clone(); - let layout = layout.revert_staged_changes()?; - garage - .system - .layout_manager - .update_cluster_layout(&layout) - .await?; - - let res = format_cluster_layout(&layout); - Ok(RevertClusterLayoutResponse(res)) - } -} - -impl RequestHandler for ClusterLayoutSkipDeadNodesRequest { - type Response = ClusterLayoutSkipDeadNodesResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let status = garage.system.get_known_nodes(); - - let mut layout = garage.system.cluster_layout().inner().clone(); - let mut ack_updated = vec![]; - let mut sync_updated = vec![]; - - if layout.versions.len() == 1 { - return Err(Error::bad_request( - "This command cannot be called when there is only one live cluster layout version", - )); - } - - let min_v = layout.min_stored(); - if self.version <= min_v || self.version > layout.current().version { - return Err(Error::bad_request(format!( - "Invalid version, you may use the following version numbers: {}", - (min_v + 1..=layout.current().version) - .map(|x| x.to_string()) - .collect::>() - .join(" ") - ))); - } - - let all_nodes = layout.get_all_nodes(); - for node in all_nodes.iter() { - // Update ACK tracker for dead 
nodes or for all nodes if --allow-missing-data - if self.allow_missing_data || !status.iter().any(|x| x.id == *node && x.is_up) { - let ack_changed = layout.update_trackers.ack_map.set_max(*node, self.version); - if ack_changed { - ack_updated.push(hex::encode(node)); - } - } - - // If --allow-missing-data, update SYNC tracker for all nodes. - if self.allow_missing_data { - let sync_changed = layout.update_trackers.sync_map.set_max(*node, self.version); - if sync_changed { - sync_updated.push(hex::encode(node)); - } - } - } - - garage - .system - .layout_manager - .update_cluster_layout(&layout) - .await?; - - Ok(ClusterLayoutSkipDeadNodesResponse { - ack_updated, - sync_updated, - }) - } -} - -// ---- - -impl From for ZoneRedundancy { - fn from(x: layout::ZoneRedundancy) -> Self { - match x { - layout::ZoneRedundancy::Maximum => ZoneRedundancy::Maximum, - layout::ZoneRedundancy::AtLeast(x) => ZoneRedundancy::AtLeast(x), - } - } -} - -impl From for layout::ZoneRedundancy { - fn from(val: ZoneRedundancy) -> Self { - match val { - ZoneRedundancy::Maximum => layout::ZoneRedundancy::Maximum, - ZoneRedundancy::AtLeast(x) => layout::ZoneRedundancy::AtLeast(x), - } - } -} - -impl From for LayoutParameters { - fn from(x: layout::LayoutParameters) -> Self { - LayoutParameters { - zone_redundancy: x.zone_redundancy.into(), - } - } -} - -impl From for layout::LayoutParameters { - fn from(val: LayoutParameters) -> Self { - layout::LayoutParameters { - zone_redundancy: val.zone_redundancy.into(), - } - } -} diff --git a/src/api/admin/lib.rs b/src/api/admin/lib.rs index dd164497..599e9b44 100644 --- a/src/api/admin/lib.rs +++ b/src/api/admin/lib.rs @@ -3,44 +3,9 @@ extern crate tracing; pub mod api_server; mod error; -mod macros; - -pub mod api; -pub mod openapi; mod router_v0; mod router_v1; -mod router_v2; -mod admin_token; mod bucket; mod cluster; mod key; -mod layout; -mod special; - -mod block; -mod node; -mod repair; -mod worker; - -use std::sync::Arc; - -use 
garage_model::garage::Garage; - -pub use api_server::AdminApiServer as Admin; - -pub enum Authorization { - None, - MetricsToken, - AdminToken, -} - -pub trait RequestHandler { - type Response; - - fn handle( - self, - garage: &Arc, - admin: &Admin, - ) -> impl std::future::Future> + Send; -} diff --git a/src/api/admin/macros.rs b/src/api/admin/macros.rs deleted file mode 100644 index f11a2a25..00000000 --- a/src/api/admin/macros.rs +++ /dev/null @@ -1,208 +0,0 @@ -macro_rules! admin_endpoints { - [ - $(@special $special_endpoint:ident,)* - $($endpoint:ident,)* - ] => { - paste! { - #[derive(Debug, Clone, Serialize, Deserialize)] - pub enum AdminApiRequest { - $( - $special_endpoint( [<$special_endpoint Request>] ), - )* - $( - $endpoint( [<$endpoint Request>] ), - )* - } - - #[derive(Debug, Clone, Serialize)] - #[serde(untagged)] - pub enum AdminApiResponse { - $( - $endpoint( [<$endpoint Response>] ), - )* - } - - #[derive(Debug, Clone, Serialize, Deserialize)] - pub enum TaggedAdminApiResponse { - $( - $endpoint( [<$endpoint Response>] ), - )* - } - - impl AdminApiRequest { - pub fn name(&self) -> &'static str { - match self { - $( - Self::$special_endpoint(_) => stringify!($special_endpoint), - )* - $( - Self::$endpoint(_) => stringify!($endpoint), - )* - } - } - } - - impl AdminApiResponse { - pub fn tagged(self) -> TaggedAdminApiResponse { - match self { - $( - Self::$endpoint(res) => TaggedAdminApiResponse::$endpoint(res), - )* - } - } - } - - $( - impl From< [< $endpoint Request >] > for AdminApiRequest { - fn from(req: [< $endpoint Request >]) -> AdminApiRequest { - AdminApiRequest::$endpoint(req) - } - } - - impl TryFrom for [< $endpoint Response >] { - type Error = TaggedAdminApiResponse; - fn try_from(resp: TaggedAdminApiResponse) -> Result< [< $endpoint Response >], TaggedAdminApiResponse> { - match resp { - TaggedAdminApiResponse::$endpoint(v) => Ok(v), - x => Err(x), - } - } - } - )* - - impl RequestHandler for AdminApiRequest { - type Response = 
AdminApiResponse; - - async fn handle(self, garage: &Arc, admin: &Admin) -> Result { - match self { - $( - AdminApiRequest::$special_endpoint(_) => Err( - Error::Common(CommonError::BadRequest( - concat!(stringify!($special_endpoint), " cannot be used outside of the HTTP Admin API").into() - )) - ), - )* - $( - AdminApiRequest::$endpoint(req) => Ok(AdminApiResponse::$endpoint(req.handle(garage, admin).await?)), - )* - } - } - } - } - }; -} - -macro_rules! local_admin_endpoints { - [ - $($endpoint:ident,)* - ] => { - paste! { - #[derive(Debug, Clone, Serialize, Deserialize)] - pub enum LocalAdminApiRequest { - $( - $endpoint( [] ), - )* - } - - #[derive(Debug, Clone, Serialize, Deserialize)] - pub enum LocalAdminApiResponse { - $( - $endpoint( [] ), - )* - } - - $( - pub type [< $endpoint Request >] = MultiRequest< [< Local $endpoint Request >] >; - - pub type [< $endpoint RequestBody >] = [< Local $endpoint Request >]; - - pub type [< $endpoint Response >] = MultiResponse< [< Local $endpoint Response >] >; - - impl From< [< Local $endpoint Request >] > for LocalAdminApiRequest { - fn from(req: [< Local $endpoint Request >]) -> LocalAdminApiRequest { - LocalAdminApiRequest::$endpoint(req) - } - } - - impl TryFrom for [< Local $endpoint Response >] { - type Error = LocalAdminApiResponse; - fn try_from(resp: LocalAdminApiResponse) -> Result< [< Local $endpoint Response >], LocalAdminApiResponse> { - match resp { - LocalAdminApiResponse::$endpoint(v) => Ok(v), - x => Err(x), - } - } - } - - impl RequestHandler for [< $endpoint Request >] { - type Response = [< $endpoint Response >]; - - async fn handle(self, garage: &Arc, admin: &Admin) -> Result { - let to = find_matching_nodes(garage, self.node.as_str())?; - - let resps = garage.system.rpc_helper().call_many(&admin.endpoint, - &to, - AdminRpc::Internal(self.body.into()), - RequestStrategy::with_priority(PRIO_NORMAL), - ).await?; - - let mut ret = [< $endpoint Response >] { - success: HashMap::new(), - error: 
HashMap::new(), - }; - for (node, resp) in resps { - match resp { - Ok(AdminRpcResponse::InternalApiOkResponse(r)) => { - match [< Local $endpoint Response >]::try_from(r) { - Ok(r) => { - ret.success.insert(hex::encode(node), r); - } - Err(_) => { - ret.error.insert(hex::encode(node), "returned invalid value".to_string()); - } - } - } - Ok(AdminRpcResponse::ApiErrorResponse{error_code, http_code, message}) => { - ret.error.insert(hex::encode(node), format!("{} ({}): {}", error_code, http_code, message)); - } - Ok(_) => { - ret.error.insert(hex::encode(node), "returned invalid value".to_string()); - } - Err(e) => { - ret.error.insert(hex::encode(node), e.to_string()); - } - } - } - - Ok(ret) - } - } - )* - - impl LocalAdminApiRequest { - pub fn name(&self) -> &'static str { - match self { - $( - Self::$endpoint(_) => stringify!($endpoint), - )* - } - } - } - - impl RequestHandler for LocalAdminApiRequest { - type Response = LocalAdminApiResponse; - - async fn handle(self, garage: &Arc, admin: &Admin) -> Result { - Ok(match self { - $( - LocalAdminApiRequest::$endpoint(req) => LocalAdminApiResponse::$endpoint(req.handle(garage, admin).await?), - )* - }) - } - } - } - }; -} - -pub(crate) use admin_endpoints; -pub(crate) use local_admin_endpoints; diff --git a/src/api/admin/node.rs b/src/api/admin/node.rs deleted file mode 100644 index 12163f18..00000000 --- a/src/api/admin/node.rs +++ /dev/null @@ -1,171 +0,0 @@ -use std::fmt::Write; -use std::sync::Arc; - -use format_table::format_table_to_string; - -use garage_util::error::Error as GarageError; - -use garage_table::replication::*; -use garage_table::*; - -use garage_model::garage::Garage; - -use crate::api::*; -use crate::error::Error; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for LocalGetNodeInfoRequest { - type Response = LocalGetNodeInfoResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let sys_status = garage.system.local_status(); - let hostname = 
sys_status.hostname.unwrap_or_default().to_string(); - - Ok(LocalGetNodeInfoResponse { - node_id: hex::encode(garage.system.id), - hostname: Some(hostname), - garage_version: garage_util::version::garage_version().to_string(), - garage_features: garage_util::version::garage_features() - .map(|features| features.iter().map(ToString::to_string).collect()), - rust_version: garage_util::version::rust_version().to_string(), - db_engine: garage.db.engine(), - }) - } -} - -impl RequestHandler for LocalCreateMetadataSnapshotRequest { - type Response = LocalCreateMetadataSnapshotResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - garage_model::snapshot::async_snapshot_metadata(garage).await?; - Ok(LocalCreateMetadataSnapshotResponse) - } -} - -impl RequestHandler for LocalGetNodeStatisticsRequest { - type Response = LocalGetNodeStatisticsResponse; - - // FIXME: return this as a JSON struct instead of text - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let sys_status = garage.system.local_status(); - - let hostname = sys_status.hostname.unwrap_or_default().to_string(); - let garage_version = garage_util::version::garage_version().to_string(); - let garage_features = garage_util::version::garage_features() - .unwrap() - .iter() - .map(ToString::to_string) - .collect::>(); - let rustc_version = garage_util::version::rust_version().to_string(); - let db_engine_descr = garage.db.engine(); - - let mut ret = format_table_to_string(vec![ - format!("Node ID:\t{:?}", garage.system.id), - format!("Hostname:\t{}", hostname), - format!("Garage version:\t{}", garage_version), - format!("Garage features:\t{}", garage_features.join(", ")), - format!("Rust compiler version:\t{}", rustc_version), - format!("Database engine:\t{}", db_engine_descr), - ]); - - let mut table_stats = vec![ - gather_table_stats(&garage.admin_token_table)?, - gather_table_stats(&garage.bucket_table)?, - 
gather_table_stats(&garage.bucket_alias_table)?, - gather_table_stats(&garage.key_table)?, - gather_table_stats(&garage.object_table)?, - gather_table_stats(&garage.object_counter_table.table)?, - gather_table_stats(&garage.mpu_table)?, - gather_table_stats(&garage.mpu_counter_table.table)?, - gather_table_stats(&garage.version_table)?, - gather_table_stats(&garage.block_ref_table)?, - ]; - - #[cfg(feature = "k2v")] - { - table_stats.push(gather_table_stats(&garage.k2v.item_table)?); - table_stats.push(gather_table_stats(&garage.k2v.counter_table.table)?); - } - - // Gather table statistics - let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tInsQueue\tGcTodo".into()]; - table.extend(table_stats.iter().map(|ts| { - format!( - " {}\t{}\t{}\t{}\t{}\t{}", - ts.table_name, - ts.items, - ts.merkle_items, - ts.merkle_queue_len, - ts.insert_queue_len, - ts.gc_queue_len, - ) - })); - - write!( - &mut ret, - "\nTable stats:\n{}", - format_table_to_string(table) - ) - .unwrap(); - - let block_manager_stats = NodeBlockManagerStats { - rc_entries: garage.block_manager.rc_approximate_len()? as u64, - resync_queue_len: garage.block_manager.resync.queue_approximate_len()? as u64, - resync_errors: garage.block_manager.resync.errors_approximate_len()? 
as u64, - }; - - // Gather block manager statistics - writeln!(&mut ret, "\nBlock manager stats:").unwrap(); - - ret += &format_table_to_string(vec![ - format!( - " number of RC entries:\t{} (~= number of blocks)", - block_manager_stats.rc_entries - ), - format!( - " resync queue length:\t{}", - block_manager_stats.resync_queue_len, - ), - format!( - " blocks with resync errors:\t{}", - block_manager_stats.resync_errors - ), - ]); - - Ok(LocalGetNodeStatisticsResponse { - freeform: ret, - table_stats: Some(table_stats), - block_manager_stats: Some(block_manager_stats), - }) - } -} - -fn gather_table_stats(t: &Arc>) -> Result -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ - let data_len = t.data.store.approximate_len().map_err(GarageError::from)?; - let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?; - - Ok(NodeTableStats { - table_name: F::TABLE_NAME.to_string(), - items: data_len as u64, - merkle_items: mkl_len as u64, - merkle_queue_len: t.merkle_updater.todo_approximate_len()? as u64, - insert_queue_len: t.data.insert_queue_approximate_len()? as u64, - gc_queue_len: t.data.gc_todo_approximate_len()? 
as u64, - }) -} diff --git a/src/api/admin/openapi.rs b/src/api/admin/openapi.rs deleted file mode 100644 index 2bb5b2a4..00000000 --- a/src/api/admin/openapi.rs +++ /dev/null @@ -1,958 +0,0 @@ -#![allow(dead_code)] -#![allow(non_snake_case)] - -use serde::{Deserialize, Serialize}; -use utoipa::{Modify, OpenApi, ToSchema}; - -use crate::api::*; - -// ********************************************** -// Special endpoints -// ********************************************** - -#[utoipa::path(get, - path = "/metrics", - tag = "Special endpoints", - description = "Prometheus metrics endpoint", - security((), ("bearerAuth" = [])), - responses( - (status = 200, description = "Garage daemon metrics exported in Prometheus format"), - ), -)] -fn Metrics() {} - -#[utoipa::path(get, - path = "/health", - tag = "Special endpoints", - description = " -Check cluster health. The status code returned by this function indicates -whether this Garage daemon can answer API requests. -Garage will return `200 OK` even if some storage nodes are disconnected, -as long as it is able to have a quorum of nodes for read and write operations. - ", - security(()), - responses( - (status = 200, description = "Garage is able to answer requests"), - (status = 503, description = "This Garage daemon is not able to handle requests") - ), -)] -fn Health() {} - -#[utoipa::path(get, - path = "/check", - tag = "Special endpoints", - description = " -Static website domain name check. Checks whether a bucket is configured to serve -a static website for the requested domain. This is used by reverse proxies such -as Caddy or Tricot, to avoid requesting TLS certificates for domain names that -do not correspond to an actual website. 
- ", - params(CheckDomainRequest), - security(()), - responses( - (status = 200, description = "The domain name redirects to a static website bucket"), - (status = 400, description = "No static website bucket exists for this domain") - ), -)] -fn CheckDomain() {} - -// ********************************************** -// Cluster operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/GetClusterStatus", - tag = "Cluster", - description = " -Returns the cluster's current status, including: - -- ID of the node being queried and its version of the Garage daemon -- Live nodes -- Currently configured cluster layout -- Staged changes to the cluster layout - -*Capacity is given in bytes* - ", - responses( - (status = 200, description = "Cluster status report", body = GetClusterStatusResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetClusterStatus() {} - -#[utoipa::path(get, - path = "/v2/GetClusterHealth", - tag = "Cluster", - description = "Returns the global status of the cluster, the number of connected nodes (over the number of known ones), the number of healthy storage nodes (over the declared ones), and the number of healthy partitions (over the total).", - responses( - (status = 200, description = "Cluster health report", body = GetClusterHealthResponse), - ), -)] -fn GetClusterHealth() {} - -#[utoipa::path(get, - path = "/v2/GetClusterStatistics", - tag = "Cluster", - description = " -Fetch global cluster statistics. 
- -*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.* - ", - responses( - (status = 200, description = "Global cluster statistics", body = GetClusterStatisticsResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetClusterStatistics() {} - -#[utoipa::path(post, - path = "/v2/ConnectClusterNodes", - tag = "Cluster", - description = "Instructs this Garage node to connect to other Garage nodes at specified `@`. `node_id` is generated automatically on node start.", - request_body=ConnectClusterNodesRequest, - responses( - (status = 200, description = "The request has been handled correctly but it does not mean that all connection requests succeeded; some might have fail, you need to check the body!", body = ConnectClusterNodesResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ConnectClusterNodes() {} - -// ********************************************** -// Admin API token operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/ListAdminTokens", - tag = "Admin API token", - description = "Returns all admin API tokens in the cluster.", - responses( - (status = 200, description = "Returns info about all admin API tokens", body = ListAdminTokensResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ListAdminTokens() {} - -#[utoipa::path(get, - path = "/v2/GetAdminTokenInfo", - tag = "Admin API token", - description = " -Return information about a specific admin API token. -You can search by specifying the exact token identifier (`id`) or by specifying a pattern (`search`). 
- ", - params(GetAdminTokenInfoRequest), - responses( - (status = 200, description = "Information about the admin token", body = GetAdminTokenInfoResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetAdminTokenInfo() {} - -#[utoipa::path(post, - path = "/v2/CreateAdminToken", - tag = "Admin API token", - description = "Creates a new admin API token", - request_body = UpdateAdminTokenRequestBody, - responses( - (status = 200, description = "Admin token has been created", body = CreateAdminTokenResponse), - (status = 500, description = "Internal server error") - ), -)] -fn CreateAdminToken() {} - -#[utoipa::path(post, - path = "/v2/UpdateAdminToken", - tag = "Admin API token", - description = " -Updates information about the specified admin API token. - ", - request_body = UpdateAdminTokenRequestBody, - params(UpdateAdminTokenRequest), - responses( - (status = 200, description = "Admin token has been updated", body = UpdateAdminTokenResponse), - (status = 500, description = "Internal server error") - ), -)] -fn UpdateAdminToken() {} - -#[utoipa::path(post, - path = "/v2/DeleteAdminToken", - tag = "Admin API token", - description = "Delete an admin API token from the cluster, revoking all its permissions.", - params(DeleteAdminTokenRequest), - responses( - (status = 200, description = "Admin token has been deleted"), - (status = 500, description = "Internal server error") - ), -)] -fn DeleteAdminToken() {} - -#[utoipa::path(get, - path = "/v2/GetCurrentAdminTokenInfo", - tag = "Admin API token", - description = " -Return information about the calling admin API token. 
- ", - responses( - (status = 200, description = "Information about the admin token", body = GetCurrentAdminTokenInfoResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetCurrentAdminTokenInfo() {} - -// ********************************************** -// Layout operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/GetClusterLayout", - tag = "Cluster layout", - description = " -Returns the cluster's current layout, including: - -- Currently configured cluster layout -- Staged changes to the cluster layout - -*Capacity is given in bytes* - ", - responses( - (status = 200, description = "Current cluster layout", body = GetClusterLayoutResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetClusterLayout() {} - -#[utoipa::path(get, - path = "/v2/GetClusterLayoutHistory", - tag = "Cluster layout", - description = " -Returns the history of layouts in the cluster - ", - responses( - (status = 200, description = "Cluster layout history", body = GetClusterLayoutHistoryResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetClusterLayoutHistory() {} - -#[utoipa::path(post, - path = "/v2/UpdateClusterLayout", - tag = "Cluster layout", - description = " -Send modifications to the cluster layout. These modifications will be included in the staged role changes, visible in subsequent calls of `GET /GetClusterHealth`. Once the set of staged changes is satisfactory, the user may call `POST /ApplyClusterLayout` to apply the changed changes, or `POST /RevertClusterLayout` to clear all of the staged changes in the layout. - -Setting the capacity to `null` will configure the node as a gateway. -Otherwise, capacity must be now set in bytes (before Garage 0.9 it was arbitrary weights). -For example to declare 100GB, you must set `capacity: 100000000000`. 
- -Garage uses internally the International System of Units (SI), it assumes that 1kB = 1000 bytes, and displays storage as kB, MB, GB (and not KiB, MiB, GiB that assume 1KiB = 1024 bytes). - ", - request_body( - content=UpdateClusterLayoutRequestOpenapi, - description=" -To add a new node to the layout or to change the configuration of an existing node, simply set the values you want (`zone`, `capacity`, and `tags`). -To remove a node, simply pass the `remove: true` field. -This logic is represented in OpenAPI with a 'One Of' object. - -Contrary to the CLI that may update only a subset of the fields capacity, zone and tags, when calling this API all of these values must be specified. - " - ), - responses( - (status = 200, description = "Proposed changes have been added to the list of pending changes", body = UpdateClusterLayoutResponse), - (status = 500, description = "Internal server error") - ), -)] -fn UpdateClusterLayout() {} - -// Hack: we cannot use the UpdateClusterLayoutRequest from api.rs, -// as it contains (via NodeRoleChange) an untagged enum flattenned into -// a struct, which breaks the openapi generator. -// See issue #1249. -// Instead, we use a rewritten version of the NodeRoleChange struct where -// the struct fields are distributed into the enum variants (this is an equivalent -// representation, but this way we avoid having to rewrite all uses of the original -// struct in the Garage codebase). 
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[schema(as = UpdateClusterLayoutRequest)] -pub struct UpdateClusterLayoutRequestOpenapi { - /// New node roles to assign or remove in the cluster layout - #[serde(default)] - pub roles: Vec, - /// New layout computation parameters to use - #[serde(default)] - pub parameters: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[schema(as = NodeRoleChangeRequest)] -#[serde(untagged)] -pub enum NodeRoleChangeOpenapi { - #[serde(rename_all = "camelCase")] - Remove { - /// ID of the node for which this change applies - id: String, - /// Set `remove` to `true` to remove the node from the layout - remove: bool, - }, - #[serde(rename_all = "camelCase")] - Update { - /// ID of the node for which this change applies - id: String, - #[serde(flatten)] - role: NodeAssignedRole, - }, -} - -#[utoipa::path(post, - path = "/v2/PreviewClusterLayoutChanges", - tag = "Cluster layout", - description = " -Computes a new layout taking into account the staged parameters, and returns it with detailed statistics. The new layout is not applied in the cluster. - -*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.* - ", - responses( - (status = 200, description = "Information about the new layout", body = PreviewClusterLayoutChangesResponse), - (status = 500, description = "Internal server error") - ), -)] -fn PreviewClusterLayoutChanges() {} - -#[utoipa::path(post, - path = "/v2/ApplyClusterLayout", - tag = "Cluster layout", - description = " -Applies to the cluster the layout changes currently registered as staged layout changes. 
- -*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.* - ", - request_body=ApplyClusterLayoutRequest, - responses( - (status = 200, description = "The updated cluster layout has been applied in the cluster", body = ApplyClusterLayoutResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ApplyClusterLayout() {} - -#[utoipa::path(post, - path = "/v2/RevertClusterLayout", - tag = "Cluster layout", - description = "Clear staged layout changes", - responses( - (status = 200, description = "All pending changes to the cluster layout have been erased", body = RevertClusterLayoutResponse), - (status = 500, description = "Internal server error") - ), -)] -fn RevertClusterLayout() {} - -#[utoipa::path(post, - path = "/v2/ClusterLayoutSkipDeadNodes", - tag = "Cluster layout", - description = "Force progress in layout update trackers", - request_body = ClusterLayoutSkipDeadNodesRequest, - responses( - (status = 200, description = "Request has been taken into account", body = ClusterLayoutSkipDeadNodesResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ClusterLayoutSkipDeadNodes() {} - -// ********************************************** -// Access key operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/ListKeys", - tag = "Access key", - description = "Returns all API access keys in the cluster.", - responses( - (status = 200, description = "Returns the key identifier (aka `AWS_ACCESS_KEY_ID`) and its associated, human friendly, name if any (otherwise return an empty string)", body = ListKeysResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ListKeys() {} - -#[utoipa::path(get, - path = "/v2/GetKeyInfo", - tag = "Access key", - description = " -Return information about a specific key like its identifiers, its permissions and buckets on which it has permissions. 
-You can search by specifying the exact key identifier (`id`) or by specifying a pattern (`search`). - -For confidentiality reasons, the secret key is not returned by default: you must pass the `showSecretKey` query parameter to get it. - ", - params(GetKeyInfoRequest), - responses( - (status = 200, description = "Information about the access key", body = GetKeyInfoResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetKeyInfo() {} - -#[utoipa::path(post, - path = "/v2/CreateKey", - tag = "Access key", - description = "Creates a new API access key.", - request_body = CreateKeyRequest, - responses( - (status = 200, description = "Access key has been created", body = CreateKeyResponse), - (status = 500, description = "Internal server error") - ), -)] -fn CreateKey() {} - -#[utoipa::path(post, - path = "/v2/ImportKey", - tag = "Access key", - description = " -Imports an existing API key. This feature must only be used for migrations and backup restore. - -**Do not use it to generate custom key identifiers or you will break your Garage cluster.** - ", - request_body = ImportKeyRequest, - responses( - (status = 200, description = "Access key has been imported", body = ImportKeyResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ImportKey() {} - -#[utoipa::path(post, - path = "/v2/UpdateKey", - tag = "Access key", - description = " -Updates information about the specified API access key. - -*Note: the secret key is not returned in the response, `null` is sent instead.* - ", - request_body = UpdateKeyRequestBody, - params(UpdateKeyRequest), - responses( - (status = 200, description = "Access key has been updated", body = UpdateKeyResponse), - (status = 500, description = "Internal server error") - ), -)] -fn UpdateKey() {} - -#[utoipa::path(post, - path = "/v2/DeleteKey", - tag = "Access key", - description = "Delete a key from the cluster. Its access will be removed from all the buckets. 
Buckets are not automatically deleted and can be dangling. You should manually delete them before. ", - params(DeleteKeyRequest), - responses( - (status = 200, description = "Access key has been deleted"), - (status = 500, description = "Internal server error") - ), -)] -fn DeleteKey() {} - -// ********************************************** -// Bucket operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/ListBuckets", - tag = "Bucket", - description = "List all the buckets on the cluster with their UUID and their global and local aliases.", - responses( - (status = 200, description = "Returns the UUID of all the buckets and all their aliases", body = ListBucketsResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ListBuckets() {} - -#[utoipa::path(get, - path = "/v2/GetBucketInfo", - tag = "Bucket", - description = " -Given a bucket identifier (`id`) or a global alias (`alias`), get its information. -It includes its aliases, its web configuration, keys that have some permissions -on it, some statistics (number of objects, size), number of dangling multipart uploads, -and its quotas (if any). - ", - params(GetBucketInfoRequest), - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = GetBucketInfoResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetBucketInfo() {} - -#[utoipa::path(post, - path = "/v2/CreateBucket", - tag = "Bucket", - description = " -Creates a new bucket, either with a global alias, a local one, or no alias at all. -Technically, you can also specify both `globalAlias` and `localAlias` and that would create two aliases. 
- ", - request_body = CreateBucketRequest, - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = CreateBucketResponse), - (status = 500, description = "Internal server error") - ), -)] -fn CreateBucket() {} - -#[utoipa::path(post, - path = "/v2/UpdateBucket", - tag = "Bucket", - description = " -All fields (`websiteAccess` and `quotas`) are optional. -If they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed. - -In `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified. -The field `errorDocument` is optional, if no error document is set a generic -error message is displayed when errors happen. Conversely, if `enabled` is -`false`, neither `indexDocument` nor `errorDocument` must be specified. - -In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null` -to remove the quotas. An absent value will be considered the same as a `null`. It is not possible -to change only one of the two quotas. - ", - params(UpdateBucketRequest), - request_body = UpdateBucketRequestBody, - responses( - (status = 200, description = "Bucket has been updated", body = UpdateBucketResponse), - (status = 404, description = "Bucket not found"), - (status = 500, description = "Internal server error") - ), -)] -fn UpdateBucket() {} - -#[utoipa::path(post, - path = "/v2/DeleteBucket", - tag = "Bucket", - description = " -Deletes a storage bucket. A bucket cannot be deleted if it is not empty. - -**Warning:** this will delete all aliases associated with the bucket! 
- ", - params(DeleteBucketRequest), - responses( - (status = 200, description = "Bucket has been deleted"), - (status = 400, description = "Bucket is not empty"), - (status = 404, description = "Bucket not found"), - (status = 500, description = "Internal server error") - ), -)] -fn DeleteBucket() {} - -#[utoipa::path(post, - path = "/v2/CleanupIncompleteUploads", - tag = "Bucket", - description = "Removes all incomplete multipart uploads that are older than the specified number of seconds.", - request_body = CleanupIncompleteUploadsRequest, - responses( - (status = 200, description = "The bucket was cleaned up successfully", body = CleanupIncompleteUploadsResponse), - (status = 500, description = "Internal server error") - ), -)] -fn CleanupIncompleteUploads() {} - -#[utoipa::path(get, - path = "/v2/InspectObject", - tag = "Bucket", - description = " -Returns detailed information about an object in a bucket, including its internal state in Garage. - -This API call can be used to list the data blocks referenced by an object, -as well as to view metadata associated to the object. - -This call may return a list of more than one version for the object, for instance in the -case where there is a currently stored version of the object, and a newer version whose -upload is in progress and not yet finished. - ", - params(InspectObjectRequest), - responses( - (status = 200, description = "Returns exhaustive information about the object", body = InspectObjectResponse), - (status = 404, description = "Object not found"), - (status = 500, description = "Internal server error") - ), -)] -fn InspectObject() {} - -// ********************************************** -// Operations on permissions for keys on buckets -// ********************************************** - -#[utoipa::path(post, - path = "/v2/AllowBucketKey", - tag = "Permission", - description = " -⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. 
Be extra careful when implementing it, its behavior is not obvious. - -Allows a key to do read/write/owner operations on a bucket. - -Flags in permissions which have the value true will be activated. Other flags will remain unchanged (ie. they will keep their internal value). - -For example, if you set read to true, the key will be allowed to read the bucket. -If you set it to false, the key will keeps its previous read permission. -If you want to disallow read for the key, check the DenyBucketKey operation. - ", - request_body = AllowBucketKeyRequest, - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = AllowBucketKeyResponse), - (status = 500, description = "Internal server error") - ), -)] -fn AllowBucketKey() {} - -#[utoipa::path(post, - path = "/v2/DenyBucketKey", - tag = "Permission", - description = " -⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious. - -Denies a key from doing read/write/owner operations on a bucket. - -Flags in permissions which have the value true will be deactivated. Other flags will remain unchanged. - -For example, if you set read to true, the key will be denied from reading. -If you set read to false, the key will keep its previous permissions. -If you want the key to have the reading permission, check the AllowBucketKey operation. - ", - request_body = DenyBucketKeyRequest, - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = DenyBucketKeyResponse), - (status = 500, description = "Internal server error") - ), -)] -fn DenyBucketKey() {} - -// ********************************************** -// Operations on bucket aliases -// ********************************************** - -#[utoipa::path(post, - path = "/v2/AddBucketAlias", - tag = "Bucket alias", - description = "Add an alias for the target bucket. 
This can be either a global or a local alias, depending on which fields are specified.", - request_body = BucketAliasEnumOpenapi, - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = AddBucketAliasResponse), - (status = 500, description = "Internal server error") - ), -)] -fn AddBucketAlias() {} - -#[utoipa::path(post, - path = "/v2/RemoveBucketAlias", - tag = "Bucket alias", - description = "Remove an alias for the target bucket. This can be either a global or a local alias, depending on which fields are specified.", - request_body = BucketAliasEnumOpenapi, - responses( - (status = 200, description = "Returns exhaustive information about the bucket", body = RemoveBucketAliasResponse), - (status = 500, description = "Internal server error") - ), -)] -fn RemoveBucketAlias() {} - -// Hack for issue #1249 (see UpdateClusterLayout) -#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] -#[serde(untagged)] -#[schema(as = BucketAliasEnum)] -pub enum BucketAliasEnumOpenapi { - #[serde(rename_all = "camelCase")] - Global { - bucket_id: String, - global_alias: String, - }, - #[serde(rename_all = "camelCase")] - Local { - bucket_id: String, - local_alias: String, - access_key_id: String, - }, -} - -// ********************************************** -// Node operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/GetNodeInfo", - tag = "Node", - description = " -Return information about the Garage daemon running on one or several nodes. - ", - params(MultiRequestQueryParams), - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetNodeInfo() {} - -#[utoipa::path(get, - path = "/v2/GetNodeStatistics", - tag = "Node", - description = " -Fetch statistics for one or several Garage nodes. 
- -*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.* - ", - params(MultiRequestQueryParams), - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetNodeStatistics() {} - -#[utoipa::path(post, - path = "/v2/CreateMetadataSnapshot", - tag = "Node", - description = " -Instruct one or several nodes to take a snapshot of their metadata databases. - ", - params(MultiRequestQueryParams), - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn CreateMetadataSnapshot() {} - -#[utoipa::path(post, - path = "/v2/LaunchRepairOperation", - tag = "Node", - description = " -Launch a repair operation on one or several cluster nodes. - ", - params(MultiRequestQueryParams), - request_body = LocalLaunchRepairOperationRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn LaunchRepairOperation() {} - -// ********************************************** -// Worker operations -// ********************************************** - -#[utoipa::path(post, - path = "/v2/ListWorkers", - tag = "Worker", - description = " -List background workers currently running on one or several cluster nodes. 
- ", - params(MultiRequestQueryParams), - request_body = LocalListWorkersRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ListWorkers() {} - -#[utoipa::path(post, - path = "/v2/GetWorkerInfo", - tag = "Worker", - description = " -Get information about the specified background worker on one or several cluster nodes. - ", - params(MultiRequestQueryParams), - request_body = LocalGetWorkerInfoRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetWorkerInfo() {} - -#[utoipa::path(post, - path = "/v2/GetWorkerVariable", - tag = "Worker", - description = " -Fetch values of one or several worker variables, from one or several cluster nodes. - ", - params(MultiRequestQueryParams), - request_body = LocalGetWorkerVariableRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetWorkerVariable() {} - -#[utoipa::path(post, - path = "/v2/SetWorkerVariable", - tag = "Worker", - description = " -Set the value for a worker variable, on one or several cluster nodes. - ", - params(MultiRequestQueryParams), - request_body = LocalSetWorkerVariableRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn SetWorkerVariable() {} - -// ********************************************** -// Block operations -// ********************************************** - -#[utoipa::path(get, - path = "/v2/ListBlockErrors", - tag = "Block", - description = " -List data blocks that are currently in an errored state on one or several Garage nodes. 
- ", - params(MultiRequestQueryParams), - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn ListBlockErrors() {} - -#[utoipa::path(post, - path = "/v2/GetBlockInfo", - tag = "Block", - description = " -Get detailed information about a data block stored on a Garage node, including all object versions and in-progress multipart uploads that contain a reference to this block. - ", - params(MultiRequestQueryParams), - request_body = LocalGetBlockInfoRequest, - responses( - (status = 200, description = "Detailed block information", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn GetBlockInfo() {} - -#[utoipa::path(post, - path = "/v2/RetryBlockResync", - tag = "Block", - description = " -Instruct Garage node(s) to retry the resynchronization of one or several missing data block(s). - ", - params(MultiRequestQueryParams), - request_body = LocalRetryBlockResyncRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn RetryBlockResync() {} - -#[utoipa::path(post, - path = "/v2/PurgeBlocks", - tag = "Block", - description = " -Purge references to one or several missing data blocks. - -This will remove all objects and in-progress multipart uploads that contain the specified data block(s). The objects will be permanently deleted from the buckets in which they appear. Use with caution. 
- ", - params(MultiRequestQueryParams), - request_body = LocalPurgeBlocksRequest, - responses( - (status = 200, description = "Responses from individual cluster nodes", body = MultiResponse), - (status = 500, description = "Internal server error") - ), -)] -fn PurgeBlocks() {} - -// ********************************************** -// ********************************************** -// ********************************************** - -struct SecurityAddon; - -impl Modify for SecurityAddon { - fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) { - use utoipa::openapi::security::*; - let components = openapi.components.as_mut().unwrap(); // we can unwrap safely since there already is components registered. - components.add_security_scheme( - "bearerAuth", - SecurityScheme::Http(Http::builder().scheme(HttpAuthScheme::Bearer).build()), - ); - } -} - -#[derive(OpenApi)] -#[openapi( - info( - version = "v2.3.0", - title = "Garage administration API", - description = "Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks. - -*Disclaimer: This API may change in future Garage versions. Read the changelog and upgrade your scripts before upgrading. 
Additionally, this specification is early stage and can contain bugs, so be careful and please report any issues on our issue tracker.*", - contact( - name = "The Garage team", - email = "garagehq@deuxfleurs.fr", - url = "https://garagehq.deuxfleurs.fr/", - ), - ), - modifiers(&SecurityAddon), - security(("bearerAuth" = [])), - paths( - // Special ops - Metrics, - Health, - CheckDomain, - // Cluster operations - GetClusterHealth, - GetClusterStatus, - GetClusterStatistics, - ConnectClusterNodes, - // Admin token operations - ListAdminTokens, - GetAdminTokenInfo, - CreateAdminToken, - UpdateAdminToken, - DeleteAdminToken, - GetCurrentAdminTokenInfo, - // Layout operations - GetClusterLayout, - GetClusterLayoutHistory, - UpdateClusterLayout, - PreviewClusterLayoutChanges, - ApplyClusterLayout, - RevertClusterLayout, - ClusterLayoutSkipDeadNodes, - // Key operations - ListKeys, - GetKeyInfo, - CreateKey, - ImportKey, - UpdateKey, - DeleteKey, - // Bucket operations - ListBuckets, - GetBucketInfo, - CreateBucket, - UpdateBucket, - DeleteBucket, - CleanupIncompleteUploads, - InspectObject, - // Operations on permissions - AllowBucketKey, - DenyBucketKey, - // Operations on aliases - AddBucketAlias, - RemoveBucketAlias, - // Node operations - GetNodeInfo, - GetNodeStatistics, - CreateMetadataSnapshot, - LaunchRepairOperation, - // Worker operations - ListWorkers, - GetWorkerInfo, - GetWorkerVariable, - SetWorkerVariable, - // Block operations - ListBlockErrors, - GetBlockInfo, - RetryBlockResync, - PurgeBlocks, - ), - servers( - (url = "http://localhost:3903/", description = "A local server") - ), -)] -pub struct ApiDoc; diff --git a/src/api/admin/router_v0.rs b/src/api/admin/router_v0.rs index 89390666..9dd742ba 100644 --- a/src/api/admin/router_v0.rs +++ b/src/api/admin/router_v0.rs @@ -77,7 +77,7 @@ pub enum Endpoint { impl Endpoint { /// Determine which S3 endpoint a request is for using the request, and a bucket which was /// possibly extracted from the Host header. 
- /// Returns Self plus bucket name, if endpoint is not `Endpoint::ListBuckets` + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets pub fn from_request(req: &Request) -> Result { let uri = req.uri(); let path = uri.path(); @@ -124,7 +124,7 @@ impl Endpoint { ]); if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message); + debug!("Unused query parameter: {}", message) } Ok(res) diff --git a/src/api/admin/router_v1.rs b/src/api/admin/router_v1.rs index 79e4fac4..0b4901ea 100644 --- a/src/api/admin/router_v1.rs +++ b/src/api/admin/router_v1.rs @@ -7,6 +7,12 @@ use garage_api_common::router_macros::*; use crate::error::*; use crate::router_v0; +pub enum Authorization { + None, + MetricsToken, + AdminToken, +} + router_match! {@func /// List of all Admin API endpoints. @@ -79,7 +85,7 @@ pub enum Endpoint { impl Endpoint { /// Determine which S3 endpoint a request is for using the request, and a bucket which was /// possibly extracted from the Host header. - /// Returns Self plus bucket name, if endpoint is not `Endpoint::ListBuckets` + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets pub fn from_request(req: &Request) -> Result { let uri = req.uri(); let path = uri.path(); @@ -126,7 +132,7 @@ impl Endpoint { ]); if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message); + debug!("Unused query parameter: {}", message) } Ok(res) @@ -205,6 +211,15 @@ impl Endpoint { ))), } } + /// Get the kind of authorization which is required to perform the operation. + pub fn authorization_type(&self) -> Authorization { + match self { + Self::Health => Authorization::None, + Self::CheckDomain => Authorization::None, + Self::Metrics => Authorization::MetricsToken, + _ => Authorization::AdminToken, + } + } } generateQueryParameters! 
{ diff --git a/src/api/admin/router_v2.rs b/src/api/admin/router_v2.rs deleted file mode 100644 index 3a9a567d..00000000 --- a/src/api/admin/router_v2.rs +++ /dev/null @@ -1,276 +0,0 @@ -use std::borrow::Cow; - -use hyper::body::Incoming as IncomingBody; -use hyper::{Method, Request}; -use paste::paste; - -use garage_api_common::helpers::*; -use garage_api_common::router_macros::*; - -use crate::api::*; -use crate::error::*; -use crate::router_v1; -use crate::Authorization; - -impl AdminApiRequest { - /// Determine which S3 endpoint a request is for using the request, and a bucket which was - /// possibly extracted from the Host header. - /// Returns Self plus bucket name, if endpoint is not `Endpoint::ListBuckets` - pub async fn from_request(req: Request) -> Result { - let uri = req.uri().clone(); - let path = uri.path(); - let query = uri.query(); - - let method = req.method().clone(); - - let mut query = QueryParameters::from_query(query.unwrap_or_default())?; - - let res = router_match!(@gen_path_parser_v2 (&method, path, "/v2/", query, req) [ - @special OPTIONS _ => Options (), - @special GET "/check" => CheckDomain (query::domain), - @special GET "/health" => Health (), - @special GET "/metrics" => Metrics (), - // Cluster endpoints - GET GetClusterStatus (), - GET GetClusterHealth (), - POST ConnectClusterNodes (body), - // Admin token endpoints - GET ListAdminTokens (), - GET GetAdminTokenInfo (query_opt::id, query_opt::search), - POST CreateAdminToken (body), - POST UpdateAdminToken (body_field, query::id), - POST DeleteAdminToken (query::id), - GET GetCurrentAdminTokenInfo (admin_token), - // Layout endpoints - GET GetClusterLayout (), - GET GetClusterLayoutHistory (), - POST UpdateClusterLayout (body), - POST PreviewClusterLayoutChanges (), - POST ApplyClusterLayout (body), - POST RevertClusterLayout (), - POST ClusterLayoutSkipDeadNodes (body), - // API key endpoints - GET GetKeyInfo (query_opt::id, query_opt::search, 
parse_default(false)::show_secret_key), - POST UpdateKey (body_field, query::id), - POST CreateKey (body), - POST ImportKey (body), - POST DeleteKey (query::id), - GET ListKeys (), - // Bucket endpoints - GET GetBucketInfo (query_opt::id, query_opt::global_alias, query_opt::search), - GET ListBuckets (), - POST CreateBucket (body), - POST DeleteBucket (query::id), - POST UpdateBucket (body_field, query::id), - POST CleanupIncompleteUploads (body), - GET InspectObject (query::bucket_id, query::key), - // Bucket-key permissions - POST AllowBucketKey (body), - POST DenyBucketKey (body), - // Bucket aliases - POST AddBucketAlias (body), - POST RemoveBucketAlias (body), - // Node APIs - GET GetNodeInfo (default::body, query::node), - POST CreateMetadataSnapshot (default::body, query::node), - GET GetNodeStatistics (default::body, query::node), - GET GetClusterStatistics (), - POST LaunchRepairOperation (body_field, query::node), - // Worker APIs - POST ListWorkers (body_field, query::node), - POST GetWorkerInfo (body_field, query::node), - POST GetWorkerVariable (body_field, query::node), - POST SetWorkerVariable (body_field, query::node), - // Block APIs - GET ListBlockErrors (default::body, query::node), - POST GetBlockInfo (body_field, query::node), - POST RetryBlockResync (body_field, query::node), - POST PurgeBlocks (body_field, query::node), - ]); - - if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message); - } - - Ok(res) - } - - /// Some endpoints work exactly the same in their v2/ version as they did in their v1/ version. - /// For these endpoints, we can convert a v1/ call to its equivalent as if it was made using - /// its v2/ URL. 
- pub async fn from_v1( - v1_endpoint: router_v1::Endpoint, - req: Request, - ) -> Result { - use router_v1::Endpoint; - - match v1_endpoint { - // GetClusterStatus semantics changed: - // info about local node is no longer returned - Endpoint::GetClusterHealth => { - Ok(AdminApiRequest::GetClusterHealth(GetClusterHealthRequest)) - } - Endpoint::ConnectClusterNodes => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::ConnectClusterNodes(req)) - } - - // Layout - Endpoint::GetClusterLayout => { - Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest)) - } - // UpdateClusterLayout semantics changed - Endpoint::ApplyClusterLayout => { - let param = parse_json_body::(req).await?; - Ok(AdminApiRequest::ApplyClusterLayout(param)) - } - Endpoint::RevertClusterLayout => Ok(AdminApiRequest::RevertClusterLayout( - RevertClusterLayoutRequest, - )), - - // Keys - Endpoint::ListKeys => Ok(AdminApiRequest::ListKeys(ListKeysRequest)), - Endpoint::GetKeyInfo { - id, - search, - show_secret_key, - } => { - let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false); - Ok(AdminApiRequest::GetKeyInfo(GetKeyInfoRequest { - id, - search, - show_secret_key, - })) - } - Endpoint::CreateKey => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::CreateKey(req)) - } - Endpoint::ImportKey => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::ImportKey(req)) - } - Endpoint::UpdateKey { id } => { - let body = parse_json_body::(req).await?; - Ok(AdminApiRequest::UpdateKey(UpdateKeyRequest { id, body })) - } - - // DeleteKey semantics changed: - // - in v1/ : HTTP DELETE => HTTP 204 No Content - // - in v2/ : HTTP POST => HTTP 200 Ok - // Endpoint::DeleteKey { id } => Ok(AdminApiRequest::DeleteKey(DeleteKeyRequest { id })), - - // Buckets - Endpoint::ListBuckets => Ok(AdminApiRequest::ListBuckets(ListBucketsRequest)), - Endpoint::GetBucketInfo { id, global_alias } => { - Ok(AdminApiRequest::GetBucketInfo(GetBucketInfoRequest 
{ - id, - global_alias, - search: None, - })) - } - Endpoint::CreateBucket => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::CreateBucket(req)) - } - - // DeleteBucket semantics changed:: - // - in v1/ : HTTP DELETE => HTTP 204 No Content - // - in v2/ : HTTP POST => HTTP 200 Ok - // Endpoint::DeleteBucket { id } => { - // Ok(AdminApiRequest::DeleteBucket(DeleteBucketRequest { id })) - // } - Endpoint::UpdateBucket { id } => { - let body = parse_json_body::(req).await?; - Ok(AdminApiRequest::UpdateBucket(UpdateBucketRequest { - id, - body, - })) - } - - // Bucket-key permissions - Endpoint::BucketAllowKey => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::AllowBucketKey(AllowBucketKeyRequest(req))) - } - Endpoint::BucketDenyKey => { - let req = parse_json_body::(req).await?; - Ok(AdminApiRequest::DenyBucketKey(DenyBucketKeyRequest(req))) - } - // Bucket aliasing - Endpoint::GlobalAliasBucket { id, alias } => { - Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest { - bucket_id: id, - alias: BucketAliasEnum::Global { - global_alias: alias, - }, - })) - } - Endpoint::GlobalUnaliasBucket { id, alias } => Ok(AdminApiRequest::RemoveBucketAlias( - RemoveBucketAliasRequest { - bucket_id: id, - alias: BucketAliasEnum::Global { - global_alias: alias, - }, - }, - )), - Endpoint::LocalAliasBucket { - id, - access_key_id, - alias, - } => Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest { - bucket_id: id, - alias: BucketAliasEnum::Local { - local_alias: alias, - access_key_id, - }, - })), - Endpoint::LocalUnaliasBucket { - id, - access_key_id, - alias, - } => Ok(AdminApiRequest::RemoveBucketAlias( - RemoveBucketAliasRequest { - bucket_id: id, - alias: BucketAliasEnum::Local { - local_alias: alias, - access_key_id, - }, - }, - )), - - // For endpoints that have different body content syntax, issue - // deprecation warning - _ => Err(Error::bad_request(format!( - "v1/ endpoint is no longer supported: {}", - v1_endpoint.name() - 
))), - } - } - - /// Get the kind of authorization which is required to perform the operation. - pub fn authorization_type(&self) -> Authorization { - match self { - Self::Options(_) | Self::Health(_) | Self::CheckDomain(_) => Authorization::None, - Self::Metrics(_) => Authorization::MetricsToken, - _ => Authorization::AdminToken, - } - } -} - -generateQueryParameters! { - keywords: [], - fields: [ - "node" => node, - "domain" => domain, - "format" => format, - "id" => id, - "search" => search, - "globalAlias" => global_alias, - "alias" => alias, - "accessKeyId" => access_key_id, - "showSecretKey" => show_secret_key, - "bucketId" => bucket_id, - "key" => key - ] -} diff --git a/src/api/admin/special.rs b/src/api/admin/special.rs deleted file mode 100644 index 0a4e6705..00000000 --- a/src/api/admin/special.rs +++ /dev/null @@ -1,173 +0,0 @@ -use std::sync::Arc; - -use http::header::{ - ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW, -}; -use hyper::{Response, StatusCode}; - -#[cfg(feature = "metrics")] -use prometheus::{Encoder, TextEncoder}; - -use garage_model::garage::Garage; -use garage_rpc::system::ClusterHealthStatus; - -use garage_api_common::helpers::*; - -use crate::api::{CheckDomainRequest, HealthRequest, MetricsRequest, OptionsRequest}; -use crate::api_server::ResBody; -use crate::error::*; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for OptionsRequest { - type Response = Response; - - async fn handle( - self, - _garage: &Arc, - _admin: &Admin, - ) -> Result, Error> { - Ok(Response::builder() - .status(StatusCode::OK) - .header(ALLOW, "OPTIONS,GET,POST") - .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS,GET,POST") - .header(ACCESS_CONTROL_ALLOW_HEADERS, "authorization,content-type") - .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .body(empty_body())?) 
- } -} - -impl RequestHandler for MetricsRequest { - type Response = Response; - - async fn handle( - self, - _garage: &Arc, - admin: &Admin, - ) -> Result, Error> { - #[cfg(feature = "metrics")] - { - use opentelemetry::trace::Tracer; - - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - - let tracer = opentelemetry::global::tracer("garage"); - let metric_families = tracer.in_span("admin/gather_metrics", |_| { - admin.exporter.registry().gather() - }); - - encoder - .encode(&metric_families, &mut buffer) - .ok_or_internal_error("Could not serialize metrics")?; - - Ok(Response::builder() - .status(StatusCode::OK) - .header(http::header::CONTENT_TYPE, encoder.format_type()) - .body(bytes_body(buffer.into()))?) - } - #[cfg(not(feature = "metrics"))] - Err(Error::bad_request( - "Garage was built without the metrics feature".to_string(), - )) - } -} - -impl RequestHandler for HealthRequest { - type Response = Response; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result, Error> { - let health = garage.system.health(); - - let (status, status_str) = match health.status { - ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"), - ClusterHealthStatus::Degraded => ( - StatusCode::OK, - "Garage is operational but some storage nodes are unavailable", - ), - ClusterHealthStatus::Unavailable => ( - StatusCode::SERVICE_UNAVAILABLE, - "Quorum is not available for some/all partitions, reads and writes will fail", - ), - }; - let status_str = format!( - "{}\nConsult the full health check API endpoint at /v2/GetClusterHealth for more details\n", - status_str - ); - - Ok(Response::builder() - .status(status) - .header(http::header::CONTENT_TYPE, "text/plain") - .body(string_body(status_str))?) - } -} - -impl RequestHandler for CheckDomainRequest { - type Response = Response; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result, Error> { - if check_domain(garage, &self.domain).await? 
{ - Ok(Response::builder() - .status(StatusCode::OK) - .body(string_body(format!( - "Domain '{}' is managed by Garage", - self.domain - )))?) - } else { - Err(Error::bad_request(format!( - "Domain '{}' is not managed by Garage", - self.domain - ))) - } - } -} - -async fn check_domain(garage: &Arc, domain: &str) -> Result { - // Resolve bucket from domain name, inferring if the website must be activated for the - // domain to be valid. - let (bucket_name, must_check_website) = if let Some(bname) = garage - .config - .s3_api - .root_domain - .as_ref() - .and_then(|rd| host_to_bucket(domain, rd)) - { - (bname.to_string(), false) - } else if let Some(bname) = garage - .config - .s3_web - .as_ref() - .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str())) - { - (bname.to_string(), true) - } else { - (domain.to_string(), true) - }; - - let bucket = match garage - .bucket_helper() - .resolve_global_bucket_fast(&bucket_name)? - { - Some(b) => b, - None => return Ok(false), - }; - - if !must_check_website { - return Ok(true); - } - - let bucket_state = bucket.state.as_option().unwrap(); - let bucket_website_config = bucket_state.website_config.get(); - - match bucket_website_config { - Some(_v) => Ok(true), - None => Ok(false), - } -} diff --git a/src/api/admin/worker.rs b/src/api/admin/worker.rs deleted file mode 100644 index b3f4537b..00000000 --- a/src/api/admin/worker.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use garage_util::background::*; -use garage_util::time::now_msec; - -use garage_model::garage::Garage; - -use crate::api::*; -use crate::error::Error; -use crate::{Admin, RequestHandler}; - -impl RequestHandler for LocalListWorkersRequest { - type Response = LocalListWorkersResponse; - - async fn handle( - self, - _garage: &Arc, - admin: &Admin, - ) -> Result { - let workers = admin.background.get_worker_info(); - let info = workers - .into_iter() - .filter(|(_, w)| { - (!self.busy_only - || matches!(w.state, 
WorkerState::Busy | WorkerState::Throttled(_))) - && (!self.error_only || w.errors > 0) - }) - .map(|(id, w)| worker_info_to_api(id as u64, w)) - .collect::>(); - Ok(LocalListWorkersResponse(info)) - } -} - -impl RequestHandler for LocalGetWorkerInfoRequest { - type Response = LocalGetWorkerInfoResponse; - - async fn handle( - self, - _garage: &Arc, - admin: &Admin, - ) -> Result { - let info = admin - .background - .get_worker_info() - .get(&(self.id as usize)) - .ok_or(Error::NoSuchWorker(self.id))? - .clone(); - Ok(LocalGetWorkerInfoResponse(worker_info_to_api( - self.id, info, - ))) - } -} - -impl RequestHandler for LocalGetWorkerVariableRequest { - type Response = LocalGetWorkerVariableResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - let mut res = HashMap::new(); - if let Some(k) = self.variable { - res.insert(k.clone(), garage.bg_vars.get(&k)?); - } else { - let vars = garage.bg_vars.get_all(); - for (k, v) in vars.iter() { - res.insert(k.to_string(), v.to_string()); - } - } - Ok(LocalGetWorkerVariableResponse(res)) - } -} - -impl RequestHandler for LocalSetWorkerVariableRequest { - type Response = LocalSetWorkerVariableResponse; - - async fn handle( - self, - garage: &Arc, - _admin: &Admin, - ) -> Result { - garage.bg_vars.set(&self.variable, &self.value)?; - - Ok(LocalSetWorkerVariableResponse { - variable: self.variable, - value: self.value, - }) - } -} - -// ---- helper functions ---- - -fn worker_info_to_api(id: u64, info: WorkerInfo) -> WorkerInfoResp { - WorkerInfoResp { - id, - name: info.name, - state: match info.state { - WorkerState::Busy => WorkerStateResp::Busy, - WorkerState::Throttled(t) => WorkerStateResp::Throttled { duration_secs: t }, - WorkerState::Idle => WorkerStateResp::Idle, - WorkerState::Done => WorkerStateResp::Done, - }, - errors: info.errors as u64, - consecutive_errors: info.consecutive_errors as u64, - last_error: info.last_error.map(|(message, t)| WorkerLastError { - message, - 
secs_ago: now_msec().saturating_sub(t) / 1000, - }), - - tranquility: info.status.tranquility, - progress: info.status.progress, - queue_length: info.status.queue_length, - persistent_errors: info.status.persistent_errors, - freeform: info.status.freeform, - } -} diff --git a/src/api/common/Cargo.toml b/src/api/common/Cargo.toml index 88d4f50a..df01d59a 100644 --- a/src/api/common/Cargo.toml +++ b/src/api/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api_common" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -21,13 +21,13 @@ garage_util.workspace = true base64.workspace = true bytes.workspace = true chrono.workspace = true -crc-fast.workspace = true +crc32fast.workspace = true +crc32c.workspace = true crypto-common.workspace = true thiserror.workspace = true hex.workspace = true hmac.workspace = true md-5.workspace = true -percent-encoding.workspace = true tracing.workspace = true nom.workspace = true pin-project.workspace = true @@ -42,12 +42,7 @@ hyper = { workspace = true, default-features = false, features = ["server", "htt hyper-util.workspace = true url.workspace = true -quick-xml.workspace = true serde.workspace = true serde_json.workspace = true -utoipa.workspace = true opentelemetry.workspace = true - -[lints] -workspace = true diff --git a/src/api/common/common_error.rs b/src/api/common/common_error.rs index aa4c36fa..e596a6e9 100644 --- a/src/api/common/common_error.rs +++ b/src/api/common/common_error.rs @@ -36,10 +36,6 @@ pub enum CommonError { #[error("Invalid header value: {0}")] InvalidHeader(#[from] hyper::header::ToStrError), - /// The client sent a request for an action not supported by garage - #[error("Unimplemented action: {0}")] - NotImplemented(String), - // ---- SPECIFIC ERROR CONDITIONS ---- // These have to be error codes referenced in the S3 spec here: // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList @@ -59,10 +55,6 @@ pub enum 
CommonError { /// Bucket name is not valid according to AWS S3 specs #[error("Invalid bucket name: {0}")] InvalidBucketName(String), - - /// Tried to create bucket that is already owned by you - #[error("Bucket already owned by you")] - BucketAlreadyOwnedByYou, } #[macro_export] @@ -105,11 +97,8 @@ impl CommonError { } CommonError::BadRequest(_) => StatusCode::BAD_REQUEST, CommonError::Forbidden(_) => StatusCode::FORBIDDEN, - CommonError::NotImplemented(_) => StatusCode::NOT_IMPLEMENTED, CommonError::NoSuchBucket(_) => StatusCode::NOT_FOUND, - CommonError::BucketNotEmpty - | CommonError::BucketAlreadyExists - | CommonError::BucketAlreadyOwnedByYou => StatusCode::CONFLICT, + CommonError::BucketNotEmpty | CommonError::BucketAlreadyExists => StatusCode::CONFLICT, CommonError::InvalidBucketName(_) | CommonError::InvalidHeader(_) => { StatusCode::BAD_REQUEST } @@ -131,8 +120,6 @@ impl CommonError { CommonError::BucketNotEmpty => "BucketNotEmpty", CommonError::InvalidBucketName(_) => "InvalidBucketName", CommonError::InvalidHeader(_) => "InvalidHeaderValue", - CommonError::BucketAlreadyOwnedByYou => "BucketAlreadyOwnedByYou", - CommonError::NotImplemented(_) => "NotImplemented", } } @@ -155,15 +142,15 @@ impl TryFrom for CommonError { } } -/// This function converts `HelperErrors` into `CommonErrors`, -/// for variants that exist in `CommonError`. -/// This is used for helper functions that might return `InvalidBucketName` -/// or `NoSuchBucket` for instance, and we want to pass that error +/// This function converts HelperErrors into CommonErrors, +/// for variants that exist in CommonError. +/// This is used for helper functions that might return InvalidBucketName +/// or NoSuchBucket for instance, and we want to pass that error /// up to our caller. 
pub fn pass_helper_error(err: HelperError) -> CommonError { match CommonError::try_from(err) { Ok(e) => e, - Err(e) => panic!("Helper error `{}` should hot have happened here", e), + Err(e) => panic!("Helper error `{}` should hot have happenned here", e), } } diff --git a/src/api/common/cors.rs b/src/api/common/cors.rs index 6f524bf4..09b55c13 100644 --- a/src/api/common/cors.rs +++ b/src/api/common/cors.rs @@ -9,7 +9,9 @@ use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, Statu use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule}; use garage_model::garage::Garage; -use crate::common_error::{CommonError, OkOrBadRequest, OkOrInternalError}; +use crate::common_error::{ + helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError, +}; use crate::helpers::*; pub fn find_matching_cors_rule<'a, B>( @@ -74,7 +76,7 @@ pub fn add_cors_headers( Ok(()) } -pub fn handle_options_api( +pub async fn handle_options_api( garage: Arc, req: &Request, bucket_name: Option, @@ -88,11 +90,19 @@ pub fn handle_options_api( // the same name, its CORS rules won't be applied // and will be shadowed by the rules of the globally // existing bucket (but this is inevitable because - // OPTIONS calls are not authenticated). + // OPTIONS calls are not auhtenticated). 
if let Some(bn) = bucket_name { let helper = garage.bucket_helper(); - let bucket_opt = helper.resolve_global_bucket_fast(&bn)?; - if let Some(bucket) = bucket_opt { + let bucket_id = helper + .resolve_global_bucket_name(&bn) + .await + .map_err(helper_error_as_internal)?; + if let Some(id) = bucket_id { + let bucket = garage + .bucket_helper() + .get_existing_bucket(id) + .await + .map_err(helper_error_as_internal)?; let bucket_params = bucket.state.into_option().unwrap(); handle_options_for_bucket(req, &bucket_params) } else { diff --git a/src/api/common/encoding.rs b/src/api/common/encoding.rs index 4ff33ecc..e286a784 100644 --- a/src/api/common/encoding.rs +++ b/src/api/common/encoding.rs @@ -1,7 +1,5 @@ //! Module containing various helpers for encoding -use std::fmt::Write as _; - /// Encode &str for use in a URI pub fn uri_encode(string: &str, encode_slash: bool) -> String { let mut result = String::with_capacity(string.len() * 2); @@ -11,98 +9,14 @@ pub fn uri_encode(string: &str, encode_slash: bool) -> String { '/' if encode_slash => result.push_str("%2F"), '/' if !encode_slash => result.push('/'), _ => { - let mut buf = [0_u8; 4]; - let str = c.encode_utf8(&mut buf); - for b in str.bytes() { - write!(&mut result, "%{:02X}", b).unwrap(); - } + result.push_str( + &format!("{}", c) + .bytes() + .map(|b| format!("%{:02X}", b)) + .collect::(), + ); } } } result } - -#[cfg(test)] -mod tests { - use crate::encoding::uri_encode; - - #[test] - fn test_uri_encode() { - let url1_encoded = uri_encode( - "https://garagehq.deuxfleurs.fr/documentation/reference-manual/features/", - true, - ); - assert_eq!( - &url1_encoded, - "https%3A%2F%2Fgaragehq.deuxfleurs.fr%2Fdocumentation%2Freference-manual%2Ffeatures%2F" - ); - - let url2_encoded = uri_encode( - "https://garagehq.deuxfleurs.fr/blog/2025-06-garage-v2/", - true, - ); - assert_eq!( - &url2_encoded, - "https%3A%2F%2Fgaragehq.deuxfleurs.fr%2Fblog%2F2025-06-garage-v2%2F" - ); - - let url3_encoded = uri_encode( - 
"https://garagehq.deuxfleurs.fr/blog/2025-06-hé_les_gens/", - true, - ); - assert_eq!( - &url3_encoded, - "https%3A%2F%2Fgaragehq.deuxfleurs.fr%2Fblog%2F2025-06-h%C3%A9_les_gens%2F" - ); - - let url4_encoded = uri_encode("/home/local user/Documents/personnel/à_blog.md", true); - assert_eq!( - &url4_encoded, - "%2Fhome%2Flocal%20user%2FDocuments%2Fpersonnel%2F%C3%A0_blog.md" - ); - } - - #[test] - fn test_uri_encode_without_slash() { - let url1_encoded = uri_encode( - "https://garagehq.deuxfleurs.fr/documentation/reference-manual/features/", - false, - ); - assert_eq!( - &url1_encoded, - "https%3A//garagehq.deuxfleurs.fr/documentation/reference-manual/features/" - ); - - let url2_encoded = uri_encode( - "https://garagehq.deuxfleurs.fr/blog/2025-06-garage-v2/", - false, - ); - assert_eq!( - &url2_encoded, - "https%3A//garagehq.deuxfleurs.fr/blog/2025-06-garage-v2/" - ); - - let url3_encoded = uri_encode( - "https://garagehq.deuxfleurs.fr/blog/2025-06-hé_les_gens/", - false, - ); - assert_eq!( - &url3_encoded, - "https%3A//garagehq.deuxfleurs.fr/blog/2025-06-h%C3%A9_les_gens/" - ); - let url4_encoded = uri_encode("/home/local user/Documents/personnel/à_blog.md", false); - assert_eq!( - &url4_encoded, - "/home/local%20user/Documents/personnel/%C3%A0_blog.md" - ); - } - - #[test] - fn test_uri_encode_most_than_double_size() { - let url_encoded = uri_encode("/home/ùàé ç/çaèù/à_êô.md", true); - assert_eq!( - &url_encoded, - "%2Fhome%2F%C3%B9%C3%A0%C3%A9%20%C3%A7%2F%C3%A7a%C3%A8%C3%B9%2F%C3%A0_%C3%AA%C3%B4.md" - ); - } -} diff --git a/src/api/common/generic_server.rs b/src/api/common/generic_server.rs index 8a09d647..3f14c07d 100644 --- a/src/api/common/generic_server.rs +++ b/src/api/common/generic_server.rs @@ -1,4 +1,3 @@ -use std::borrow::Cow; use std::convert::Infallible; use std::fs::{self, Permissions}; use std::os::unix::fs::PermissionsExt; @@ -36,7 +35,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress; use crate::helpers::{BoxBody, ErrorBody}; pub trait 
ApiEndpoint: Send + Sync + 'static { - fn name(&self) -> Cow<'static, str>; + fn name(&self) -> &'static str; fn add_span_attributes(&self, span: SpanRef<'_>); } @@ -125,7 +124,7 @@ impl ApiServer { } UnixOrTCPSocketAddress::UnixSocket(ref path) => { if path.exists() { - fs::remove_file(path)?; + fs::remove_file(path)? } let listener = UnixListener::bind(path)?; @@ -154,7 +153,7 @@ impl ApiServer { { format!("{forwarded_for_ip_addr} (via {addr})") } else { - addr + format!("{addr}") }; // we only do this to log the access key, so we can discard any error let key = self @@ -162,14 +161,7 @@ impl ApiServer { .key_id_from_request(&req) .map(|k| format!("(key {k}) ")) .unwrap_or_default(); - - let method = req.method().clone(); - if A::API_NAME == "admin" && (uri.path() == "/health" || uri.path() == "/metrics") { - debug!("{source} {key}{method} {uri}"); - } else { - info!("{source} {key}{method} {uri}"); - } - + info!("{source} {key}{} {uri}", req.method()); debug!("{:?}", req); let tracer = opentelemetry::global::tracer("garage"); @@ -197,23 +189,15 @@ impl ApiServer { let mut http_error_builder = Response::builder().status(e.http_status_code()); if let Some(header_map) = http_error_builder.headers_mut() { - e.add_http_headers(header_map); + e.add_http_headers(header_map) } let http_error = http_error_builder.body(body)?; if e.http_status_code().is_server_error() { - warn!( - "error {}, {} in response to {source} {key}{method} {uri}", - e.http_status_code(), - e - ); + warn!("Response: error {}, {}", e.http_status_code(), e); } else { - info!( - "error {}, {} in response to {source} {key}{method} {uri}", - e.http_status_code(), - e - ); + info!("Response: error {}, {}", e.http_status_code(), e); } Ok(http_error .map(|body| BoxBody::new(body.map_err(|_: Infallible| unreachable!())))) diff --git a/src/api/common/lib.rs b/src/api/common/lib.rs index 219f3624..0e655a53 100644 --- a/src/api/common/lib.rs +++ b/src/api/common/lib.rs @@ -10,4 +10,3 @@ pub mod 
generic_server; pub mod helpers; pub mod router_macros; pub mod signature; -pub mod xml; diff --git a/src/api/common/router_macros.rs b/src/api/common/router_macros.rs index 531dbad8..d9fe86db 100644 --- a/src/api/common/router_macros.rs +++ b/src/api/common/router_macros.rs @@ -45,83 +45,6 @@ macro_rules! router_match { } } }}; - (@gen_path_parser_v2 ($method:expr, $reqpath:expr, $pathprefix:literal, $query:expr, $req:expr) - [ - $(@special $spec_meth:ident $spec_path:pat => $spec_api:ident $spec_params:tt,)* - $($meth:ident $api:ident $params:tt,)* - ]) => {{ - { - #[allow(unused_parens)] - match ($method, $reqpath) { - $( - (&Method::$spec_meth, $spec_path) => AdminApiRequest::$spec_api ( - router_match!(@@gen_parse_request $spec_api, $spec_params, $query, $req) - ), - )* - $( - (&Method::$meth, concat!($pathprefix, stringify!($api))) - => AdminApiRequest::$api ( - router_match!(@@gen_parse_request $api, $params, $query, $req) - ), - )* - (m, p) => { - return Err(Error::bad_request(format!( - "Unknown API endpoint: {} {}", - m, p - ))) - } - } - } - }}; - (@@gen_parse_request $api:ident, (), $query: expr, $req:expr) => {{ - paste!( - [< $api Request >] - ) - }}; - (@@gen_parse_request $api:ident, (body), $query: expr, $req:expr) => {{ - paste!({ - parse_json_body::< [<$api Request>], _, Error>($req).await? - }) - }}; - (@@gen_parse_request $api:ident, (admin_token), $query: expr, $req:expr) => {{ - paste!({ - let auth_header = $req.headers() - .get(hyper::header::AUTHORIZATION) - .ok_or_else(|| Error::bad_request("Missing Authorization header"))? - .to_str() - .map_err(|_| Error::bad_request("Invalid Authorization header"))?; - - let admin_token = auth_header.strip_prefix("Bearer ") - .ok_or_else(|| Error::bad_request("Authorization header must be Bearer token"))? - .to_string(); - - [< $api Request >] { admin_token } - }) - }}; - (@@gen_parse_request $api:ident, (body_field, $($conv:ident $(($conv_arg:expr))? 
:: $param:ident),*), $query: expr, $req:expr) - => - {{ - paste!({ - let body = parse_json_body::< [<$api RequestBody>], _, Error>($req).await?; - [< $api Request >] { - body, - $( - $param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param), - )+ - } - }) - }}; - (@@gen_parse_request $api:ident, ($($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr) - => - {{ - paste!({ - [< $api Request >] { - $( - $param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param), - )+ - } - }) - }}; (@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr), key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*], no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{ @@ -156,19 +79,13 @@ macro_rules! router_match { } }}; - (@@parse_param $query:expr, default, $param:ident) => {{ - Default::default() - }}; (@@parse_param $query:expr, query_opt, $param:ident) => {{ // extract optional query parameter $query.$param.take().map(|param| param.into_owned()) }}; (@@parse_param $query:expr, query, $param:ident) => {{ - // extract mandatory query parameter - $query.$param.take() - .ok_or_bad_request( - format!("Missing argument `{}` for endpoint", stringify!($param)) - )?.into_owned() + // extract mendatory query parameter + $query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned() }}; (@@parse_param $query:expr, opt_parse, $param:ident) => {{ // extract and parse optional query parameter @@ -182,22 +99,10 @@ macro_rules! router_match { (@@parse_param $query:expr, parse, $param:ident) => {{ // extract and parse mandatory query parameter // both missing and un-parseable parameters are reported as errors - $query.$param.take() - .ok_or_bad_request( - format!("Missing argument `{}` for endpoint", stringify!($param)) - )? 
+ $query.$param.take().ok_or_bad_request("Missing argument for endpoint")? .parse() .map_err(|_| Error::bad_request("Failed to parse query parameter"))? }}; - (@@parse_param $query:expr, parse_default($default:expr), $param:ident) => {{ - // extract and parse optional query parameter - // using provided value as default if parameter is missing - $query.$param.take().map(|x| x - .parse() - .map_err(|_| Error::bad_request("Failed to parse query parameter"))) - .transpose()? - .unwrap_or($default) - }}; (@func $(#[$doc:meta])* pub enum Endpoint { @@ -282,7 +187,6 @@ macro_rules! generateQueryParameters { }, )* $( - // FIXME: remove if !v.is_empty() ? $f_param => if !v.is_empty() { if res.$f_name.replace(v).is_some() { return Err(Error::bad_request(format!( diff --git a/src/api/common/signature/body.rs b/src/api/common/signature/body.rs index 0bace5ef..96be0d5b 100644 --- a/src/api/common/signature/body.rs +++ b/src/api/common/signature/body.rs @@ -89,7 +89,7 @@ impl ReqBody { checksummer }) .await - .unwrap(); + .unwrap() } Err(frame) => { let trailers = frame.into_trailers().unwrap(); diff --git a/src/api/common/signature/checksum.rs b/src/api/common/signature/checksum.rs index 20f4bd7e..3c5e7c53 100644 --- a/src/api/common/signature/checksum.rs +++ b/src/api/common/signature/checksum.rs @@ -1,7 +1,9 @@ -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; +use std::hash::Hasher; use base64::prelude::*; -use crc_fast::{CrcAlgorithm, Digest as CrcDigest}; +use crc32c::Crc32cHasher as Crc32c; +use crc32fast::Hasher as Crc32; use md5::{Digest, Md5}; use sha1::Sha1; use sha2::Sha256; @@ -11,7 +13,6 @@ use http::{HeaderMap, HeaderName, HeaderValue}; use garage_util::data::*; use super::*; -use crate::common_error::CommonError; pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue}; @@ -20,40 +21,17 @@ pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5"); pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName = 
HeaderName::from_static("x-amz-checksum-algorithm"); pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode"); -pub const X_AMZ_CHECKSUM_TYPE: HeaderName = HeaderName::from_static("x-amz-checksum-type"); pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32"); pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c"); -pub const X_AMZ_CHECKSUM_CRC64NVME: HeaderName = - HeaderName::from_static("x-amz-checksum-crc64nvme"); pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1"); pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256"); -// Values for x-amz-checksum-type -pub const COMPOSITE: &str = "COMPOSITE"; -pub const FULL_OBJECT: &str = "FULL_OBJECT"; - pub type Crc32Checksum = [u8; 4]; pub type Crc32cChecksum = [u8; 4]; -pub type Crc64NvmeChecksum = [u8; 8]; pub type Md5Checksum = [u8; 16]; pub type Sha1Checksum = [u8; 20]; pub type Sha256Checksum = [u8; 32]; -// -- MAP OF CRC ALGORITHMS : -// CRC32 -> CrcAlgorithm::Crc32IsoHdlc -// CRC32C -> CrcAlgorithm::Crc32Iscsi -// CRC64NVME -> CrcAlgorithm::Crc64Nvme - -pub fn new_crc32() -> CrcDigest { - CrcDigest::new(CrcAlgorithm::Crc32IsoHdlc) -} -pub fn new_crc32c() -> CrcDigest { - CrcDigest::new(CrcAlgorithm::Crc32Iscsi) -} -pub fn new_crc64nvme() -> CrcDigest { - CrcDigest::new(CrcAlgorithm::Crc64Nvme) -} - #[derive(Debug, Default, Clone)] pub struct ExpectedChecksums { // base64-encoded md5 (content-md5 header) @@ -64,11 +42,9 @@ pub struct ExpectedChecksums { pub extra: Option, } -#[derive(Default)] pub struct Checksummer { - pub crc32: Option, - pub crc32c: Option, - pub crc64nvme: Option, + pub crc32: Option, + pub crc32c: Option, pub md5: Option, pub sha1: Option, pub sha256: Option, @@ -78,7 +54,6 @@ pub struct Checksummer { pub struct Checksums { pub crc32: Option, pub crc32c: Option, - pub crc64nvme: Option, pub md5: Option, 
pub sha1: Option, pub sha256: Option, @@ -86,7 +61,13 @@ pub struct Checksums { impl Checksummer { pub fn new() -> Self { - Default::default() + Self { + crc32: None, + crc32c: None, + md5: None, + sha1: None, + sha256: None, + } } pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self { @@ -110,29 +91,23 @@ impl Checksummer { self.sha256 = Some(Sha256::new()); } if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) { - self.crc32 = Some(new_crc32()); + self.crc32 = Some(Crc32::new()); } if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) { - self.crc32c = Some(new_crc32c()); - } - if matches!(&expected.extra, Some(ChecksumValue::Crc64Nvme(_))) { - self.crc64nvme = Some(new_crc64nvme()); + self.crc32c = Some(Crc32c::default()); } if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) { self.sha1 = Some(Sha1::new()); } } - pub fn add_algorithm(mut self, algo: Option) -> Self { + pub fn add(mut self, algo: Option) -> Self { match algo { Some(ChecksumAlgorithm::Crc32) => { - self.crc32 = Some(new_crc32()); + self.crc32 = Some(Crc32::new()); } Some(ChecksumAlgorithm::Crc32c) => { - self.crc32c = Some(new_crc32c()); - } - Some(ChecksumAlgorithm::Crc64Nvme) => { - self.crc64nvme = Some(new_crc64nvme()); + self.crc32c = Some(Crc32c::default()); } Some(ChecksumAlgorithm::Sha1) => { self.sha1 = Some(Sha1::new()); @@ -150,10 +125,7 @@ impl Checksummer { crc32.update(bytes); } if let Some(crc32c) = &mut self.crc32c { - crc32c.update(bytes); - } - if let Some(crc64nvme) = &mut self.crc64nvme { - crc64nvme.update(bytes); + crc32c.write(bytes); } if let Some(md5) = &mut self.md5 { md5.update(bytes); @@ -168,9 +140,10 @@ impl Checksummer { pub fn finalize(self) -> Checksums { Checksums { - crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize() as u32)), - crc32c: self.crc32c.map(|x| u32::to_be_bytes(x.finalize() as u32)), - crc64nvme: self.crc64nvme.map(|x| u64::to_be_bytes(x.finalize())), + crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())), 
+ crc32c: self + .crc32c + .map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())), md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()), sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()), sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()), @@ -182,7 +155,7 @@ impl Checksums { pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> { if let Some(expected_md5) = &expected.md5 { match self.md5 { - Some(md5) if BASE64_STANDARD.encode(md5) == expected_md5.trim_matches('"') => (), + Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (), _ => { return Err(Error::InvalidDigest( "MD5 checksum verification failed (from content-md5)".into(), @@ -202,56 +175,24 @@ impl Checksums { } if let Some(extra) = expected.extra { let algo = extra.algorithm(); - let calculated = self.extract(Some(algo))?; - if calculated != Some(extra) { + if self.extract(Some(algo)) != Some(extra) { return Err(Error::InvalidDigest(format!( - "Failed to validate checksum for algorithm {:?}: calculated {:?}, expected {:?}", - algo, calculated, extra + "Failed to validate checksum for algorithm {:?}", + algo ))); } } Ok(()) } - pub fn extract(&self, algo: Option) -> Result, Error> { - Ok(match algo { + pub fn extract(&self, algo: Option) -> Option { + match algo { None => None, - Some(ChecksumAlgorithm::Crc32) => { - Some(ChecksumValue::Crc32(self.crc32.ok_or_else(|| { - CommonError::BadRequest( - "Requested checksum verification without providing checksum".to_string(), - ) - })?)) - } - Some(ChecksumAlgorithm::Crc32c) => { - Some(ChecksumValue::Crc32c(self.crc32c.ok_or_else(|| { - CommonError::BadRequest( - "Requested checksum verification without providing checksum".to_string(), - ) - })?)) - } - Some(ChecksumAlgorithm::Crc64Nvme) => Some(ChecksumValue::Crc64Nvme( - self.crc64nvme.ok_or_else(|| { - CommonError::BadRequest( - "Requested checksum verification without providing checksum".to_string(), - ) - })?, - )), - 
Some(ChecksumAlgorithm::Sha1) => { - Some(ChecksumValue::Sha1(self.sha1.ok_or_else(|| { - CommonError::BadRequest( - "Requested checksum verification without providing checksum".to_string(), - ) - })?)) - } - Some(ChecksumAlgorithm::Sha256) => { - Some(ChecksumValue::Sha256(self.sha256.ok_or_else(|| { - CommonError::BadRequest( - "Requested checksum verification without providing checksum".to_string(), - ) - })?)) - } - }) + Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())), + Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())), + Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())), + Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())), + } } } @@ -261,7 +202,6 @@ pub fn parse_checksum_algorithm(algo: &str) -> Result match algo { "CRC32" => Ok(ChecksumAlgorithm::Crc32), "CRC32C" => Ok(ChecksumAlgorithm::Crc32c), - "CRC64NVME" => Ok(ChecksumAlgorithm::Crc64Nvme), "SHA1" => Ok(ChecksumAlgorithm::Sha1), "SHA256" => Ok(ChecksumAlgorithm::Sha256), _ => Err(Error::bad_request("invalid checksum algorithm")), @@ -285,7 +225,6 @@ pub fn request_trailer_checksum_algorithm( None => Ok(None), Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)), Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)), - Some(x) if x == X_AMZ_CHECKSUM_CRC64NVME => Ok(Some(ChecksumAlgorithm::Crc64Nvme)), Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)), Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)), _ => Err(Error::bad_request("invalid checksum algorithm")), @@ -304,12 +243,6 @@ pub fn request_checksum_value( if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) { ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?); } - if headers.contains_key(X_AMZ_CHECKSUM_CRC64NVME) { - ret.push(extract_checksum_value( - headers, - ChecksumAlgorithm::Crc64Nvme, - )?); - } if 
headers.contains_key(X_AMZ_CHECKSUM_SHA1) { ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?); } @@ -335,7 +268,7 @@ pub fn extract_checksum_value( ChecksumAlgorithm::Crc32 => { let crc32 = headers .get(X_AMZ_CHECKSUM_CRC32) - .and_then(|x| BASE64_STANDARD.decode(x).ok()) + .and_then(|x| BASE64_STANDARD.decode(&x).ok()) .and_then(|x| x.try_into().ok()) .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?; Ok(ChecksumValue::Crc32(crc32)) @@ -343,23 +276,15 @@ pub fn extract_checksum_value( ChecksumAlgorithm::Crc32c => { let crc32c = headers .get(X_AMZ_CHECKSUM_CRC32C) - .and_then(|x| BASE64_STANDARD.decode(x).ok()) + .and_then(|x| BASE64_STANDARD.decode(&x).ok()) .and_then(|x| x.try_into().ok()) .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?; Ok(ChecksumValue::Crc32c(crc32c)) } - ChecksumAlgorithm::Crc64Nvme => { - let crc64nvme = headers - .get(X_AMZ_CHECKSUM_CRC64NVME) - .and_then(|x| BASE64_STANDARD.decode(x).ok()) - .and_then(|x| x.try_into().ok()) - .ok_or_bad_request("invalid x-amz-checksum-crc64nvme header")?; - Ok(ChecksumValue::Crc64Nvme(crc64nvme)) - } ChecksumAlgorithm::Sha1 => { let sha1 = headers .get(X_AMZ_CHECKSUM_SHA1) - .and_then(|x| BASE64_STANDARD.decode(x).ok()) + .and_then(|x| BASE64_STANDARD.decode(&x).ok()) .and_then(|x| x.try_into().ok()) .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?; Ok(ChecksumValue::Sha1(sha1)) @@ -367,7 +292,7 @@ pub fn extract_checksum_value( ChecksumAlgorithm::Sha256 => { let sha256 = headers .get(X_AMZ_CHECKSUM_SHA256) - .and_then(|x| BASE64_STANDARD.decode(x).ok()) + .and_then(|x| BASE64_STANDARD.decode(&x).ok()) .and_then(|x| x.try_into().ok()) .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?; Ok(ChecksumValue::Sha256(sha256)) @@ -381,19 +306,16 @@ pub fn add_checksum_response_headers( ) -> http::response::Builder { match checksum { Some(ChecksumValue::Crc32(crc32)) => { - resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(crc32)); + resp = 
resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32)); } Some(ChecksumValue::Crc32c(crc32c)) => { - resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(crc32c)); - } - Some(ChecksumValue::Crc64Nvme(crc64nvme)) => { - resp = resp.header(X_AMZ_CHECKSUM_CRC64NVME, BASE64_STANDARD.encode(crc64nvme)); + resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c)); } Some(ChecksumValue::Sha1(sha1)) => { - resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(sha1)); + resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1)); } Some(ChecksumValue::Sha256(sha256)) => { - resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(sha256)); + resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256)); } None => (), } diff --git a/src/api/common/signature/error.rs b/src/api/common/signature/error.rs index 8d86f042..a1b353e1 100644 --- a/src/api/common/signature/error.rs +++ b/src/api/common/signature/error.rs @@ -11,13 +11,8 @@ pub enum Error { Common(CommonError), /// Authorization Header Malformed - #[error( - "Authorization header malformed, unexpected scope: '{unexpected}', expected: '{expected}'" - )] - AuthorizationHeaderMalformed { - unexpected: String, - expected: String, - }, + #[error("Authorization header malformed, unexpected scope: {0}")] + AuthorizationHeaderMalformed(String), // Category: bad request /// The request contained an invalid UTF-8 sequence in its path or in other parameters diff --git a/src/api/common/signature/mod.rs b/src/api/common/signature/mod.rs index bae63d1b..50fbd304 100644 --- a/src/api/common/signature/mod.rs +++ b/src/api/common/signature/mod.rs @@ -64,12 +64,12 @@ pub struct VerifiedRequest { pub content_sha256_header: ContentSha256Header, } -pub fn verify_request( +pub async fn verify_request( garage: &Garage, mut req: Request, service: &'static str, ) -> Result { - let checked_signature = payload::check_payload_signature(garage, &mut req, service)?; + 
let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?; let request = streaming::parse_streaming_body( req, diff --git a/src/api/common/signature/payload.rs b/src/api/common/signature/payload.rs index 532fa04b..3939da19 100644 --- a/src/api/common/signature/payload.rs +++ b/src/api/common/signature/payload.rs @@ -9,7 +9,6 @@ use sha2::{Digest, Sha256}; use garage_table::*; use garage_util::data::Hash; -use garage_util::time::now_msec; use garage_model::garage::Garage; use garage_model::key_table::*; @@ -33,7 +32,7 @@ pub struct CheckedSignature { pub signature_header: Option, } -pub fn check_payload_signature( +pub async fn check_payload_signature( garage: &Garage, request: &mut Request, service: &'static str, @@ -44,9 +43,9 @@ pub fn check_payload_signature( // We check for presigned-URL-style authentication first, because // the browser or something else could inject an Authorization header // that is totally unrelated to AWS signatures. - check_presigned_signature(garage, service, request, query) + check_presigned_signature(garage, service, request, query).await } else if request.headers().contains_key(AUTHORIZATION) { - check_standard_signature(garage, service, request, query) + check_standard_signature(garage, service, request, query).await } else { // Unsigned (anonymous) request let content_sha256 = request @@ -81,7 +80,7 @@ fn parse_x_amz_content_sha256(header: Option<&str>) -> Result { return Err(Error::bad_request( "invalid or unsupported x-amz-content-sha256", - )); + )) } }; Ok(ContentSha256Header::StreamingPayload { trailer, signed }) @@ -94,7 +93,7 @@ fn parse_x_amz_content_sha256(header: Option<&str>) -> Result, @@ -129,7 +128,7 @@ fn check_standard_signature( trace!("canonical request:\n{}", canonical_request); trace!("string to sign:\n{}", string_to_sign); - let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes())?; + let key = verify_v4(garage, service, &authorization, 
string_to_sign.as_bytes()).await?; let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?; @@ -140,7 +139,7 @@ fn check_standard_signature( }) } -fn check_presigned_signature( +async fn check_presigned_signature( garage: &Garage, service: &'static str, request: &mut Request, @@ -179,7 +178,7 @@ fn check_presigned_signature( trace!("canonical request (presigned url):\n{}", canonical_request); trace!("string to sign (presigned url):\n{}", string_to_sign); - let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes())?; + let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?; // In the page on presigned URLs, AWS specifies that if a signed query // parameter and a signed header of the same name have different values, @@ -187,7 +186,7 @@ fn check_presigned_signature( let headers_mut = request.headers_mut(); for (name, value) in query.iter() { if let Some(existing) = headers_mut.get(name) { - if signed_headers.contains(name) && existing.as_bytes() != value.value.as_bytes() { + if signed_headers.contains(&name) && existing.as_bytes() != value.value.as_bytes() { return Err(Error::bad_request(format!( "Conflicting values for `{}` in query parameters and request headers", name @@ -269,24 +268,20 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) -> return Err(Error::bad_request("Header `Host` should be signed")); } for (name, _) in headers.iter() { - // Enforce signature of some headers - if header_should_be_signed(name) && !signed_headers.contains(name) { - return Err(Error::bad_request(format!( - "Header `{}` should be signed", - name - ))); + // Enforce signature of all x-amz-* headers, except x-amz-content-sh256 + // because it is included in the canonical request in all cases + if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 { + if !signed_headers.contains(name) { + return Err(Error::bad_request(format!( + "Header `{}` should be 
signed", + name + ))); + } } } Ok(()) } -// Indicates whether a header is required to be signed -fn header_should_be_signed(name: &HeaderName) -> bool { - // Enforce signature of all x-amz-* headers, except x-amz-content-sh256 - // because it is included in the canonical request in all cases - name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 -} - pub fn string_to_sign(datetime: &DateTime, scope_string: &str, canonical_req: &str) -> String { let mut hasher = Sha256::default(); hasher.update(canonical_req.as_bytes()); @@ -340,18 +335,14 @@ pub fn canonical_request( let canonical_uri: std::borrow::Cow = if service != "s3" { uri_encode(canonical_uri, false).into() } else { - //TODO: decode is already do for construct Api::EndPoint, should be better to be able to keep it instead of compute it again. - let key = percent_encoding::percent_decode_str(canonical_uri) - .decode_utf8() - .unwrap(); - uri_encode(&key, false).into() + canonical_uri.into() }; // Canonical query string from passed HeaderMap let canonical_query_string = { let mut items = Vec::with_capacity(query.len()); for (_, QueryValue { key, value }) in query.iter() { - items.push(uri_encode(key, true) + "=" + &uri_encode(value, true)); + items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true)); } items.sort(); items.join("&") @@ -361,18 +352,11 @@ pub fn canonical_request( let canonical_header_string = signed_headers .iter() .map(|name| { - let all_values = headers.get_all(name); - let mut iter_values = all_values.iter(); - let base_value = iter_values - .next() + let value = headers + .get(name) .ok_or_bad_request(format!("signed header `{}` is not present", name))?; - let mut built_string = std::str::from_utf8(base_value.as_bytes())?.to_string(); - for extend_value in iter_values { - let extend_string = std::str::from_utf8(extend_value.as_bytes())?; - built_string.push(','); - built_string.push_str(extend_string); - } - Ok(format!("{}:{}", name.as_str(), built_string.trim())) + 
let value = std::str::from_utf8(value.as_bytes())?; + Ok(format!("{}:{}", name.as_str(), value.trim())) }) .collect::, Error>>()? .join("\n"); @@ -396,7 +380,7 @@ pub fn parse_date(date: &str) -> Result, Error> { Ok(Utc.from_utc_datetime(&date)) } -pub fn verify_v4( +pub async fn verify_v4( garage: &Garage, service: &str, auth: &Authorization, @@ -404,26 +388,17 @@ pub fn verify_v4( ) -> Result { let scope_expected = compute_scope(&auth.date, &garage.config.s3_api.s3_region, service); if auth.scope != scope_expected { - return Err(Error::AuthorizationHeaderMalformed { - unexpected: auth.scope.to_string(), - expected: scope_expected, - }); + return Err(Error::AuthorizationHeaderMalformed(auth.scope.to_string())); } let key = garage .key_table - .get_local(&EmptyKey, &auth.key_id)? + .get(&EmptyKey, &auth.key_id) + .await? .filter(|k| !k.state.is_deleted()) .ok_or_else(|| Error::forbidden(format!("No such key: {}", &auth.key_id)))?; let key_p = key.params().unwrap(); - if key_p.is_expired(now_msec()) { - return Err(Error::forbidden(format!( - "Access key {} has expired", - key.key_id - ))); - } - let mut hmac = signing_hmac( &auth.date, &key_p.secret_key, diff --git a/src/api/common/signature/streaming.rs b/src/api/common/signature/streaming.rs index 5d2652fd..64362727 100644 --- a/src/api/common/signature/streaming.rs +++ b/src/api/common/signature/streaming.rs @@ -1,4 +1,3 @@ -use std::iter::FromIterator; use std::pin::Pin; use std::sync::Mutex; @@ -6,7 +5,7 @@ use chrono::{DateTime, NaiveDateTime, TimeZone, Utc}; use futures::prelude::*; use futures::task; use hmac::Mac; -use http::header::{Entry, HeaderMap, HeaderValue, CONTENT_ENCODING}; +use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING}; use hyper::body::{Bytes, Frame, Incoming as IncomingBody}; use hyper::Request; @@ -43,52 +42,15 @@ pub fn parse_streaming_body( // Remove the aws-chunked component in the content-encoding: header // Note: this header is not properly sent by minio client, so don't 
fail // if it is absent from the request. - let mut original_content_encoding = vec![]; - if let Entry::Occupied(content_encoding) = req.headers_mut().entry(CONTENT_ENCODING) { - // 1. collect headers - let (_, vals) = content_encoding.remove_entry_mult(); - original_content_encoding = Vec::from_iter(vals); - } - let mut header_initialized = false; - let mut chunked_found = false; - for enc_val in original_content_encoding.iter() { - // 2. clean each header value and reinject it. - let mut rebuilt_val = vec![]; - for part in enc_val.as_bytes().split(|c| *c == b',') { - let trimmed_part = part.trim_ascii(); - if trimmed_part == b"aws-chunked" { - chunked_found = true; - continue; - } - if !rebuilt_val.is_empty() { - rebuilt_val.push(b','); - } - rebuilt_val.extend_from_slice(trimmed_part); + if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) { + if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") { + req.headers_mut() + .insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap()); + } else if content_encoding != "aws-chunked" { + return Err(Error::bad_request( + "content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD", + )); } - - if rebuilt_val.is_empty() { - // skip empty headers - continue; - } - - if !header_initialized { - req.headers_mut().insert( - CONTENT_ENCODING, - HeaderValue::from_bytes(&rebuilt_val).unwrap(), - ); - header_initialized = true; - } else { - req.headers_mut().append( - CONTENT_ENCODING, - HeaderValue::from_bytes(&rebuilt_val).unwrap(), - ); - } - } - - if !original_content_encoding.is_empty() && !chunked_found { - return Err(Error::bad_request( - "content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD", - )); } // If trailer header is announced, add the calculation of the requested checksum @@ -98,7 +60,7 @@ pub fn parse_streaming_body( request_trailer_checksum_algorithm(req.headers())? 
.ok_or_bad_request("Missing x-amz-trailer header")?, ); - checksummer = checksummer.add_algorithm(algo); + checksummer = checksummer.add(algo); algo } else { None @@ -239,7 +201,6 @@ mod payload { use nom::character::streaming::hex_digit1; use nom::combinator::{map_res, opt}; use nom::number::streaming::hex_u32; - use nom::Parser as _; macro_rules! try_parse { ($expr:expr) => { @@ -273,7 +234,7 @@ mod payload { let (input, _) = try_parse!(tag(";")(input)); let (input, _) = try_parse!(tag("chunk-signature=")(input)); - let (input, data) = try_parse!(map_res(hex_digit1, hex::decode).parse(input)); + let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input)); let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?; let (input, _) = try_parse!(tag("\r\n")(input)); @@ -311,20 +272,18 @@ mod payload { let (input, header_name) = try_parse!(map_res( take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'), HeaderName::from_bytes - ) - .parse(input)); - let (input, _) = try_parse!(tag(&b":"[..])(input)); + )(input)); + let (input, _) = try_parse!(tag(b":")(input)); let (input, header_value) = try_parse!(map_res( take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)), HeaderValue::from_bytes - ) - .parse(input)); + )(input)); // Possible '\n' after the header value, depends on clients // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - let (input, _) = try_parse!(opt(tag(&b"\n"[..])).parse(input)); + let (input, _) = try_parse!(opt(tag(b"\n"))(input)); - let (input, _) = try_parse!(tag(&b"\r\n"[..]).parse(input)); + let (input, _) = try_parse!(tag(b"\r\n")(input)); Ok(( input, @@ -338,10 +297,10 @@ mod payload { pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> { let (input, trailer) = Self::parse_content(input)?; - let (input, _) = try_parse!(tag(&b"x-amz-trailer-signature:"[..]).parse(input)); - let (input, data) = try_parse!(map_res(hex_digit1, 
hex::decode).parse(input)); + let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input)); + let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input)); let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?; - let (input, _) = try_parse!(tag(&b"\r\n"[..]).parse(input)); + let (input, _) = try_parse!(tag(b"\r\n")(input)); Ok(( input, @@ -353,7 +312,7 @@ mod payload { } pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> { let (input, trailer) = Self::parse_content(input)?; - let (input, _) = try_parse!(tag(&b"\r\n"[..]).parse(input)); + let (input, _) = try_parse!(tag(b"\r\n")(input)); Ok((input, trailer)) } @@ -518,7 +477,7 @@ where continue; } Some(Err(e)) => { - return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e)))); + return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e)))) } None => { return Poll::Ready(Some(Err(StreamingPayloadError::message( @@ -528,7 +487,7 @@ where } } Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => { - return Poll::Ready(Some(Err(e))); + return Poll::Ready(Some(Err(e))) } }; diff --git a/src/api/common/xml/cors.rs b/src/api/common/xml/cors.rs deleted file mode 100644 index c62a0fa3..00000000 --- a/src/api/common/xml/cors.rs +++ /dev/null @@ -1,222 +0,0 @@ -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; - -use hyper::{header::HeaderName, Method}; - -use garage_model::bucket_table::CorsRule as GarageCorsRule; - -use super::{xmlns_tag, IntValue, Value}; -use crate::common_error::{CommonError as Error, OkOrBadRequest}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[serde(rename = "CORSConfiguration")] -pub struct CorsConfiguration { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag", skip_deserializing)] - pub xmlns: (), - // "default" is required to be able to parse an empty list of rules, - // cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types 
- #[serde(rename = "CORSRule", default)] - pub cors_rules: Vec, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = cors::Rule)] -pub struct CorsRule { - #[serde(rename = "ID", skip_serializing_if = "Option::is_none")] - pub id: Option, - #[serde(rename = "MaxAgeSeconds", skip_serializing_if = "Option::is_none")] - pub max_age_seconds: Option, - #[serde(rename = "AllowedOrigin")] - pub allowed_origins: Vec, - #[serde(rename = "AllowedMethod")] - pub allowed_methods: Vec, - #[serde(rename = "AllowedHeader", default)] - pub allowed_headers: Vec, - #[serde(rename = "ExposeHeader", default)] - pub expose_headers: Vec, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = cors::AllowedMethod)] -pub struct AllowedMethod { - #[serde(rename = "AllowedMethod")] - pub allowed_method: Value, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = cors::AllowedHeader)] -pub struct AllowedHeader { - #[serde(rename = "AllowedHeader")] - pub allowed_header: Value, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = cors::ExposedHeader)] -pub struct ExposeHeader { - #[serde(rename = "ExposeHeader")] - pub expose_header: Value, -} - -impl CorsConfiguration { - pub fn validate(&self) -> Result<(), Error> { - for r in self.cors_rules.iter() { - r.validate()?; - } - Ok(()) - } - - pub fn into_garage_cors_config(self) -> Result, Error> { - Ok(self - .cors_rules - .iter() - .map(CorsRule::to_garage_cors_rule) - .collect()) - } -} - -impl CorsRule { - pub fn validate(&self) -> Result<(), Error> { - for method in self.allowed_methods.iter() { - method - .0 - .parse::() - .ok_or_bad_request("Invalid CORSRule method")?; - } - for header in self - .allowed_headers - .iter() - .chain(self.expose_headers.iter()) - { - header - .0 - .parse::() - 
.ok_or_bad_request("Invalid HTTP header name")?; - } - Ok(()) - } - - pub fn to_garage_cors_rule(&self) -> GarageCorsRule { - let convert_vec = - |vval: &[Value]| vval.iter().map(|x| x.0.to_owned()).collect::>(); - GarageCorsRule { - id: self.id.as_ref().map(|x| x.0.to_owned()), - max_age_seconds: self.max_age_seconds.as_ref().map(|x| x.0 as u64), - allow_origins: convert_vec(&self.allowed_origins), - allow_methods: convert_vec(&self.allowed_methods), - allow_headers: convert_vec(&self.allowed_headers), - expose_headers: convert_vec(&self.expose_headers), - } - } - - pub fn from_garage_cors_rule(rule: &GarageCorsRule) -> Self { - let convert_vec = |vval: &[String]| { - vval.iter() - .map(|x| Value(x.clone())) - .collect::>() - }; - Self { - id: rule.id.as_ref().map(|x| Value(x.clone())), - max_age_seconds: rule.max_age_seconds.map(|x| IntValue(x as i64)), - allowed_origins: convert_vec(&rule.allow_origins), - allowed_methods: convert_vec(&rule.allow_methods), - allowed_headers: convert_vec(&rule.allow_headers), - expose_headers: convert_vec(&rule.expose_headers), - } - } -} - -#[cfg(test)] -mod tests { - use crate::xml::{to_xml_with_header, unprettify_xml}; - - use super::*; - - use quick_xml::de::from_str; - - #[test] - fn test_deserialize() { - let message = r#" - - - http://www.example.com - - PUT - POST - DELETE - - * - - - * - GET - - - qsdfjklm - 12345 - https://perdu.com - - GET - DELETE - * - * - -"#; - let conf: CorsConfiguration = - from_str(message).expect("failed to deserialize xml into `CorsConfiguration` struct"); - let ref_value = CorsConfiguration { - xmlns: (), - cors_rules: vec![ - CorsRule { - id: None, - max_age_seconds: None, - allowed_origins: vec!["http://www.example.com".into()], - allowed_methods: vec!["PUT".into(), "POST".into(), "DELETE".into()], - allowed_headers: vec!["*".into()], - expose_headers: vec![], - }, - CorsRule { - id: None, - max_age_seconds: None, - allowed_origins: vec!["*".into()], - allowed_methods: vec!["GET".into()], - 
allowed_headers: vec![], - expose_headers: vec![], - }, - CorsRule { - id: Some("qsdfjklm".into()), - max_age_seconds: Some(IntValue(12345)), - allowed_origins: vec!["https://perdu.com".into()], - allowed_methods: vec!["GET".into(), "DELETE".into()], - allowed_headers: vec!["*".into()], - expose_headers: vec!["*".into()], - }, - ], - }; - assert_eq! { - ref_value, - conf - }; - - let message2 = to_xml_with_header(&ref_value).expect("xml serialization"); - - assert_eq!(unprettify_xml(message), unprettify_xml(&message2)); - } - - #[test] - fn test_deserialize_norules() { - let message = r#" - "#; - let conf: CorsConfiguration = from_str(message).unwrap(); - let ref_value = CorsConfiguration { - xmlns: (), - cors_rules: vec![], - }; - assert_eq! { - ref_value, - conf - }; - - let message2 = to_xml_with_header(&ref_value).expect("xml serialization"); - assert_eq!(unprettify_xml(&message), unprettify_xml(&message2)); - } -} diff --git a/src/api/common/xml/lifecycle.rs b/src/api/common/xml/lifecycle.rs deleted file mode 100644 index 61f12a51..00000000 --- a/src/api/common/xml/lifecycle.rs +++ /dev/null @@ -1,345 +0,0 @@ -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; - -use garage_model::bucket_table::{ - parse_lifecycle_date, LifecycleExpiration as GarageLifecycleExpiration, - LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule, -}; - -use super::{xmlns_tag, IntValue, Value}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct LifecycleConfiguration { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag", skip_deserializing)] - pub xmlns: (), - #[serde(rename = "Rule")] - pub lifecycle_rules: Vec, -} - -#[derive(Debug, ToSchema, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[schema(as = lifecycle::Rule)] -pub struct LifecycleRule { - #[serde(rename = "ID", skip_serializing_if = "Option::is_none")] - pub id: Option, - #[serde(rename = "Status")] - pub status: Value, - 
#[serde(rename = "Filter", default, skip_serializing_if = "Option::is_none")] - pub filter: Option, - #[serde( - rename = "Expiration", - default, - skip_serializing_if = "Option::is_none" - )] - pub expiration: Option, - #[serde( - rename = "AbortIncompleteMultipartUpload", - default, - skip_serializing_if = "Option::is_none" - )] - pub abort_incomplete_mpu: Option, -} - -#[derive( - Debug, ToSchema, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default, -)] -#[schema(as = lifecycle::Filter)] -pub struct Filter { - #[serde(rename = "And", skip_serializing_if = "Option::is_none")] - #[schema(no_recursion)] - pub and: Option>, - #[serde(rename = "Prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, - #[serde( - rename = "ObjectSizeGreaterThan", - skip_serializing_if = "Option::is_none" - )] - pub size_gt: Option, - #[serde(rename = "ObjectSizeLessThan", skip_serializing_if = "Option::is_none")] - pub size_lt: Option, -} - -#[derive(Debug, ToSchema, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[schema(as = lifecycle::Expiration)] -pub struct Expiration { - #[serde(rename = "Days", skip_serializing_if = "Option::is_none")] - pub days: Option, - #[serde(rename = "Date", skip_serializing_if = "Option::is_none")] - pub at_date: Option, -} - -#[derive(Debug, ToSchema, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[schema(as = lifecycle::AbortIncompleteMpu)] -pub struct AbortIncompleteMpu { - #[serde(rename = "DaysAfterInitiation")] - pub days: IntValue, -} - -impl LifecycleConfiguration { - pub fn validate_into_garage_lifecycle_config( - self, - ) -> Result, &'static str> { - let mut ret = vec![]; - for rule in self.lifecycle_rules { - ret.push(rule.validate_into_garage_lifecycle_rule()?); - } - Ok(ret) - } - - pub fn from_garage_lifecycle_config(config: &[GarageLifecycleRule]) -> Self { - Self { - xmlns: (), - lifecycle_rules: config - .iter() - .map(LifecycleRule::from_garage_lifecycle_rule) - 
.collect(), - } - } -} - -impl LifecycleRule { - pub fn validate_into_garage_lifecycle_rule(self) -> Result { - let enabled = match self.status.0.as_str() { - "Enabled" => true, - "Disabled" => false, - _ => return Err("invalid value for "), - }; - - let filter = self - .filter - .map(Filter::validate_into_garage_lifecycle_filter) - .transpose()? - .unwrap_or_default(); - - let abort_incomplete_mpu_days = self.abort_incomplete_mpu.map(|x| x.days.0 as usize); - - let expiration = self - .expiration - .map(Expiration::validate_into_garage_lifecycle_expiration) - .transpose()?; - - Ok(GarageLifecycleRule { - id: self.id.map(|x| x.0), - enabled, - filter, - abort_incomplete_mpu_days, - expiration, - }) - } - - pub fn from_garage_lifecycle_rule(rule: &GarageLifecycleRule) -> Self { - Self { - id: rule.id.as_deref().map(Value::from), - status: if rule.enabled { - Value::from("Enabled") - } else { - Value::from("Disabled") - }, - filter: Filter::from_garage_lifecycle_filter(&rule.filter), - abort_incomplete_mpu: rule - .abort_incomplete_mpu_days - .map(|days| AbortIncompleteMpu { - days: IntValue(days as i64), - }), - expiration: rule - .expiration - .as_ref() - .map(Expiration::from_garage_lifecycle_expiration), - } - } -} - -impl Filter { - pub fn count(&self) -> i32 { - fn count(x: &Option) -> i32 { - x.as_ref().map(|_| 1).unwrap_or(0) - } - count(&self.prefix) + count(&self.size_gt) + count(&self.size_lt) - } - - pub fn validate_into_garage_lifecycle_filter( - self, - ) -> Result { - if self.count() > 0 && self.and.is_some() { - Err("Filter tag cannot contain both and another condition") - } else if let Some(and) = self.and { - if and.and.is_some() { - return Err("Nested tags"); - } - Ok(and.internal_into_garage_lifecycle_filter()) - } else if self.count() > 1 { - Err("Multiple Filter conditions must be wrapped in an tag") - } else { - Ok(self.internal_into_garage_lifecycle_filter()) - } - } - - fn internal_into_garage_lifecycle_filter(self) -> GarageLifecycleFilter { 
- GarageLifecycleFilter { - prefix: self.prefix.map(|x| x.0), - size_gt: self.size_gt.map(|x| x.0 as u64), - size_lt: self.size_lt.map(|x| x.0 as u64), - } - } - - pub fn from_garage_lifecycle_filter(rule: &GarageLifecycleFilter) -> Option { - let filter = Filter { - and: None, - prefix: rule.prefix.as_deref().map(Value::from), - size_gt: rule.size_gt.map(|x| IntValue(x as i64)), - size_lt: rule.size_lt.map(|x| IntValue(x as i64)), - }; - match filter.count() { - 0 => None, - 1 => Some(filter), - _ => Some(Filter { - and: Some(Box::new(filter)), - ..Default::default() - }), - } - } -} - -impl Expiration { - pub fn validate_into_garage_lifecycle_expiration( - self, - ) -> Result { - match (self.days, self.at_date) { - (Some(_), Some(_)) => Err("cannot have both and in "), - (None, None) => Err(" must contain either or "), - (Some(days), None) => Ok(GarageLifecycleExpiration::AfterDays(days.0 as usize)), - (None, Some(date)) => { - parse_lifecycle_date(&date.0)?; - Ok(GarageLifecycleExpiration::AtDate(date.0)) - } - } - } - - pub fn from_garage_lifecycle_expiration(exp: &GarageLifecycleExpiration) -> Self { - match exp { - GarageLifecycleExpiration::AfterDays(days) => Expiration { - days: Some(IntValue(*days as i64)), - at_date: None, - }, - GarageLifecycleExpiration::AtDate(date) => Expiration { - days: None, - at_date: Some(Value(date.to_string())), - }, - } - } -} - -#[cfg(test)] -mod tests { - use crate::xml::{to_xml_with_header, unprettify_xml}; - - use super::*; - - use quick_xml::de::from_str; - - #[test] - fn test_deserialize_lifecycle_config() { - let message = r#" - - - id1 - Enabled - - documents/ - - - 7 - - - - id2 - Enabled - - - logs/ - 1000000 - - - - 365 - - -"#; - let conf: LifecycleConfiguration = from_str(message).unwrap(); - let ref_value = LifecycleConfiguration { - xmlns: (), - lifecycle_rules: vec![ - LifecycleRule { - id: Some("id1".into()), - status: "Enabled".into(), - filter: Some(Filter { - prefix: Some("documents/".into()), - 
..Default::default() - }), - expiration: None, - abort_incomplete_mpu: Some(AbortIncompleteMpu { days: IntValue(7) }), - }, - LifecycleRule { - id: Some("id2".into()), - status: "Enabled".into(), - filter: Some(Filter { - and: Some(Box::new(Filter { - prefix: Some("logs/".into()), - size_gt: Some(IntValue(1000000)), - ..Default::default() - })), - ..Default::default() - }), - expiration: Some(Expiration { - days: Some(IntValue(365)), - at_date: None, - }), - abort_incomplete_mpu: None, - }, - ], - }; - assert_eq! { - ref_value, - conf - }; - - let message2 = to_xml_with_header(&ref_value).expect("serialize xml"); - - assert_eq!(unprettify_xml(message), unprettify_xml(&message2)); - - // Check validation - let validated = ref_value - .validate_into_garage_lifecycle_config() - .expect("invalid xml config"); - - let ref_config = vec![ - GarageLifecycleRule { - id: Some("id1".into()), - enabled: true, - filter: GarageLifecycleFilter { - prefix: Some("documents/".into()), - ..Default::default() - }, - expiration: None, - abort_incomplete_mpu_days: Some(7), - }, - GarageLifecycleRule { - id: Some("id2".into()), - enabled: true, - filter: GarageLifecycleFilter { - prefix: Some("logs/".into()), - size_gt: Some(1000000), - ..Default::default() - }, - expiration: Some(GarageLifecycleExpiration::AfterDays(365)), - abort_incomplete_mpu_days: None, - }, - ]; - assert_eq!(validated, ref_config); - - let message3 = to_xml_with_header(&LifecycleConfiguration::from_garage_lifecycle_config( - &validated, - )) - .expect("serialize xml"); - assert_eq!(unprettify_xml(message), unprettify_xml(&message3)); - } -} diff --git a/src/api/common/xml/mod.rs b/src/api/common/xml/mod.rs deleted file mode 100644 index be07949c..00000000 --- a/src/api/common/xml/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -pub mod cors; -pub mod lifecycle; -pub mod website; - -use serde::{Deserialize, Serialize, Serializer}; -use utoipa::ToSchema; - -pub fn to_xml_with_header(x: &T) -> Result { - use 
quick_xml::se::{self, EmptyElementHandling, QuoteLevel}; - - let mut xml = r#""#.to_string(); - - let mut ser = se::Serializer::new(&mut xml); - ser.set_quote_level(QuoteLevel::Full) - .empty_element_handling(EmptyElementHandling::Expanded); - let _serialized = x.serialize(ser)?; - Ok(xml) -} - -#[cfg(test)] -pub fn unprettify_xml(xml_in: &str) -> String { - xml_in.trim().lines().fold(String::new(), |mut val, line| { - val.push_str(line.trim()); - val - }) -} - -pub fn xmlns_tag(_v: &(), s: S) -> Result { - s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") -} - -pub fn xmlns_xsi_tag(_v: &(), s: S) -> Result { - s.serialize_str("http://www.w3.org/2001/XMLSchema-instance") -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = xml::Value)] -pub struct Value(#[serde(rename = "$value")] pub String); - -impl From<&str> for Value { - fn from(s: &str) -> Value { - Value(s.to_string()) - } -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = xml::IntValue)] -pub struct IntValue(#[serde(rename = "$value")] pub i64); diff --git a/src/api/common/xml/website.rs b/src/api/common/xml/website.rs deleted file mode 100644 index f0c0d596..00000000 --- a/src/api/common/xml/website.rs +++ /dev/null @@ -1,423 +0,0 @@ -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; - -use garage_model::bucket_table::{self, RoutingRule as GarageRoutingRule, WebsiteConfig}; - -use crate::common_error::CommonError as Error; -use crate::xml::{xmlns_tag, IntValue, Value}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct WebsiteConfiguration { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag", skip_deserializing)] - pub xmlns: (), - #[serde(rename = "ErrorDocument", skip_serializing_if = "Option::is_none")] - pub error_document: Option, - #[serde(rename = "IndexDocument", skip_serializing_if = "Option::is_none")] - pub 
index_document: Option, - #[serde( - rename = "RedirectAllRequestsTo", - skip_serializing_if = "Option::is_none" - )] - pub redirect_all_requests_to: Option, - #[serde( - rename = "RoutingRules", - default, - skip_serializing_if = "RoutingRules::is_empty" - )] - pub routing_rules: RoutingRules, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)] -pub struct RoutingRules { - #[serde(rename = "RoutingRule")] - pub rules: Vec, -} - -impl RoutingRules { - fn is_empty(&self) -> bool { - self.rules.is_empty() - } -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::RoutingRule)] -pub struct RoutingRule { - #[serde(rename = "Condition")] - pub condition: Option, - #[serde(rename = "Redirect")] - pub redirect: Redirect, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::Key)] -pub struct Key { - #[serde(rename = "Key")] - pub key: Value, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::Suffix)] -pub struct Suffix { - #[serde(rename = "Suffix")] - pub suffix: Value, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::Target)] -pub struct Target { - #[serde(rename = "HostName")] - pub hostname: Value, - #[serde(rename = "Protocol")] - pub protocol: Option, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::Condition)] -pub struct Condition { - #[serde( - rename = "HttpErrorCodeReturnedEquals", - skip_serializing_if = "Option::is_none" - )] - pub http_error_code: Option, - #[serde(rename = "KeyPrefixEquals", skip_serializing_if = "Option::is_none")] - pub prefix: Option, -} - -#[derive(Debug, ToSchema, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] -#[schema(as = website::Redirect)] 
-pub struct Redirect { - #[serde(rename = "HostName", skip_serializing_if = "Option::is_none")] - pub hostname: Option, - #[serde(rename = "Protocol", skip_serializing_if = "Option::is_none")] - pub protocol: Option, - #[serde(rename = "HttpRedirectCode", skip_serializing_if = "Option::is_none")] - pub http_redirect_code: Option, - #[serde( - rename = "ReplaceKeyPrefixWith", - skip_serializing_if = "Option::is_none" - )] - pub replace_prefix: Option, - #[serde(rename = "ReplaceKeyWith", skip_serializing_if = "Option::is_none")] - pub replace_full: Option, -} - -impl WebsiteConfiguration { - pub fn validate(&self) -> Result<(), Error> { - if self.redirect_all_requests_to.is_some() - && (self.error_document.is_some() - || self.index_document.is_some() - || !self.routing_rules.is_empty()) - { - return Err(Error::bad_request( - "Bad XML: can't have RedirectAllRequestsTo and other fields", - )); - } - if let Some(ref ed) = self.error_document { - ed.validate()?; - } - if let Some(ref id) = self.index_document { - id.validate()?; - } - if let Some(ref rart) = self.redirect_all_requests_to { - rart.validate()?; - } - for rr in &self.routing_rules.rules { - rr.validate()?; - } - if self.routing_rules.rules.len() > 1000 { - // we will do linear scans, best to avoid overly long configuration. 
The - // limit was chosen arbitrarily - return Err(Error::bad_request( - "Bad XML: RoutingRules can't have more than 1000 child elements", - )); - } - - Ok(()) - } - - pub fn into_garage_website_config(self) -> Result { - if self.redirect_all_requests_to.is_some() { - Err(Error::NotImplemented( - "RedirectAllRequestsTo is not currently implemented in Garage, however its effect can be emulated using a single unconditional RoutingRule.".into(), - )) - } else { - Ok(WebsiteConfig { - index_document: self - .index_document - .map(|x| x.suffix.0) - .unwrap_or_else(|| "index.html".to_string()), - error_document: self.error_document.map(|x| x.key.0), - redirect_all: None, - routing_rules: self - .routing_rules - .rules - .into_iter() - .map(RoutingRule::into_garage_routing_rule) - .collect(), - }) - } - } -} - -impl Key { - pub fn validate(&self) -> Result<(), Error> { - if self.key.0.is_empty() { - Err(Error::bad_request( - "Bad XML: error document specified but empty", - )) - } else { - Ok(()) - } - } -} - -impl Suffix { - pub fn validate(&self) -> Result<(), Error> { - if self.suffix.0.is_empty() | self.suffix.0.contains('/') { - Err(Error::bad_request( - "Bad XML: index document is empty or contains /", - )) - } else { - Ok(()) - } - } -} - -impl Target { - pub fn validate(&self) -> Result<(), Error> { - if let Some(ref protocol) = self.protocol { - if protocol.0 != "http" && protocol.0 != "https" { - return Err(Error::bad_request("Bad XML: invalid protocol")); - } - } - Ok(()) - } -} - -impl RoutingRule { - pub fn validate(&self) -> Result<(), Error> { - if let Some(condition) = &self.condition { - condition.validate()?; - } - self.redirect.validate() - } - - pub fn from_garage_routing_rule(rule: GarageRoutingRule) -> Self { - RoutingRule { - condition: rule.condition.map(|cond| Condition { - http_error_code: cond.http_error_code.map(|c| IntValue(c as i64)), - prefix: cond.prefix.map(Value), - }), - redirect: Redirect { - hostname: rule.redirect.hostname.map(Value), 
- http_redirect_code: Some(IntValue(rule.redirect.http_redirect_code as i64)), - protocol: rule.redirect.protocol.map(Value), - replace_full: rule.redirect.replace_key.map(Value), - replace_prefix: rule.redirect.replace_key_prefix.map(Value), - }, - } - } - - pub fn into_garage_routing_rule(self) -> bucket_table::RoutingRule { - bucket_table::RoutingRule { - condition: self - .condition - .map(|condition| bucket_table::RedirectCondition { - http_error_code: condition.http_error_code.map(|c| c.0 as u16), - prefix: condition.prefix.map(|p| p.0), - }), - redirect: bucket_table::Redirect { - hostname: self.redirect.hostname.map(|h| h.0), - protocol: self.redirect.protocol.map(|p| p.0), - // aws default to 301, which i find punitive in case of - // misconfiguration (can be permanently cached on the - // user agent) - http_redirect_code: self - .redirect - .http_redirect_code - .map(|c| c.0 as u16) - .unwrap_or(302), - replace_key_prefix: self.redirect.replace_prefix.map(|k| k.0), - replace_key: self.redirect.replace_full.map(|k| k.0), - }, - } - } -} - -impl Condition { - pub fn validate(&self) -> Result { - if let Some(ref error_code) = self.http_error_code { - // TODO do other error codes make sense? 
Aws only allows 4xx and 5xx - if error_code.0 != 404 { - return Err(Error::bad_request( - "Bad XML: HttpErrorCodeReturnedEquals must be 404 or absent", - )); - } - } - Ok(self.prefix.is_some()) - } -} - -impl Redirect { - pub fn validate(&self) -> Result<(), Error> { - if self.replace_prefix.is_some() && self.replace_full.is_some() { - return Err(Error::bad_request( - "Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set", - )); - } - if let Some(ref protocol) = self.protocol { - if protocol.0 != "http" && protocol.0 != "https" { - return Err(Error::bad_request("Bad XML: invalid protocol")); - } - } - if let Some(ref http_redirect_code) = self.http_redirect_code { - match http_redirect_code.0 { - // aws allows all 3xx except 300, but some are non-sensical (not modified, - // use proxy...) - 301 | 302 | 303 | 307 | 308 => { - if self.hostname.is_none() && self.protocol.is_some() { - return Err(Error::bad_request( - "Bad XML: HostName must be set if Protocol is set", - )); - } - } - // aws doesn't allow these codes, but netlify does, and it seems like a - // cool feature (change the page seen without changing the url shown by the - // user agent) - 200 | 404 => { - if self.hostname.is_some() || self.protocol.is_some() { - // hostname would mean different bucket, protocol doesn't make - // sense - return Err(Error::bad_request( - "Bad XML: an HttpRedirectCode of 200 is not acceptable alongside HostName or Protocol", - )); - } - } - _ => { - return Err(Error::bad_request("Bad XML: invalid HttpRedirectCode")); - } - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::xml::{to_xml_with_header, unprettify_xml}; - - use super::*; - - use quick_xml::de::from_str; - - #[test] - fn test_deserialize() { - let message = r#" - - - my-error-doc - - - my-index - - - garage.tld - https - - - - - 404 - prefix1 - - - gara.ge - http - 303 - prefix2 - fullkey - - - - - - - - 404 - missing - - - -"#; - let conf: WebsiteConfiguration = - 
from_str(message).expect("failed to deserialize xml in `WebsiteConfiguration`"); - let ref_value = WebsiteConfiguration { - xmlns: (), - error_document: Some(Key { - key: Value("my-error-doc".to_owned()), - }), - index_document: Some(Suffix { - suffix: Value("my-index".to_owned()), - }), - redirect_all_requests_to: Some(Target { - hostname: Value("garage.tld".to_owned()), - protocol: Some(Value("https".to_owned())), - }), - routing_rules: RoutingRules { - rules: vec![ - RoutingRule { - condition: Some(Condition { - http_error_code: Some(IntValue(404)), - prefix: Some(Value("prefix1".to_owned())), - }), - redirect: Redirect { - hostname: Some(Value("gara.ge".to_owned())), - protocol: Some(Value("http".to_owned())), - http_redirect_code: Some(IntValue(303)), - replace_prefix: Some(Value("prefix2".to_owned())), - replace_full: Some(Value("fullkey".to_owned())), - }, - }, - RoutingRule { - condition: Some(Condition { - http_error_code: None, - prefix: Some(Value("".to_owned())), - }), - redirect: Redirect { - hostname: None, - protocol: None, - http_redirect_code: Some(IntValue(404)), - replace_prefix: None, - replace_full: Some(Value("missing".to_owned())), - }, - }, - ], - }, - }; - assert_eq! 
{ - ref_value, - conf - } - - let message2 = to_xml_with_header(&ref_value).expect("xml serialization"); - - assert_eq!(unprettify_xml(message), unprettify_xml(&message2)); - } - - #[test] - fn test_serialize_empty() { - let conf = WebsiteConfiguration { - xmlns: (), - error_document: None, - index_document: None, - redirect_all_requests_to: None, - routing_rules: RoutingRules { rules: vec![] }, - }; - let serialized_ref = r#" - -"#; - - let serialized = to_xml_with_header(&conf).expect("xml serialization"); - assert_eq!(unprettify_xml(&serialized), unprettify_xml(&serialized_ref)); - } -} diff --git a/src/api/k2v/Cargo.toml b/src/api/k2v/Cargo.toml index 24a0b1bf..28f74ea3 100644 --- a/src/api/k2v/Cargo.toml +++ b/src/api/k2v/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api_k2v" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,9 +14,9 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_model = { workspace = true, features = ["k2v"] } +garage_model = { workspace = true, features = [ "k2v" ] } garage_table.workspace = true -garage_util = { workspace = true, features = ["k2v"] } +garage_util = { workspace = true, features = [ "k2v" ] } garage_api_common.workspace = true base64.workspace = true @@ -35,6 +35,3 @@ serde.workspace = true serde_json.workspace = true opentelemetry.workspace = true - -[lints] -workspace = true diff --git a/src/api/k2v/api_server.rs b/src/api/k2v/api_server.rs index 8c89c35d..8e10d9a6 100644 --- a/src/api/k2v/api_server.rs +++ b/src/api/k2v/api_server.rs @@ -1,4 +1,3 @@ -use std::borrow::Cow; use std::sync::Arc; use hyper::{body::Incoming as IncomingBody, Method, Request, Response}; @@ -77,19 +76,25 @@ impl ApiHandler for K2VApiServer { // The OPTIONS method is processed early, before we even check for an API key if let Endpoint::Options = endpoint { let options_res = 
handle_options_api(garage, &req, Some(bucket_name)) + .await .ok_or_bad_request("Error handling OPTIONS")?; return Ok(options_res.map(|_empty_body: EmptyBody| empty_body())); } - let verified_request = verify_request(&garage, req, "k2v")?; + let verified_request = verify_request(&garage, req, "k2v").await?; let req = verified_request.request; let api_key = verified_request.access_key; + let bucket_id = garage + .bucket_helper() + .resolve_bucket(&bucket_name, &api_key) + .await + .map_err(pass_helper_error)?; let bucket = garage .bucket_helper() - .resolve_bucket_fast(&bucket_name, &api_key) - .map_err(pass_helper_error)?; - let bucket_id = bucket.id; + .get_existing_bucket(bucket_id) + .await + .map_err(helper_error_as_internal)?; let bucket_params = bucket.state.into_option().unwrap(); let allowed = match endpoint.authorization_type() { @@ -180,8 +185,8 @@ impl ApiHandler for K2VApiServer { } impl ApiEndpoint for K2VApiEndpoint { - fn name(&self) -> Cow<'static, str> { - Cow::Borrowed(self.endpoint.name()) + fn name(&self) -> &'static str { + self.endpoint.name() } fn add_span_attributes(&self, span: SpanRef<'_>) { diff --git a/src/api/k2v/batch.rs b/src/api/k2v/batch.rs index 5f38cce1..7a03d836 100644 --- a/src/api/k2v/batch.rs +++ b/src/api/k2v/batch.rs @@ -61,7 +61,7 @@ pub async fn handle_read_batch( resps.push(resp?); } - json_ok_response(&resps) + Ok(json_ok_response(&resps)?) } async fn handle_read_batch_query( @@ -155,7 +155,7 @@ pub async fn handle_delete_batch( resps.push(resp?); } - json_ok_response(&resps) + Ok(json_ok_response(&resps)?) 
} async fn handle_delete_batch_query( diff --git a/src/api/k2v/error.rs b/src/api/k2v/error.rs index 797eb868..f1937fe5 100644 --- a/src/api/k2v/error.rs +++ b/src/api/k2v/error.rs @@ -2,8 +2,8 @@ use hyper::header::HeaderValue; use hyper::{HeaderMap, StatusCode}; use thiserror::Error; -pub(crate) use garage_api_common::common_error::pass_helper_error; use garage_api_common::common_error::{commonErrorDerivative, CommonError}; +pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error}; pub use garage_api_common::common_error::{ CommonErrorDerivative, OkOrBadRequest, OkOrInternalError, }; @@ -20,13 +20,8 @@ pub enum Error { // Category: cannot process /// Authorization Header Malformed - #[error( - "Authorization header malformed, unexpected scope: '{unexpected}', expected: '{expected}'" - )] - AuthorizationHeaderMalformed { - unexpected: String, - expected: String, - }, + #[error("Authorization header malformed, unexpected scope: {0}")] + AuthorizationHeaderMalformed(String), /// The provided digest (checksum) value was invalid #[error("Invalid digest: {0}")] @@ -59,13 +54,9 @@ impl From for Error { fn from(err: SignatureError) -> Self { match err { SignatureError::Common(c) => Self::Common(c), - SignatureError::AuthorizationHeaderMalformed { - unexpected, - expected, - } => Self::AuthorizationHeaderMalformed { - unexpected, - expected, - }, + SignatureError::AuthorizationHeaderMalformed(c) => { + Self::AuthorizationHeaderMalformed(c) + } SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i), SignatureError::InvalidDigest(d) => Self::InvalidDigest(d), } @@ -81,7 +72,7 @@ impl Error { Error::Common(c) => c.aws_code(), Error::NoSuchKey => "NoSuchKey", Error::NotAcceptable(_) => "NotAcceptable", - Error::AuthorizationHeaderMalformed { .. 
} => "AuthorizationHeaderMalformed", + Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed", Error::InvalidBase64(_) => "InvalidBase64", Error::InvalidUtf8Str(_) => "InvalidUtf8String", Error::InvalidCausalityToken => "CausalityToken", @@ -97,7 +88,7 @@ impl ApiError for Error { Error::Common(c) => c.http_status_code(), Error::NoSuchKey => StatusCode::NOT_FOUND, Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE, - Error::AuthorizationHeaderMalformed { .. } + Error::AuthorizationHeaderMalformed(_) | Error::InvalidBase64(_) | Error::InvalidUtf8Str(_) | Error::InvalidDigest(_) @@ -108,7 +99,6 @@ impl ApiError for Error { fn add_http_headers(&self, header_map: &mut HeaderMap) { use hyper::header; header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap()); - header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap()); } fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody { diff --git a/src/api/k2v/index.rs b/src/api/k2v/index.rs index 5188c32f..fbfaad98 100644 --- a/src/api/k2v/index.rs +++ b/src/api/k2v/index.rs @@ -28,12 +28,12 @@ pub async fn handle_read_index( let node_id_vec = garage .system .cluster_layout() - .all_nongateway_nodes()? + .all_nongateway_nodes() .to_vec(); let (partition_keys, more, next_start) = read_range( &garage.k2v.counter_table.table, - bucket_id, + &bucket_id, &prefix, &start, &end, @@ -66,7 +66,7 @@ pub async fn handle_read_index( bytes: *vals.get(&s_bytes).unwrap_or(&0), } }) - .collect(), + .collect::>(), more, next_start, }; diff --git a/src/api/k2v/item.rs b/src/api/k2v/item.rs index 2482d7cd..0fb945d2 100644 --- a/src/api/k2v/item.rs +++ b/src/api/k2v/item.rs @@ -97,7 +97,7 @@ impl ReturnFormat { } } -/// Handle `ReadItem` request +/// Handle ReadItem request #[allow(clippy::ptr_arg)] pub async fn handle_read_item( ctx: ReqCtx, @@ -201,7 +201,7 @@ pub async fn handle_delete_item( .body(empty_body())?) 
} -/// Handle `ReadItem` request +/// Handle ReadItem request #[allow(clippy::ptr_arg)] pub async fn handle_poll_item( ctx: ReqCtx, diff --git a/src/api/k2v/range.rs b/src/api/k2v/range.rs index dc1bdaac..eb4738db 100644 --- a/src/api/k2v/range.rs +++ b/src/api/k2v/range.rs @@ -1,6 +1,6 @@ //! Utility module for retrieving ranges of items in Garage tables //! Implements parameters (prefix, start, end, limit) as specified -//! for endpoints `ReadIndex`, `ReadBatch` and `DeleteBatch` +//! for endpoints ReadIndex, ReadBatch and DeleteBatch use std::sync::Arc; diff --git a/src/api/k2v/router.rs b/src/api/k2v/router.rs index 642b002b..a04b0f81 100644 --- a/src/api/k2v/router.rs +++ b/src/api/k2v/router.rs @@ -53,7 +53,7 @@ pub enum Endpoint { impl Endpoint { /// Determine which S3 endpoint a request is for using the request, and a bucket which was /// possibly extracted from the Host header. - /// Returns Self plus bucket name, if endpoint is not `Endpoint::ListBuckets` + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets pub fn from_request(req: &Request) -> Result<(Self, String), Error> { let uri = req.uri(); let path = uri.path().trim_start_matches('/'); @@ -62,7 +62,7 @@ impl Endpoint { let (bucket, partition_key) = path .split_once('/') .map(|(b, p)| (b.to_owned(), p.trim_start_matches('/'))) - .unwrap_or_else(|| (path.to_owned(), "")); + .unwrap_or((path.to_owned(), "")); if bucket.is_empty() { return Err(Error::bad_request("Missing bucket name")); @@ -90,7 +90,7 @@ impl Endpoint { }; if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message); + debug!("Unused query parameter: {}", message) } Ok((res, bucket)) } diff --git a/src/api/s3/Cargo.toml b/src/api/s3/Cargo.toml index 5a9d2e5f..88630866 100644 --- a/src/api/s3/Cargo.toml +++ b/src/api/s3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api_s3" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = 
"AGPL-3.0" @@ -27,10 +27,10 @@ async-compression.workspace = true base64.workspace = true bytes.workspace = true chrono.workspace = true -crc-fast.workspace = true +crc32fast.workspace = true +crc32c.workspace = true thiserror.workspace = true hex.workspace = true -hmac.workspace = true tracing.workspace = true md-5.workspace = true pin-project.workspace = true @@ -58,6 +58,3 @@ serde_json.workspace = true quick-xml.workspace = true opentelemetry.workspace = true - -[lints] -workspace = true diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs index 689d174d..acb0cf56 100644 --- a/src/api/s3/api_server.rs +++ b/src/api/s3/api_server.rs @@ -1,4 +1,3 @@ -use std::borrow::Cow; use std::sync::Arc; use hyper::header; @@ -13,7 +12,6 @@ use garage_util::socket_address::UnixOrTCPSocketAddress; use garage_model::garage::Garage; use garage_model::key_table::Key; -use garage_api_common::common_error::CommonError; use garage_api_common::cors::*; use garage_api_common::generic_server::*; use garage_api_common::helpers::*; @@ -65,7 +63,7 @@ impl S3ApiServer { ) -> Result, Error> { match endpoint { Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await, - endpoint => Err(CommonError::NotImplemented(endpoint.name().to_owned()).into()), + endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), } } } @@ -119,11 +117,11 @@ impl ApiHandler for S3ApiServer { return handle_post_object(garage, req, bucket_name.unwrap()).await; } if let Endpoint::Options = endpoint { - let options_res = handle_options_api(garage, &req, bucket_name)?; + let options_res = handle_options_api(garage, &req, bucket_name).await?; return Ok(options_res.map(|_empty_body: EmptyBody| empty_body())); } - let verified_request = verify_request(&garage, req, "s3")?; + let verified_request = verify_request(&garage, req, "s3").await?; let req = verified_request.request; let api_key = verified_request.access_key; @@ -141,11 +139,15 @@ impl ApiHandler for S3ApiServer { return 
handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await; } + let bucket_id = garage + .bucket_helper() + .resolve_bucket(&bucket_name, &api_key) + .await + .map_err(pass_helper_error)?; let bucket = garage .bucket_helper() - .resolve_bucket_fast(&bucket_name, &api_key) - .map_err(pass_helper_error)?; - let bucket_id = bucket.id; + .get_existing_bucket(bucket_id) + .await?; let bucket_params = bucket.state.into_option().unwrap(); let allowed = match endpoint.authorization_type() { @@ -328,7 +330,7 @@ impl ApiHandler for S3ApiServer { Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await, Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await, Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await, - endpoint => Err(CommonError::NotImplemented(endpoint.name().to_owned()).into()), + endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), }; // If request was a success and we have a CORS rule that applies to it, @@ -350,8 +352,8 @@ impl ApiHandler for S3ApiServer { } impl ApiEndpoint for S3ApiEndpoint { - fn name(&self) -> Cow<'static, str> { - Cow::Borrowed(self.endpoint.name()) + fn name(&self) -> &'static str { + self.endpoint.name() } fn add_span_attributes(&self, span: SpanRef<'_>) { diff --git a/src/api/s3/bucket.rs b/src/api/s3/bucket.rs index 901961e9..55caa6c8 100644 --- a/src/api/s3/bucket.rs +++ b/src/api/s3/bucket.rs @@ -57,23 +57,23 @@ pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result, Error> { if kp.allow_owner { grants.push(s3_xml::Grant { - grantee: create_grantee(key_p, &api_key), + grantee: create_grantee(&key_p, &api_key), permission: s3_xml::Value("FULL_CONTROL".to_string()), }); } else { if kp.allow_read { grants.push(s3_xml::Grant { - grantee: create_grantee(key_p, &api_key), + grantee: create_grantee(&key_p, &api_key), permission: s3_xml::Value("READ".to_string()), }); grants.push(s3_xml::Grant { - grantee: create_grantee(key_p, &api_key), + 
grantee: create_grantee(&key_p, &api_key), permission: s3_xml::Value("READ_ACP".to_string()), }); } if kp.allow_write { grants.push(s3_xml::Grant { - grantee: create_grantee(key_p, &api_key), + grantee: create_grantee(&key_p, &api_key), permission: s3_xml::Value("WRITE".to_string()), }); } @@ -192,20 +192,23 @@ pub async fn handle_create_bucket( let api_key = helper.key().get_existing_key(api_key_id).await?; let key_params = api_key.params().unwrap(); - let existing_bucket = helper - .bucket() - .resolve_bucket(&bucket_name, &api_key.key_id) - .await?; + let existing_bucket = if let Some(Some(bucket_id)) = key_params.local_aliases.get(&bucket_name) + { + Some(*bucket_id) + } else { + helper + .bucket() + .resolve_global_bucket_name(&bucket_name) + .await? + }; - if let Some(bucket) = existing_bucket { - // According to https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html - // in such case we have to return 409 BucketAlreadyOwnedByYou if request was sent - // by bucket owner and 409 BucketAlreadyExists otherwise. - let kp = api_key.bucket_permissions(&bucket.id); + if let Some(bucket_id) = existing_bucket { + // Check we have write or owner permission on the bucket, + // in that case it's fine, return 200 OK, bucket exists; + // otherwise return a forbidden error. 
+ let kp = api_key.bucket_permissions(&bucket_id); if !(kp.allow_write || kp.allow_owner) { return Err(CommonError::BucketAlreadyExists.into()); - } else { - return Err(CommonError::BucketAlreadyOwnedByYou.into()); } } else { // Check user is allowed to create bucket @@ -330,8 +333,8 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option> { // Returns Some(None) if no location constraint is given // Returns Some(Some("xxxx")) where xxxx is the given location constraint - let xml_str = std::str::from_utf8(xml_bytes).ok()?.trim(); - if xml_str.is_empty() { + let xml_str = std::str::from_utf8(xml_bytes).ok()?; + if xml_str.trim_matches(char::is_whitespace).is_empty() { return Some(None); } @@ -373,7 +376,6 @@ mod tests { #[test] fn create_bucket() { assert_eq!(parse_create_bucket_xml(br#""#), Some(None)); - assert_eq!(parse_create_bucket_xml(br#" "#), Some(None)); assert_eq!( parse_create_bucket_xml( br#" diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs index 9d5eed8e..47a63c82 100644 --- a/src/api/s3/copy.rs +++ b/src/api/s3/copy.rs @@ -24,7 +24,7 @@ use garage_api_common::helpers::*; use garage_api_common::signature::checksum::*; use crate::api_server::{ReqBody, ResBody}; -use crate::encryption::{EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders}; use crate::multipart; @@ -66,37 +66,11 @@ pub async fn handle_copy( &ctx.garage, req.headers(), &source_version_meta.encryption, - OekDerivationInfo::for_object(&source_object, source_version), )?; - let dest_uuid = gen_uuid(); - let dest_encryption = EncryptionParams::new_from_headers( - &ctx.garage, - req.headers(), - OekDerivationInfo { - bucket_id: ctx.bucket_id, - version_id: dest_uuid, - object_key: dest_key, - }, - )?; - - let was_multipart = source_version_meta.etag.contains('-') // HACK - || source_object_meta_inner.checksum_type == Some(ChecksumType::Composite); + let 
dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?; // Extract source checksum info before source_object_meta_inner is consumed let source_checksum = source_object_meta_inner.checksum; - let source_checksum_type = match (source_object_meta_inner.checksum_type, source_checksum) { - (Some(ct), _) => Some(ct), - (None, Some(_)) => { - // Migrated object from garage v1.x or older - // determine checksum type depending if this is a multipart upload or not - if was_multipart { - Some(ChecksumType::Composite) - } else { - Some(ChecksumType::FullObject) - } - } - (None, None) => None, - }; let source_checksum_algorithm = source_checksum.map(|x| x.algorithm()); // If source object has a checksum, the destination object must as well. @@ -105,6 +79,7 @@ pub async fn handle_copy( let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm); // Determine metadata of destination object + let was_multipart = source_version_meta.etag.contains('-'); let dest_object_meta = ObjectVersionMetaInner { headers: match req.headers().get("x-amz-metadata-directive") { Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => { @@ -124,7 +99,6 @@ pub async fn handle_copy( } }, checksum: source_checksum, - checksum_type: source_checksum_type, }; // Do actual object copying @@ -144,53 +118,40 @@ pub async fn handle_copy( // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption) - || (checksum_algorithm.is_some() - && (was_multipart || checksum_algorithm != source_checksum_algorithm)); + || source_checksum_algorithm != checksum_algorithm + || (was_multipart && checksum_algorithm.is_some()); let res = if !must_recopy { - let dest_info = DestInfo { - key: dest_key, - uuid: dest_uuid, - object_meta: dest_object_meta, - encryption: dest_encryption, - }; - // In most cases, we can just copy the metadata and link blocks of the // 
old object from the new object. handle_copy_metaonly( ctx, - dest_info, + dest_key, + dest_object_meta, + dest_encryption, source_version, source_version_data, source_version_meta, ) .await? } else { + let expected_checksum = ExpectedChecksums { + md5: None, + sha256: None, + extra: source_checksum, + }; let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm { ChecksumMode::Calculate(checksum_algorithm) } else { - ChecksumMode::Verify(ExpectedChecksums { - md5: None, - sha256: None, - extra: source_checksum, - }) - }; - // For multipart uploads that had a composite checksum, set checksum type - // to full object as it will be recalculated. - let dest_object_meta = ObjectVersionMetaInner { - checksum_type: checksum_algorithm.map(|_| ChecksumType::FullObject), - ..dest_object_meta - }; - - let dest_info = DestInfo { - key: dest_key, - uuid: dest_uuid, - object_meta: dest_object_meta, - encryption: dest_encryption, + ChecksumMode::Verify(&expected_checksum) }; + // If source and dest encryption use different keys, + // we must decrypt content and re-encrypt, so rewrite all data blocks. handle_copy_reencrypt( ctx, - dest_info, + dest_key, + dest_object_meta, + dest_encryption, source_version, source_version_data, source_encryption, @@ -217,16 +178,11 @@ pub async fn handle_copy( Ok(resp.body(string_body(xml))?) 
} -struct DestInfo<'a> { - key: &'a str, - uuid: Uuid, - object_meta: ObjectVersionMetaInner, - encryption: EncryptionParams, -} - async fn handle_copy_metaonly( ctx: ReqCtx, - dest_info: DestInfo<'_>, + dest_key: &str, + dest_object_meta: ObjectVersionMetaInner, + dest_encryption: EncryptionParams, source_version: &ObjectVersion, source_version_data: &ObjectVersionData, source_version_meta: &ObjectVersionMeta, @@ -238,16 +194,17 @@ async fn handle_copy_metaonly( } = ctx; // Generate parameters for copied object + let new_uuid = gen_uuid(); let new_timestamp = now_msec(); let new_meta = ObjectVersionMeta { - encryption: dest_info.encryption.encrypt_meta(dest_info.object_meta)?, + encryption: dest_encryption.encrypt_meta(dest_object_meta)?, size: source_version_meta.size, etag: source_version_meta.etag.clone(), }; let res = SaveStreamResult { - version_uuid: dest_info.uuid, + version_uuid: new_uuid, version_timestamp: new_timestamp, etag: new_meta.etag.clone(), }; @@ -259,7 +216,7 @@ async fn handle_copy_metaonly( // bytes is either plaintext before&after or encrypted with the // same keys, so it's ok to just copy it as is let dest_object_version = ObjectVersion { - uuid: dest_info.uuid, + uuid: new_uuid, timestamp: new_timestamp, state: ObjectVersionState::Complete(ObjectVersionData::Inline( new_meta, @@ -268,7 +225,7 @@ async fn handle_copy_metaonly( }; let dest_object = Object::new( dest_bucket_id, - dest_info.key.to_string(), + dest_key.to_string(), vec![dest_object_version], ); garage.object_table.insert(&dest_object).await?; @@ -286,7 +243,7 @@ async fn handle_copy_metaonly( // This holds a reference to the object in the Version table // so that it won't be deleted, e.g. by repair_versions. 
let tmp_dest_object_version = ObjectVersion { - uuid: dest_info.uuid, + uuid: new_uuid, timestamp: new_timestamp, state: ObjectVersionState::Uploading { encryption: new_meta.encryption.clone(), @@ -296,22 +253,20 @@ async fn handle_copy_metaonly( }; let tmp_dest_object = Object::new( dest_bucket_id, - dest_info.key.to_string(), + dest_key.to_string(), vec![tmp_dest_object_version], ); garage.object_table.insert(&tmp_dest_object).await?; - let dest_uuid = dest_info.uuid; - // Write version in the version table. Even with empty block list, // this means that the BlockRef entries linked to this version cannot be // marked as deleted (they are marked as deleted only if the Version // doesn't exist or is marked as deleted). let mut dest_version = Version::new( - dest_uuid, + new_uuid, VersionBacklink::Object { bucket_id: dest_bucket_id, - key: dest_info.key.to_string(), + key: dest_key.to_string(), }, false, ); @@ -327,7 +282,7 @@ async fn handle_copy_metaonly( .iter() .map(|b| BlockRef { block: b.1.hash, - version: dest_uuid, + version: new_uuid, deleted: false.into(), }) .collect::>(); @@ -343,7 +298,7 @@ async fn handle_copy_metaonly( // with the stuff before, the block's reference counts could be decremented before // they are incremented again for the new version, leading to data being deleted. 
let dest_object_version = ObjectVersion { - uuid: dest_info.uuid, + uuid: new_uuid, timestamp: new_timestamp, state: ObjectVersionState::Complete(ObjectVersionData::FirstBlock( new_meta, @@ -352,7 +307,7 @@ async fn handle_copy_metaonly( }; let dest_object = Object::new( dest_bucket_id, - dest_info.key.to_string(), + dest_key.to_string(), vec![dest_object_version], ); garage.object_table.insert(&dest_object).await?; @@ -364,11 +319,13 @@ async fn handle_copy_metaonly( async fn handle_copy_reencrypt( ctx: ReqCtx, - dest_info: DestInfo<'_>, + dest_key: &str, + dest_object_meta: ObjectVersionMetaInner, + dest_encryption: EncryptionParams, source_version: &ObjectVersion, source_version_data: &ObjectVersionData, source_encryption: EncryptionParams, - checksum_mode: ChecksumMode, + checksum_mode: ChecksumMode<'_>, ) -> Result { // basically we will read the source data (decrypt if necessary) // and save that in a new object (encrypt if necessary), @@ -382,11 +339,10 @@ async fn handle_copy_reencrypt( save_stream( &ctx, - dest_info.uuid, - dest_info.object_meta, - dest_info.encryption, + dest_object_meta, + dest_encryption, source_stream.map_err(|e| Error::from(GarageError::from(e))), - &dest_info.key.to_string(), + &dest_key.to_string(), checksum_mode, ) .await @@ -406,7 +362,7 @@ pub async fn handle_upload_part_copy( let dest_upload_id = multipart::decode_upload_id(upload_id)?; let dest_key = dest_key.to_string(); - let (source_object, (dest_object, dest_version, mut dest_mpu)) = futures::try_join!( + let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!( get_copy_source(&ctx, req), multipart::get_upload(&ctx, &dest_key, &dest_upload_id) )?; @@ -424,10 +380,7 @@ pub async fn handle_upload_part_copy( &garage, req.headers(), &source_version_meta.encryption, - OekDerivationInfo::for_object(&source_object, source_object_version), )?; - - let dest_oek_params = OekDerivationInfo::for_object(&dest_object, &dest_version); let (dest_object_encryption, 
dest_object_checksum_algorithm) = match dest_version.state { ObjectVersionState::Uploading { encryption, @@ -436,12 +389,8 @@ pub async fn handle_upload_part_copy( } => (encryption, checksum_algorithm), _ => unreachable!(), }; - let (dest_encryption, _) = EncryptionParams::check_decrypt( - &garage, - req.headers(), - &dest_object_encryption, - dest_oek_params, - )?; + let (dest_encryption, _) = + EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?; let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption); // Check source range is valid @@ -556,7 +505,7 @@ pub async fn handle_upload_part_copy( // Now, actually copy the blocks let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted()) - .add_algorithm(dest_object_checksum_algorithm.map(|(algo, _)| algo)); + .add(dest_object_checksum_algorithm); // First, create a stream that is able to read the source blocks // and extract the subrange if necessary. @@ -706,7 +655,7 @@ pub async fn handle_upload_part_copy( let checksums = checksummer.finalize(); let etag = dest_encryption.etag_from_md5(&checksums.md5); - let checksum = checksums.extract(dest_object_checksum_algorithm.map(|(algo, _)| algo))?; + let checksum = checksums.extract(dest_object_checksum_algorithm); // Put the part's ETag in the Versiontable dest_mpu.parts.put( @@ -746,15 +695,16 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request) -> Result) -> Result Result, Error> { let ReqCtx { bucket_params, .. } = ctx; @@ -27,7 +28,9 @@ pub async fn handle_get_cors(ctx: ReqCtx) -> Result, Error> { .header(http::header::CONTENT_TYPE, "application/xml") .body(string_body(xml))?) } else { - Err(Error::NoSuchCORSConfiguration) + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(empty_body())?) } } @@ -77,3 +80,218 @@ pub async fn handle_put_cors( .status(StatusCode::OK) .body(empty_body())?) 
} + +// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +#[serde(rename = "CORSConfiguration")] +pub struct CorsConfiguration { + #[serde(serialize_with = "xmlns_tag", skip_deserializing)] + pub xmlns: (), + // "default" is required to be able to parse an empty list of rules, + // cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types + #[serde(rename = "CORSRule", default)] + pub cors_rules: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct CorsRule { + #[serde(rename = "ID")] + pub id: Option, + #[serde(rename = "MaxAgeSeconds")] + pub max_age_seconds: Option, + #[serde(rename = "AllowedOrigin")] + pub allowed_origins: Vec, + #[serde(rename = "AllowedMethod")] + pub allowed_methods: Vec, + #[serde(rename = "AllowedHeader", default)] + pub allowed_headers: Vec, + #[serde(rename = "ExposeHeader", default)] + pub expose_headers: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct AllowedMethod { + #[serde(rename = "AllowedMethod")] + pub allowed_method: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct AllowedHeader { + #[serde(rename = "AllowedHeader")] + pub allowed_header: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct ExposeHeader { + #[serde(rename = "ExposeHeader")] + pub expose_header: Value, +} + +impl CorsConfiguration { + pub fn validate(&self) -> Result<(), Error> { + for r in self.cors_rules.iter() { + r.validate()?; + } + Ok(()) + } + + pub fn into_garage_cors_config(self) -> Result, Error> { + Ok(self + .cors_rules + .iter() + .map(CorsRule::to_garage_cors_rule) + .collect()) + } +} + +impl CorsRule { + pub fn validate(&self) -> Result<(), Error> { + for method in self.allowed_methods.iter() { + method + .0 + .parse::() + 
.ok_or_bad_request("Invalid CORSRule method")?; + } + for header in self + .allowed_headers + .iter() + .chain(self.expose_headers.iter()) + { + header + .0 + .parse::() + .ok_or_bad_request("Invalid HTTP header name")?; + } + Ok(()) + } + + pub fn to_garage_cors_rule(&self) -> GarageCorsRule { + let convert_vec = + |vval: &[Value]| vval.iter().map(|x| x.0.to_owned()).collect::>(); + GarageCorsRule { + id: self.id.as_ref().map(|x| x.0.to_owned()), + max_age_seconds: self.max_age_seconds.as_ref().map(|x| x.0 as u64), + allow_origins: convert_vec(&self.allowed_origins), + allow_methods: convert_vec(&self.allowed_methods), + allow_headers: convert_vec(&self.allowed_headers), + expose_headers: convert_vec(&self.expose_headers), + } + } + + pub fn from_garage_cors_rule(rule: &GarageCorsRule) -> Self { + let convert_vec = |vval: &[String]| { + vval.iter() + .map(|x| Value(x.clone())) + .collect::>() + }; + Self { + id: rule.id.as_ref().map(|x| Value(x.clone())), + max_age_seconds: rule.max_age_seconds.map(|x| IntValue(x as i64)), + allowed_origins: convert_vec(&rule.allow_origins), + allowed_methods: convert_vec(&rule.allow_methods), + allowed_headers: convert_vec(&rule.allow_headers), + expose_headers: convert_vec(&rule.expose_headers), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use quick_xml::de::from_str; + + #[test] + fn test_deserialize() -> Result<(), Error> { + let message = r#" + + + http://www.example.com + + PUT + POST + DELETE + + * + + + * + GET + + + qsdfjklm + 12345 + https://perdu.com + + GET + DELETE + * + * + +"#; + let conf: CorsConfiguration = from_str(message).unwrap(); + let ref_value = CorsConfiguration { + xmlns: (), + cors_rules: vec![ + CorsRule { + id: None, + max_age_seconds: None, + allowed_origins: vec!["http://www.example.com".into()], + allowed_methods: vec!["PUT".into(), "POST".into(), "DELETE".into()], + allowed_headers: vec!["*".into()], + expose_headers: vec![], + }, + CorsRule { + id: None, + max_age_seconds: None, + 
allowed_origins: vec!["*".into()], + allowed_methods: vec!["GET".into()], + allowed_headers: vec![], + expose_headers: vec![], + }, + CorsRule { + id: Some("qsdfjklm".into()), + max_age_seconds: Some(IntValue(12345)), + allowed_origins: vec!["https://perdu.com".into()], + allowed_methods: vec!["GET".into(), "DELETE".into()], + allowed_headers: vec!["*".into()], + expose_headers: vec!["*".into()], + }, + ], + }; + assert_eq! { + ref_value, + conf + }; + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + Ok(()) + } + + #[test] + fn test_deserialize_norules() -> Result<(), Error> { + let message = r#" +"#; + let conf: CorsConfiguration = from_str(message).unwrap(); + let ref_value = CorsConfiguration { + xmlns: (), + cors_rules: vec![], + }; + assert_eq! { + ref_value, + conf + }; + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + Ok(()) + } +} diff --git a/src/api/s3/delete.rs b/src/api/s3/delete.rs index 43824a04..d785b9d8 100644 --- a/src/api/s3/delete.rs +++ b/src/api/s3/delete.rs @@ -29,7 +29,7 @@ async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), .iter() .rev() .find(|v| !matches!(&v.state, ObjectVersionState::Aborted)) - .or_else(|| object.versions().iter().next_back()); + .or_else(|| object.versions().iter().rev().next()); let deleted_version = match deleted_version { Some(dv) => dv.uuid, None => { @@ -125,22 +125,13 @@ fn parse_delete_objects_xml(xml: &roxmltree::Document) -> Option }; let root = xml.root(); - let delete = root.children().find(|n| n.is_element())?; + let delete = root.first_child()?; if !delete.has_tag_name("Delete") { return None; } for item in delete.children() { - // Skip text nodes introduced by formatted XML. 
- if !item.is_element() { - // text nodes are allowed only if they contain whitespace characters only - if !item.text()?.trim().is_empty() { - return None; - } - continue; - } - if item.has_tag_name("Object") { let key = item.children().find(|e| e.has_tag_name("Key"))?; let key_str = key.text()?; @@ -148,7 +139,11 @@ fn parse_delete_objects_xml(xml: &roxmltree::Document) -> Option key: key_str.to_string(), }); } else if item.has_tag_name("Quiet") { - ret.quiet = item.text()? == "true"; + if item.text()? == "true" { + ret.quiet = true; + } else { + ret.quiet = false; + } } else { return None; } @@ -156,59 +151,3 @@ fn parse_delete_objects_xml(xml: &roxmltree::Document) -> Option Some(ret) } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_delete_objects_xml_with_formatting() { - let body = r#" - - - 1_746573745f66696c65 - - true - - "#; - let xml = roxmltree::Document::parse(body).expect("valid delete XML"); - let req = parse_delete_objects_xml(&xml).expect("request should be parsed"); - - assert_eq!(req.objects.len(), 1); - assert_eq!(req.objects[0].key, "1_746573745f66696c65"); - assert!(req.quiet); - } - - #[test] - fn parse_delete_objects_xml_rejects_non_whitespace_text_node() { - let body = r#"oops1_746573745f66696c65"#; - let xml = roxmltree::Document::parse(body).expect("valid XML"); - let req = parse_delete_objects_xml(&xml); - assert!(req.is_none()); - } - - #[test] - fn parse_delete_objects_xml_rejects_pretty_print_with_stray_text() { - let body = r#" - - oops - - 1_746573745f66696c65 - - - "#; - let xml = roxmltree::Document::parse(body).expect("valid XML"); - let req = parse_delete_objects_xml(&xml); - assert!(req.is_none()); - } - - #[test] - fn parse_delete_objects_xml_accepts_compact_valid_xml() { - let body = r#"1_746573745f66696c65false"#; - let xml = roxmltree::Document::parse(body).expect("valid XML"); - let req = parse_delete_objects_xml(&xml).expect("request should be parsed"); - assert_eq!(req.objects.len(), 1); - 
assert_eq!(req.objects[0].key, "1_746573745f66696c65"); - assert!(!req.quiet); - } -} diff --git a/src/api/s3/encryption.rs b/src/api/s3/encryption.rs index 1723b762..fa7285ca 100644 --- a/src/api/s3/encryption.rs +++ b/src/api/s3/encryption.rs @@ -11,7 +11,6 @@ use aes_gcm::{ }; use base64::prelude::*; use bytes::Bytes; -use sha2::Sha256; use futures::stream::Stream; use futures::task; @@ -22,12 +21,12 @@ use http::header::{HeaderMap, HeaderName, HeaderValue}; use garage_net::bytes_buf::BytesBuf; use garage_net::stream::{stream_asyncread, ByteStream}; use garage_rpc::rpc_helper::OrderTag; -use garage_util::data::{Hash, Uuid}; +use garage_util::data::Hash; use garage_util::error::Error as GarageError; use garage_util::migrate::Migrate; use garage_model::garage::Garage; -use garage_model::s3::object_table::*; +use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner}; use garage_api_common::common_error::*; use garage_api_common::signature::checksum::Md5Checksum; @@ -65,42 +64,32 @@ const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16; pub enum EncryptionParams { Plaintext, SseC { - /// the value of x-amz-server-side-encryption-customer-key client_key: Key, - /// the value of x-amz-server-side-encryption-customer-key-md5 client_key_md5: Md5Output, - /// the object encryption key, for uploads created in garage v2+ - object_key: Option>, - /// the compression level used for compressing data blocks compression_level: Option, }, } -#[derive(Clone, Copy)] -pub struct OekDerivationInfo<'a> { - pub bucket_id: Uuid, - pub version_id: Uuid, - pub object_key: &'a str, -} - impl EncryptionParams { pub fn is_encrypted(&self) -> bool { !matches!(self, Self::Plaintext) } pub fn is_same(a: &Self, b: &Self) -> bool { - // This function is used in CopyObject and UploadPartCopy to determine - // whether the object must be re-encrypted. If this returns true, - // data blocks are reused as-is. 
Since Garage v2, we are using - // object-specific encryption keys, so we know that if both source - // and destination are encrypted, it can't be with the same key. - matches!((a, b), (Self::Plaintext, Self::Plaintext)) + let relevant_info = |x: &Self| match x { + Self::Plaintext => None, + Self::SseC { + client_key, + compression_level, + .. + } => Some((*client_key, compression_level.is_some())), + }; + relevant_info(a) == relevant_info(b) } pub fn new_from_headers( garage: &Garage, headers: &HeaderMap, - oek_info: OekDerivationInfo<'_>, ) -> Result { let key = parse_request_headers( headers, @@ -112,7 +101,6 @@ impl EncryptionParams { Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC { client_key, client_key_md5, - object_key: Some(oek_info.derive_oek(&client_key)), compression_level: garage.config.compression_level, }), None => Ok(EncryptionParams::Plaintext), @@ -121,7 +109,7 @@ impl EncryptionParams { pub fn add_response_headers(&self, resp: &mut http::response::Builder) { if let Self::SseC { client_key_md5, .. 
} = self { - let md5 = BASE64_STANDARD.encode(client_key_md5); + let md5 = BASE64_STANDARD.encode(&client_key_md5); resp.headers_mut().unwrap().insert( X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, @@ -138,7 +126,6 @@ impl EncryptionParams { garage: &Garage, headers: &HeaderMap, obj_enc: &'a ObjectVersionEncryption, - oek_info: OekDerivationInfo<'_>, ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> { let key = parse_request_headers( headers, @@ -146,14 +133,13 @@ impl EncryptionParams { &X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, &X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, )?; - Self::check_decrypt_common(garage, key, obj_enc, oek_info) + Self::check_decrypt_common(garage, key, obj_enc) } pub fn check_decrypt_for_copy_source<'a>( garage: &Garage, headers: &HeaderMap, obj_enc: &'a ObjectVersionEncryption, - oek_info: OekDerivationInfo<'_>, ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> { let key = parse_request_headers( headers, @@ -161,39 +147,29 @@ impl EncryptionParams { &X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, &X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, )?; - Self::check_decrypt_common(garage, key, obj_enc, oek_info) + Self::check_decrypt_common(garage, key, obj_enc) } fn check_decrypt_common<'a>( garage: &Garage, key: Option<(Key, Md5Output)>, obj_enc: &'a ObjectVersionEncryption, - oek_info: OekDerivationInfo<'_>, ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> { match (key, &obj_enc) { ( Some((client_key, client_key_md5)), - ObjectVersionEncryption::SseC { - inner, - compressed, - use_oek, - }, + ObjectVersionEncryption::SseC { inner, compressed }, ) => { let enc = Self::SseC { client_key, client_key_md5, - object_key: if *use_oek { - Some(oek_info.derive_oek(&client_key)) - } else { - None - }, compression_level: if *compressed { Some(garage.config.compression_level.unwrap_or(1)) } else { None }, }; - let plaintext = enc.decrypt_blob(inner)?; + let plaintext = 
enc.decrypt_blob(&inner)?; let inner = ObjectVersionMetaInner::decode(&plaintext) .ok_or_internal_error("Could not decode encrypted metadata")?; Ok((enc, Cow::Owned(inner))) @@ -217,16 +193,13 @@ impl EncryptionParams { ) -> Result { match self { Self::SseC { - compression_level, - object_key, - .. + compression_level, .. } => { let plaintext = meta.encode().map_err(GarageError::from)?; let ciphertext = self.encrypt_blob(&plaintext)?; Ok(ObjectVersionEncryption::SseC { inner: ciphertext.into_owned(), compressed: compression_level.is_some(), - use_oek: object_key.is_some(), }) } Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }), @@ -245,7 +218,7 @@ impl EncryptionParams { // So we just put some random bytes. let mut random = [0u8; 16]; OsRng.fill_bytes(&mut random); - hex::encode(random) + hex::encode(&random) } } } @@ -255,37 +228,24 @@ impl EncryptionParams { // This is used for encrypting object metadata and inlined data for small objects. // This does not compress anything. - fn cipher(&self) -> Option { - match self { - Self::SseC { - object_key: Some(oek), - .. - } => Some(Aes256Gcm::new(oek)), - Self::SseC { - client_key, - object_key: None, - .. - } => Some(Aes256Gcm::new(client_key)), - Self::Plaintext => None, - } - } - pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result, Error> { - match self.cipher() { - Some(cipher) => { + match self { + Self::SseC { client_key, .. } => { + let cipher = Aes256Gcm::new(&client_key); let nonce = Aes256Gcm::generate_nonce(&mut OsRng); let ciphertext = cipher .encrypt(&nonce, blob) .ok_or_internal_error("Encryption failed")?; Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat())) } - None => Ok(Cow::Borrowed(blob)), + Self::Plaintext => Ok(Cow::Borrowed(blob)), } } pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result, Error> { - match self.cipher() { - Some(cipher) => { + match self { + Self::SseC { client_key, .. 
} => { + let cipher = Aes256Gcm::new(&client_key); let nonce_size = ::NonceSize::to_usize(); let nonce = Nonce::from_slice( blob.get(..nonce_size) @@ -298,7 +258,7 @@ impl EncryptionParams { )?; Ok(Cow::Owned(plaintext)) } - None => Ok(Cow::Borrowed(blob)), + Self::Plaintext => Ok(Cow::Borrowed(blob)), } } @@ -324,12 +284,10 @@ impl EncryptionParams { Self::Plaintext => stream, Self::SseC { client_key, - object_key, compression_level, .. } => { - let key = object_key.as_ref().unwrap_or(client_key); - let plaintext = DecryptStream::new(stream, *key); + let plaintext = DecryptStream::new(stream, *client_key); if compression_level.is_some() { let reader = stream_asyncread(Box::pin(plaintext)); let reader = BufReader::new(reader); @@ -349,12 +307,9 @@ impl EncryptionParams { Self::Plaintext => Ok(block), Self::SseC { client_key, - object_key, compression_level, .. } => { - let key = object_key.as_ref().unwrap_or(client_key); - let block = if let Some(level) = compression_level { Cow::Owned( garage_block::zstd_encode(block.as_ref(), *level) @@ -370,7 +325,7 @@ impl EncryptionParams { OsRng.fill_bytes(&mut nonce); ret.extend_from_slice(nonce.as_slice()); - let mut cipher = EncryptorLE31::::new(key, &nonce); + let mut cipher = EncryptorLE31::::new(&client_key, &nonce); let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable(); if iter.peek().is_none() { @@ -406,13 +361,6 @@ impl EncryptionParams { } } -pub fn has_encryption_header(headers: &HeaderMap) -> bool { - match headers.get(X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM) { - Some(h) => h.as_bytes() == CUSTOMER_ALGORITHM_AES256, - None => false, - } -} - fn parse_request_headers( headers: &HeaderMap, alg_header: &HeaderName, @@ -430,7 +378,7 @@ fn parse_request_headers( let key_b64 = key.ok_or_bad_request("Missing server-side-encryption-customer-key header")?; let key_bytes: [u8; 32] = BASE64_STANDARD - .decode(key_b64) + .decode(&key_b64) .ok_or_bad_request( "Invalid server-side-encryption-customer-key 
header: invalid base64", )? @@ -442,7 +390,7 @@ fn parse_request_headers( let md5_b64 = md5.ok_or_bad_request("Missing server-side-encryption-customer-key-md5 header")?; - let md5_bytes = BASE64_STANDARD.decode(md5_b64).ok_or_bad_request( + let md5_bytes = BASE64_STANDARD.decode(&md5_b64).ok_or_bad_request( "Invalid server-side-encryption-customer-key-md5 header: invalid bass64", )?; @@ -472,30 +420,6 @@ fn parse_request_headers( } } -impl<'a> OekDerivationInfo<'a> { - pub fn for_object<'b>(object: &'a Object, version: &'b ObjectVersion) -> Self { - Self { - bucket_id: object.bucket_id, - version_id: version.uuid, - object_key: &object.key, - } - } - - fn derive_oek(&self, client_key: &Key) -> Key { - use hmac::{Hmac, Mac}; - - // info = bucket_id + object_name + version_uuid + "garage-object-encryption-key" - // oek = hmac_sha256(ssec_key, info) - let mut hmac = as Mac>::new_from_slice(client_key.as_slice()) - .expect("create hmac-sha256"); - hmac.update(b"garage-object-encryption-key"); - hmac.update(self.bucket_id.as_slice()); - hmac.update(self.version_id.as_slice()); - hmac.update(self.object_key.as_bytes()); - hmac.finalize().into_bytes() - } -} - // ---- encrypt & decrypt streams ---- #[pin_project::pin_project] @@ -508,7 +432,6 @@ struct DecryptStream { state: DecryptStreamState, } -#[expect(clippy::large_enum_variant)] enum DecryptStreamState { Starting, Running(DecryptorLE31), @@ -545,7 +468,7 @@ impl Stream for DecryptStream { let nonce_size = StreamNonceSize::to_usize(); if let Some(nonce) = this.buf.take_exact(nonce_size) { let nonce = Nonce::from_slice(nonce.as_ref()); - *this.state = DecryptStreamState::Running(DecryptorLE31::new(this.key, nonce)); + *this.state = DecryptStreamState::Running(DecryptorLE31::new(&this.key, nonce)); break; } @@ -585,7 +508,8 @@ impl Stream for DecryptStream { if matches!(this.state, DecryptStreamState::Done) { if !this.buf.is_empty() { - return Poll::Ready(Some(Err(std::io::Error::other( + return 
Poll::Ready(Some(Err(std::io::Error::new( + std::io::ErrorKind::Other, "Decrypt: unexpected bytes after last encrypted chunk", )))); } @@ -619,7 +543,10 @@ impl Stream for DecryptStream { match res { Ok(bytes) if bytes.is_empty() => Poll::Ready(None), Ok(bytes) => Poll::Ready(Some(Ok(bytes.into()))), - Err(_) => Poll::Ready(Some(Err(std::io::Error::other("Decryption failed")))), + Err(_) => Poll::Ready(Some(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Decryption failed", + )))), } } } @@ -642,7 +569,6 @@ mod tests { let enc = EncryptionParams::SseC { client_key: Aes256Gcm::generate_key(&mut OsRng), client_key_md5: Default::default(), // not needed - object_key: Some(Aes256Gcm::generate_key(&mut OsRng)), compression_level, }; @@ -660,11 +586,11 @@ mod tests { #[tokio::test] async fn test_encrypt_block() { - test_block_enc(None).await; + test_block_enc(None).await } #[tokio::test] async fn test_encrypt_block_compressed() { - test_block_enc(Some(1)).await; + test_block_enc(Some(1)).await } } diff --git a/src/api/s3/error.rs b/src/api/s3/error.rs index 09aa0604..64112084 100644 --- a/src/api/s3/error.rs +++ b/src/api/s3/error.rs @@ -31,13 +31,8 @@ pub enum Error { // Category: cannot process /// Authorization Header Malformed - #[error( - "Authorization header malformed, unexpected scope: '{unexpected}', expected: '{expected}'" - )] - AuthorizationHeaderMalformed { - unexpected: String, - expected: String, - }, + #[error("Authorization header malformed, unexpected scope: {0}")] + AuthorizationHeaderMalformed(String), /// The object requested don't exists #[error("Key not found")] @@ -47,14 +42,6 @@ pub enum Error { #[error("Upload not found")] NoSuchUpload, - /// CORS configuration doesn't exist for this bucket - #[error("The CORS configuration does not exist")] - NoSuchCORSConfiguration, - - /// CORS configuration doesn't exist for this bucket - #[error("The lifecycle configuration does not exist")] - NoSuchLifecycleConfiguration, - /// Precondition failed 
(e.g. x-amz-copy-source-if-match) #[error("At least one of the preconditions you specified did not hold")] PreconditionFailed, @@ -63,11 +50,11 @@ pub enum Error { #[error("Parts given to CompleteMultipartUpload do not match uploaded parts")] InvalidPart, - /// Parts given to `CompleteMultipartUpload` were not in ascending order + /// Parts given to CompleteMultipartUpload were not in ascending order #[error("Parts given to CompleteMultipartUpload were not in ascending order")] InvalidPartOrder, - /// In `CompleteMultipartUpload`: not enough data + /// In CompleteMultipartUpload: not enough data /// (here we are more lenient than AWS S3) #[error("Proposed upload is smaller than the minimum allowed object size")] EntityTooSmall, @@ -82,16 +69,8 @@ pub enum Error { InvalidUtf8String(#[from] std::string::FromUtf8Error), /// The client sent invalid XML data - #[error("failed to deserialize XML")] - InvalidXml(#[from] roxmltree::Error), - - /// The client sent invalid XML data - #[error("XML deserialization failed")] - InvalidXmlDe(#[from] quick_xml::de::DeError), - - /// The server failed to serialize data into XML - #[error("failed to serialize XML")] - InvalidXmlSe(#[from] quick_xml::se::SeError), + #[error("Invalid XML: {0}")] + InvalidXml(String), /// The client sent a range header with invalid value #[error("Invalid HTTP range: {0:?}")] @@ -104,6 +83,10 @@ pub enum Error { /// The provided digest (checksum) value was invalid #[error("Invalid digest: {0}")] InvalidDigest(String), + + /// The client sent a request for an action not supported by garage + #[error("Unimplemented action: {0}")] + NotImplemented(String), } commonErrorDerivative!(Error); @@ -122,17 +105,25 @@ impl From<(http_range::HttpRangeParseError, u64)> for Error { } } +impl From for Error { + fn from(err: roxmltree::Error) -> Self { + Self::InvalidXml(format!("{}", err)) + } +} + +impl From for Error { + fn from(err: quick_xml::de::DeError) -> Self { + Self::InvalidXml(format!("{}", err)) + } +} + 
impl From for Error { fn from(err: SignatureError) -> Self { match err { SignatureError::Common(c) => Self::Common(c), - SignatureError::AuthorizationHeaderMalformed { - unexpected, - expected, - } => Self::AuthorizationHeaderMalformed { - unexpected, - expected, - }, + SignatureError::AuthorizationHeaderMalformed(c) => { + Self::AuthorizationHeaderMalformed(c) + } SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i), SignatureError::InvalidDigest(d) => Self::InvalidDigest(d), } @@ -155,16 +146,13 @@ impl Error { Error::InvalidPart => "InvalidPart", Error::InvalidPartOrder => "InvalidPartOrder", Error::EntityTooSmall => "EntityTooSmall", - Error::AuthorizationHeaderMalformed { .. } => "AuthorizationHeaderMalformed", + Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed", + Error::NotImplemented(_) => "NotImplemented", Error::InvalidXml(_) => "MalformedXML", - Error::InvalidXmlDe(_) => "MalformedXML", - Error::InvalidXmlSe(_) => "InternalError", Error::InvalidRange(_) => "InvalidRange", Error::InvalidDigest(_) => "InvalidDigest", Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest", Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError", - Error::NoSuchCORSConfiguration => "NoSuchCORSConfiguration", - Error::NoSuchLifecycleConfiguration => "NoSuchLifecycleConfiguration", } } } @@ -174,21 +162,17 @@ impl ApiError for Error { fn http_status_code(&self) -> StatusCode { match self { Error::Common(c) => c.http_status_code(), - Error::NoSuchKey - | Error::NoSuchUpload - | Error::NoSuchCORSConfiguration - | Error::NoSuchLifecycleConfiguration => StatusCode::NOT_FOUND, + Error::NoSuchKey | Error::NoSuchUpload => StatusCode::NOT_FOUND, Error::PreconditionFailed => StatusCode::PRECONDITION_FAILED, Error::InvalidRange(_) => StatusCode::RANGE_NOT_SATISFIABLE, - Error::InvalidXmlSe(_) => StatusCode::INTERNAL_SERVER_ERROR, - Error::AuthorizationHeaderMalformed { .. 
} + Error::NotImplemented(_) => StatusCode::NOT_IMPLEMENTED, + Error::AuthorizationHeaderMalformed(_) | Error::InvalidPart | Error::InvalidPartOrder | Error::EntityTooSmall | Error::InvalidDigest(_) | Error::InvalidEncryptionAlgorithm(_) | Error::InvalidXml(_) - | Error::InvalidXmlDe(_) | Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST, } @@ -198,7 +182,6 @@ impl ApiError for Error { use hyper::header; header_map.append(header::CONTENT_TYPE, "application/xml".parse().unwrap()); - header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap()); #[allow(clippy::single_match)] match self { diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs index 02500bf4..a1e4ce10 100644 --- a/src/api/s3/get.rs +++ b/src/api/s3/get.rs @@ -31,7 +31,7 @@ use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AM use crate::api_server::ResBody; use crate::copy::*; -use crate::encryption::{EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; const X_AMZ_MP_PARTS_COUNT: HeaderName = HeaderName::from_static("x-amz-mp-parts-count"); @@ -93,7 +93,7 @@ fn object_headers( /// Override headers according to specific query parameters, see /// section "Overriding response header values through the request" in -/// +/// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html fn getobject_override_headers( overrides: GetObjectOverrides, resp: &mut http::response::Builder, @@ -120,42 +120,16 @@ fn getobject_override_headers( fn handle_http_precondition( version: &ObjectVersion, version_meta: &ObjectVersionMeta, - meta_inner: &ObjectVersionMetaInner, - encryption: EncryptionParams, req: &Request<()>, ) -> Result>, Error> { let precondition_headers = PreconditionHeaders::parse(req)?; - if let Some(status_code) = precondition_headers.check(version, &version_meta.etag) { - let mut response = object_headers( - version, - version_meta, - meta_inner, - encryption, - ChecksumMode { 
enabled: false }, - ); - if let Some(header_map) = response.headers_mut() { - use http::header; - let headers_to_keep: Vec<_> = header_map - .drain() - .filter(|(k, _v)| { - k.as_ref().is_some_and(|k| { - [ - header::CONTENT_LOCATION, - header::DATE, - header::ETAG, - header::VARY, - header::CACHE_CONTROL, - header::EXPIRES, - ] - .contains(k) - }) - }) - .collect(); - header_map.extend(headers_to_keep); - } + if let Some(status_code) = precondition_headers.check(&version, &version_meta.etag)? { Ok(Some( - response.status(status_code).body(empty_body()).unwrap(), + Response::builder() + .status(status_code) + .body(empty_body()) + .unwrap(), )) } else { Ok(None) @@ -204,25 +178,19 @@ pub async fn handle_head_without_ctx( _ => unreachable!(), }; - let (encryption, headers) = EncryptionParams::check_decrypt( - &garage, - req.headers(), - &version_meta.encryption, - OekDerivationInfo::for_object(&object, object_version), - )?; - - if let Some(res) = - handle_http_precondition(object_version, version_meta, &headers, encryption, req)? - { + if let Some(res) = handle_http_precondition(object_version, version_meta, req)? 
{ return Ok(res); } - let checksum_mode = checksum_mode(req); + let (encryption, headers) = + EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?; - if let Some(part_number) = part_number { + let checksum_mode = checksum_mode(&req); + + if let Some(pn) = part_number { match version_data { ObjectVersionData::Inline(_, _) => { - if part_number != 1 { + if pn != 1 { return Err(Error::InvalidPart); } let bytes_len = version_meta.size; @@ -251,7 +219,7 @@ pub async fn handle_head_without_ctx( check_version_not_deleted(&version)?; let (part_offset, part_end) = - calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; + calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; Ok(object_headers( object_version, @@ -333,27 +301,14 @@ pub async fn handle_get_without_ctx( ObjectVersionData::FirstBlock(meta, _) => meta, }; - let (enc, headers) = EncryptionParams::check_decrypt( - &garage, - req.headers(), - &last_v_meta.encryption, - OekDerivationInfo::for_object(&object, last_v), - )?; - - if let Some(res) = handle_http_precondition(last_v, last_v_meta, &headers, enc, req)? { + if let Some(res) = handle_http_precondition(last_v, last_v_meta, req)? { return Ok(res); } - let checksum_mode = checksum_mode(req); + let (enc, headers) = + EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?; - let handle_get_info = HandleGetInfo { - garage, - version: last_v, - version_data: last_v_data, - version_meta: last_v_meta, - encryption: enc, - meta_inner: &headers, - }; + let checksum_mode = checksum_mode(&req); match (part_number, parse_range_header(req, last_v_meta.size)?) 
{ (Some(_), Some(_)) => Err(Error::bad_request( @@ -361,7 +316,12 @@ pub async fn handle_get_without_ctx( )), (Some(pn), None) => { handle_get_part( - handle_get_info, + garage, + last_v, + last_v_data, + last_v_meta, + enc, + &headers, pn, ChecksumMode { // TODO: for multipart uploads, checksums of each part should be stored @@ -374,7 +334,12 @@ pub async fn handle_get_without_ctx( } (None, Some(range)) => { handle_get_range( - handle_get_info, + garage, + last_v, + last_v_data, + last_v_meta, + enc, + &headers, range.start, range.start + range.length, ChecksumMode { @@ -386,14 +351,26 @@ pub async fn handle_get_without_ctx( ) .await } - (None, None) => handle_get_full(handle_get_info, overrides, checksum_mode).await, + (None, None) => { + handle_get_full( + garage, + last_v, + last_v_data, + last_v_meta, + enc, + &headers, + overrides, + checksum_mode, + ) + .await + } } } pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> { if version.deleted.get() { // the version was deleted between when the object_table was consulted - // and now, this could mean the object was deleted, or overridden. + // and now, this could mean the object was deleted, or overriden. // Rather than say the key doesn't exist, return a transient error // to signal the client to try again. 
return Err(CommonError::InternalError(UtilError::Message( @@ -405,37 +382,28 @@ pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> Ok(()) } -struct HandleGetInfo<'a> { - garage: Arc, - version: &'a ObjectVersion, - version_data: &'a ObjectVersionData, - version_meta: &'a ObjectVersionMeta, - encryption: EncryptionParams, - meta_inner: &'a ObjectVersionMetaInner, -} - async fn handle_get_full( - info: HandleGetInfo<'_>, + garage: Arc, + version: &ObjectVersion, + version_data: &ObjectVersionData, + version_meta: &ObjectVersionMeta, + encryption: EncryptionParams, + meta_inner: &ObjectVersionMetaInner, overrides: GetObjectOverrides, checksum_mode: ChecksumMode, ) -> Result, Error> { let mut resp_builder = object_headers( - info.version, - info.version_meta, - info.meta_inner, - info.encryption, + version, + version_meta, + &meta_inner, + encryption, checksum_mode, ) - .header(CONTENT_LENGTH, format!("{}", info.version_meta.size)) + .header(CONTENT_LENGTH, format!("{}", version_meta.size)) .status(StatusCode::OK); getobject_override_headers(overrides, &mut resp_builder)?; - let stream = full_object_byte_stream( - info.garage, - info.version, - info.version_data, - info.encryption, - ); + let stream = full_object_byte_stream(garage, version, version_data, encryption); Ok(resp_builder.body(response_body_from_stream(stream))?) 
} @@ -515,7 +483,12 @@ pub fn full_object_byte_stream( } async fn handle_get_range( - info: HandleGetInfo<'_>, + garage: Arc, + version: &ObjectVersion, + version_data: &ObjectVersionData, + version_meta: &ObjectVersionMeta, + encryption: EncryptionParams, + meta_inner: &ObjectVersionMetaInner, begin: u64, end: u64, checksum_mode: ChecksumMode, @@ -523,24 +496,18 @@ async fn handle_get_range( // Here we do not use getobject_override_headers because we don't // want to add any overridden headers (those should not be added // when returning PARTIAL_CONTENT) - let resp_builder = object_headers( - info.version, - info.version_meta, - info.meta_inner, - info.encryption, - checksum_mode, - ) - .header(CONTENT_LENGTH, format!("{}", end - begin)) - .header( - CONTENT_RANGE, - format!("bytes {}-{}/{}", begin, end - 1, info.version_meta.size), - ) - .status(StatusCode::PARTIAL_CONTENT); + let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode) + .header(CONTENT_LENGTH, format!("{}", end - begin)) + .header( + CONTENT_RANGE, + format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), + ) + .status(StatusCode::PARTIAL_CONTENT); - match &info.version_data { + match &version_data { ObjectVersionData::DeleteMarker => unreachable!(), ObjectVersionData::Inline(_meta, bytes) => { - let bytes = info.encryption.decrypt_blob(bytes)?; + let bytes = encryption.decrypt_blob(&bytes)?; if end as usize <= bytes.len() { let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into()); Ok(resp_builder.body(body)?) @@ -551,47 +518,46 @@ async fn handle_get_range( } } ObjectVersionData::FirstBlock(_meta, _first_block_hash) => { - let version = info - .garage + let version = garage .version_table - .get(&info.version.uuid, &EmptyKey) + .get(&version.uuid, &EmptyKey) .await? 
.ok_or(Error::NoSuchKey)?; check_version_not_deleted(&version)?; - let body = body_from_blocks_range( - info.garage, - info.encryption, - version.blocks.items(), - begin, - end, - ); + let body = + body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end); Ok(resp_builder.body(body)?) } } } async fn handle_get_part( - info: HandleGetInfo<'_>, + garage: Arc, + object_version: &ObjectVersion, + version_data: &ObjectVersionData, + version_meta: &ObjectVersionMeta, + encryption: EncryptionParams, + meta_inner: &ObjectVersionMetaInner, part_number: u64, checksum_mode: ChecksumMode, ) -> Result, Error> { // Same as for get_range, no getobject_override_headers let resp_builder = object_headers( - info.version, - info.version_meta, - info.meta_inner, - info.encryption, + object_version, + version_meta, + meta_inner, + encryption, checksum_mode, ) .status(StatusCode::PARTIAL_CONTENT); - match info.version_data { + match version_data { ObjectVersionData::Inline(_, bytes) => { if part_number != 1 { return Err(Error::InvalidPart); } - let bytes = info.encryption.decrypt_blob(bytes)?; - assert_eq!(bytes.len() as u64, info.version_meta.size); + let bytes = encryption.decrypt_blob(&bytes)?; + assert_eq!(bytes.len() as u64, version_meta.size); Ok(resp_builder .header(CONTENT_LENGTH, format!("{}", bytes.len())) .header( @@ -602,10 +568,9 @@ async fn handle_get_part( .body(bytes_body(bytes.into_owned().into()))?) } ObjectVersionData::FirstBlock(_, _) => { - let version = info - .garage + let version = garage .version_table - .get(&info.version.uuid, &EmptyKey) + .get(&object_version.uuid, &EmptyKey) .await? 
.ok_or(Error::NoSuchKey)?; @@ -614,19 +579,14 @@ async fn handle_get_part( let (begin, end) = calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; - let body = body_from_blocks_range( - info.garage, - info.encryption, - version.blocks.items(), - begin, - end, - ); + let body = + body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end); Ok(resp_builder .header(CONTENT_LENGTH, format!("{}", end - begin)) .header( CONTENT_RANGE, - format!("bytes {}-{}/{}", begin, end - 1, info.version_meta.size), + format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), ) .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?)) .body(body)?) @@ -740,7 +700,11 @@ fn body_from_blocks_range( Some(None) } else { // The chunk has an intersection with the requested range - let start_in_chunk = begin.saturating_sub(*chunk_offset); + let start_in_chunk = if *chunk_offset > begin { + 0 + } else { + begin - *chunk_offset + }; let end_in_chunk = if *chunk_offset + chunk_len < end { chunk_len } else { @@ -801,7 +765,10 @@ fn error_stream_item(e: E) -> ByteStream { } fn std_error_from_read_error(e: E) -> std::io::Error { - std::io::Error::other(format!("Error while reading object data: {}", e)) + std::io::Error::new( + std::io::ErrorKind::Other, + format!("Error while reading object data: {}", e), + ) } // ---- @@ -877,7 +844,7 @@ impl PreconditionHeaders { }) } - fn check(&self, v: &ObjectVersion, etag: &str) -> Option { + fn check(&self, v: &ObjectVersion, etag: &str) -> Result, Error> { // we store date with ms precision, but headers are precise to the second: truncate // the timestamp to handle the same-second edge case let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000); @@ -887,32 +854,32 @@ impl PreconditionHeaders { if let Some(im) = &self.if_match { // Step 1: if-match is present if !im.iter().any(|x| x == etag || x == "*") { - return Some(StatusCode::PRECONDITION_FAILED); + return 
Ok(Some(StatusCode::PRECONDITION_FAILED)); } } else if let Some(ius) = &self.if_unmodified_since { // Step 2: if-unmodified-since is present, and if-match is absent if v_date > *ius { - return Some(StatusCode::PRECONDITION_FAILED); + return Ok(Some(StatusCode::PRECONDITION_FAILED)); } } if let Some(inm) = &self.if_none_match { // Step 3: if-none-match is present if inm.iter().any(|x| x == etag || x == "*") { - return Some(StatusCode::NOT_MODIFIED); + return Ok(Some(StatusCode::NOT_MODIFIED)); } } else if let Some(ims) = &self.if_modified_since { // Step 4: if-modified-since is present, and if-none-match is absent if v_date <= *ims { - return Some(StatusCode::NOT_MODIFIED); + return Ok(Some(StatusCode::NOT_MODIFIED)); } } - None + Ok(None) } pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> { - match self.check(v, etag) { + match self.check(v, etag)? { Some(_) => Err(Error::PreconditionFailed), None => Ok(()), } diff --git a/src/api/s3/lifecycle.rs b/src/api/s3/lifecycle.rs index 83064bca..ccda6cfd 100644 --- a/src/api/s3/lifecycle.rs +++ b/src/api/s3/lifecycle.rs @@ -2,14 +2,18 @@ use quick_xml::de::from_reader; use hyper::{Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + use garage_api_common::helpers::*; -use garage_api_common::xml::lifecycle::*; use crate::api_server::{ReqBody, ResBody}; use crate::error::*; -use crate::xml::to_xml_with_header; +use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; -use garage_model::bucket_table::Bucket; +use garage_model::bucket_table::{ + parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration, + LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule, +}; pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result, Error> { let ReqCtx { bucket_params, .. 
} = ctx; @@ -22,7 +26,9 @@ pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result, Erro .header(http::header::CONTENT_TYPE, "application/xml") .body(string_body(xml))?) } else { - Err(Error::NoSuchLifecycleConfiguration) + Ok(Response::builder() + .status(StatusCode::NOT_FOUND) + .body(empty_body())?) } } @@ -72,3 +78,323 @@ pub async fn handle_put_lifecycle( .status(StatusCode::OK) .body(empty_body())?) } + +// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct LifecycleConfiguration { + #[serde(serialize_with = "xmlns_tag", skip_deserializing)] + pub xmlns: (), + #[serde(rename = "Rule")] + pub lifecycle_rules: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct LifecycleRule { + #[serde(rename = "ID")] + pub id: Option, + #[serde(rename = "Status")] + pub status: Value, + #[serde(rename = "Filter", default)] + pub filter: Option, + #[serde(rename = "Expiration", default)] + pub expiration: Option, + #[serde(rename = "AbortIncompleteMultipartUpload", default)] + pub abort_incomplete_mpu: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)] +pub struct Filter { + #[serde(rename = "And")] + pub and: Option>, + #[serde(rename = "Prefix")] + pub prefix: Option, + #[serde(rename = "ObjectSizeGreaterThan")] + pub size_gt: Option, + #[serde(rename = "ObjectSizeLessThan")] + pub size_lt: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Expiration { + #[serde(rename = "Days")] + pub days: Option, + #[serde(rename = "Date")] + pub at_date: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct AbortIncompleteMpu { + #[serde(rename = "DaysAfterInitiation")] + pub days: IntValue, +} + +impl LifecycleConfiguration { + pub fn validate_into_garage_lifecycle_config( + self, + ) -> Result, &'static str> { 
+ let mut ret = vec![]; + for rule in self.lifecycle_rules { + ret.push(rule.validate_into_garage_lifecycle_rule()?); + } + Ok(ret) + } + + pub fn from_garage_lifecycle_config(config: &[GarageLifecycleRule]) -> Self { + Self { + xmlns: (), + lifecycle_rules: config + .iter() + .map(LifecycleRule::from_garage_lifecycle_rule) + .collect(), + } + } +} + +impl LifecycleRule { + pub fn validate_into_garage_lifecycle_rule(self) -> Result { + let enabled = match self.status.0.as_str() { + "Enabled" => true, + "Disabled" => false, + _ => return Err("invalid value for "), + }; + + let filter = self + .filter + .map(Filter::validate_into_garage_lifecycle_filter) + .transpose()? + .unwrap_or_default(); + + let abort_incomplete_mpu_days = self.abort_incomplete_mpu.map(|x| x.days.0 as usize); + + let expiration = self + .expiration + .map(Expiration::validate_into_garage_lifecycle_expiration) + .transpose()?; + + Ok(GarageLifecycleRule { + id: self.id.map(|x| x.0), + enabled, + filter, + abort_incomplete_mpu_days, + expiration, + }) + } + + pub fn from_garage_lifecycle_rule(rule: &GarageLifecycleRule) -> Self { + Self { + id: rule.id.as_deref().map(Value::from), + status: if rule.enabled { + Value::from("Enabled") + } else { + Value::from("Disabled") + }, + filter: Filter::from_garage_lifecycle_filter(&rule.filter), + abort_incomplete_mpu: rule + .abort_incomplete_mpu_days + .map(|days| AbortIncompleteMpu { + days: IntValue(days as i64), + }), + expiration: rule + .expiration + .as_ref() + .map(Expiration::from_garage_lifecycle_expiration), + } + } +} + +impl Filter { + pub fn count(&self) -> i32 { + fn count(x: &Option) -> i32 { + x.as_ref().map(|_| 1).unwrap_or(0) + } + count(&self.prefix) + count(&self.size_gt) + count(&self.size_lt) + } + + pub fn validate_into_garage_lifecycle_filter( + self, + ) -> Result { + if self.count() > 0 && self.and.is_some() { + Err("Filter tag cannot contain both and another condition") + } else if let Some(and) = self.and { + if 
and.and.is_some() { + return Err("Nested tags"); + } + Ok(and.internal_into_garage_lifecycle_filter()) + } else if self.count() > 1 { + Err("Multiple Filter conditions must be wrapped in an tag") + } else { + Ok(self.internal_into_garage_lifecycle_filter()) + } + } + + fn internal_into_garage_lifecycle_filter(self) -> GarageLifecycleFilter { + GarageLifecycleFilter { + prefix: self.prefix.map(|x| x.0), + size_gt: self.size_gt.map(|x| x.0 as u64), + size_lt: self.size_lt.map(|x| x.0 as u64), + } + } + + pub fn from_garage_lifecycle_filter(rule: &GarageLifecycleFilter) -> Option { + let filter = Filter { + and: None, + prefix: rule.prefix.as_deref().map(Value::from), + size_gt: rule.size_gt.map(|x| IntValue(x as i64)), + size_lt: rule.size_lt.map(|x| IntValue(x as i64)), + }; + match filter.count() { + 0 => None, + 1 => Some(filter), + _ => Some(Filter { + and: Some(Box::new(filter)), + ..Default::default() + }), + } + } +} + +impl Expiration { + pub fn validate_into_garage_lifecycle_expiration( + self, + ) -> Result { + match (self.days, self.at_date) { + (Some(_), Some(_)) => Err("cannot have both and in "), + (None, None) => Err(" must contain either or "), + (Some(days), None) => Ok(GarageLifecycleExpiration::AfterDays(days.0 as usize)), + (None, Some(date)) => { + parse_lifecycle_date(&date.0)?; + Ok(GarageLifecycleExpiration::AtDate(date.0)) + } + } + } + + pub fn from_garage_lifecycle_expiration(exp: &GarageLifecycleExpiration) -> Self { + match exp { + GarageLifecycleExpiration::AfterDays(days) => Expiration { + days: Some(IntValue(*days as i64)), + at_date: None, + }, + GarageLifecycleExpiration::AtDate(date) => Expiration { + days: None, + at_date: Some(Value(date.to_string())), + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use quick_xml::de::from_str; + + #[test] + fn test_deserialize_lifecycle_config() -> Result<(), Error> { + let message = r#" + + + id1 + Enabled + + documents/ + + + 7 + + + + id2 + Enabled + + + logs/ + 1000000 + + + 
+ 365 + + +"#; + let conf: LifecycleConfiguration = from_str(message).unwrap(); + let ref_value = LifecycleConfiguration { + xmlns: (), + lifecycle_rules: vec![ + LifecycleRule { + id: Some("id1".into()), + status: "Enabled".into(), + filter: Some(Filter { + prefix: Some("documents/".into()), + ..Default::default() + }), + expiration: None, + abort_incomplete_mpu: Some(AbortIncompleteMpu { days: IntValue(7) }), + }, + LifecycleRule { + id: Some("id2".into()), + status: "Enabled".into(), + filter: Some(Filter { + and: Some(Box::new(Filter { + prefix: Some("logs/".into()), + size_gt: Some(IntValue(1000000)), + ..Default::default() + })), + ..Default::default() + }), + expiration: Some(Expiration { + days: Some(IntValue(365)), + at_date: None, + }), + abort_incomplete_mpu: None, + }, + ], + }; + assert_eq! { + ref_value, + conf + }; + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + // Check validation + let validated = ref_value + .validate_into_garage_lifecycle_config() + .ok_or_bad_request("invalid xml config")?; + + let ref_config = vec![ + GarageLifecycleRule { + id: Some("id1".into()), + enabled: true, + filter: GarageLifecycleFilter { + prefix: Some("documents/".into()), + ..Default::default() + }, + expiration: None, + abort_incomplete_mpu_days: Some(7), + }, + GarageLifecycleRule { + id: Some("id2".into()), + enabled: true, + filter: GarageLifecycleFilter { + prefix: Some("logs/".into()), + size_gt: Some(1000000), + ..Default::default() + }, + expiration: Some(GarageLifecycleExpiration::AfterDays(365)), + abort_incomplete_mpu_days: None, + }, + ]; + assert_eq!(validated, ref_config); + + let message3 = to_xml_with_header(&LifecycleConfiguration::from_garage_lifecycle_config( + &validated, + ))?; + assert_eq!(cleanup(message), cleanup(&message3)); + + Ok(()) + } +} diff --git a/src/api/s3/list.rs b/src/api/s3/list.rs index 9c2bc116..94c2c895 
100644 --- a/src/api/s3/list.rs +++ b/src/api/s3/list.rs @@ -17,7 +17,7 @@ use garage_api_common::encoding::*; use garage_api_common::helpers::*; use crate::api_server::{ReqBody, ResBody}; -use crate::encryption::{EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; use crate::multipart as s3_multipart; use crate::xml as s3_xml; @@ -285,18 +285,10 @@ pub async fn handle_list_parts( ObjectVersionState::Uploading { encryption, .. } => encryption, _ => unreachable!(), }; - let encryption_res = EncryptionParams::check_decrypt( - &ctx.garage, - req.headers(), - &object_encryption, - OekDerivationInfo { - bucket_id: ctx.bucket_id, - version_id: upload_id, - object_key: &query.key, - }, - ); + let encryption_res = + EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption); - let (info, next) = fetch_part_info(query, &mpu); + let (info, next) = fetch_part_info(query, &mpu)?; let result = s3_xml::ListPartsResult { xmlns: (), @@ -324,31 +316,25 @@ pub async fn handle_list_parts( size: s3_xml::IntValue(part.size as i64), checksum_crc32: match &checksum { Some(ChecksumValue::Crc32(x)) => { - Some(s3_xml::Value(BASE64_STANDARD.encode(x))) + Some(s3_xml::Value(BASE64_STANDARD.encode(&x))) } _ => None, }, checksum_crc32c: match &checksum { Some(ChecksumValue::Crc32c(x)) => { - Some(s3_xml::Value(BASE64_STANDARD.encode(x))) - } - _ => None, - }, - checksum_crc64nvme: match &checksum { - Some(ChecksumValue::Crc64Nvme(x)) => { - Some(s3_xml::Value(BASE64_STANDARD.encode(x))) + Some(s3_xml::Value(BASE64_STANDARD.encode(&x))) } _ => None, }, checksum_sha1: match &checksum { Some(ChecksumValue::Sha1(x)) => { - Some(s3_xml::Value(BASE64_STANDARD.encode(x))) + Some(s3_xml::Value(BASE64_STANDARD.encode(&x))) } _ => None, }, checksum_sha256: match &checksum { Some(ChecksumValue::Sha256(x)) => { - Some(s3_xml::Value(BASE64_STANDARD.encode(x))) + Some(s3_xml::Value(BASE64_STANDARD.encode(&x))) } _ => None, }, @@ -484,7 
+470,7 @@ where iter.next(); } _ => (), - } + }; while let Some(object) = iter.peek() { if !object.key.starts_with(&query.prefix) { @@ -508,7 +494,7 @@ where ExtractionResult::NoMore => { return Ok(None); } - } + }; } if !server_more { @@ -526,7 +512,7 @@ where fn fetch_part_info<'a>( query: &ListPartsQuery, mpu: &'a MultipartUpload, -) -> (Vec>, Option) { +) -> Result<(Vec>, Option), Error> { assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs // Parse multipart upload part list, removing parts not yet finished @@ -565,10 +551,10 @@ fn fetch_part_info<'a>( if parts.len() > query.max_parts as usize { parts.truncate(query.max_parts as usize); let pagination = Some(parts.last().unwrap().part_number); - return (parts, pagination); + return Ok((parts, pagination)); } - (parts, None) + Ok((parts, None)) } /* @@ -598,7 +584,7 @@ impl ListObjectsQuery { Some("[") => Ok(RangeBegin::IncludingKey { key: String::from_utf8( BASE64_STANDARD - .decode(&token.as_bytes()[1..]) + .decode(token[1..].as_bytes()) .ok_or_bad_request("Invalid continuation token")?, )?, fallback_key: None, @@ -606,7 +592,7 @@ impl ListObjectsQuery { Some("]") => Ok(RangeBegin::AfterKey { key: String::from_utf8( BASE64_STANDARD - .decode(&token.as_bytes()[1..]) + .decode(token[1..].as_bytes()) .ok_or_bad_request("Invalid continuation token")?, )?, }), @@ -725,7 +711,10 @@ impl Accumulator { let object = objects.peek().expect("This iterator can not be empty as it is checked earlier in the code. 
This is a logic bug, please report it."); // Check if this is a common prefix (requires a passed delimiter and its value in the key) - let pfx = common_prefix(object, query)?; + let pfx = match common_prefix(object, query) { + Some(p) => p, + None => return None, + }; assert!(pfx.starts_with(&query.prefix)); // Try to register this prefix @@ -756,7 +745,7 @@ impl Accumulator { None => Some(ExtractionResult::NoMore), } } - } + }; } } @@ -939,7 +928,7 @@ fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a } } -/// `URIencode` a value if needed +/// URIencode a value if needed fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value { if yes { s3_xml::Value(uri_encode(s, true)) @@ -999,7 +988,6 @@ mod tests { inner: ObjectVersionMetaInner { headers: vec![], checksum: None, - checksum_type: None, }, }, checksum_algorithm: None, @@ -1014,12 +1002,12 @@ mod tests { query.common.prefix = "a/".to_string(); assert_eq!( - common_prefix(objs.first().unwrap(), &query.common), + common_prefix(objs.get(0).unwrap(), &query.common), Some("a/b/") ); query.common.prefix = "a/b/".to_string(); - assert_eq!(common_prefix(objs.first().unwrap(), &query.common), None); + assert_eq!(common_prefix(objs.get(0).unwrap(), &query.common), None); } #[test] @@ -1040,7 +1028,7 @@ mod tests { #[test] fn test_extract_upload() { - let objs = [ + let objs = vec![ Object::new( bucket(), "b".to_string(), @@ -1069,7 +1057,7 @@ mod tests { assert_eq!(upload, Uuid::from([0x8f; 32])); } _ => panic!("wrong result"), - } + }; assert_eq!(acc.keys.len(), 2); assert_eq!( @@ -1098,7 +1086,7 @@ mod tests { match acc.extract(&(query().common), &start, &mut iter) { ExtractionResult::Extracted { key } if key.as_str() == "b" => (), _ => panic!("wrong result"), - } + }; } #[tokio::test] @@ -1255,7 +1243,7 @@ mod tests { } #[test] - fn test_fetch_part_info() { + fn test_fetch_part_info() -> Result<(), Error> { let mut query = ListPartsQuery { bucket_name: "a".to_string(), key: "a".to_string(), 
@@ -1267,7 +1255,7 @@ mod tests { let mpu = mpu(); // Start from the beginning but with limited size to trigger pagination - let (info, pagination) = fetch_part_info(&query, &mpu); + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert_eq!(pagination.unwrap(), 3); assert_eq!( info, @@ -1291,7 +1279,7 @@ mod tests { // Use previous pagination to make a new request query.part_number_marker = Some(pagination.unwrap()); - let (info, pagination) = fetch_part_info(&query, &mpu); + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!( info, @@ -1315,14 +1303,14 @@ mod tests { // Trying to access a part that is way larger than registered ones query.part_number_marker = Some(9999); - let (info, pagination) = fetch_part_info(&query, &mpu); + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!(info, vec![]); // Try without any limitation query.max_parts = 1000; query.part_number_marker = None; - let (info, pagination) = fetch_part_info(&query, &mpu); + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!( info, @@ -1357,5 +1345,7 @@ mod tests { }, ] ); + + Ok(()) } } diff --git a/src/api/s3/multipart.rs b/src/api/s3/multipart.rs index 16c8b6aa..d6eb26cb 100644 --- a/src/api/s3/multipart.rs +++ b/src/api/s3/multipart.rs @@ -1,12 +1,13 @@ use std::collections::HashMap; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; +use std::hash::Hasher; use std::sync::Arc; use base64::prelude::*; -use crc_fast::{CrcAlgorithm, Digest as CrcDigest}; +use crc32c::Crc32cHasher as Crc32c; +use crc32fast::Hasher as Crc32; use futures::prelude::*; -use http::StatusCode; -use hyper::{header::HeaderValue, HeaderMap, Request, Response}; +use hyper::{Request, Response}; use md5::{Digest, Md5}; use sha1::Sha1; use sha2::Sha256; @@ -25,7 +26,7 @@ use garage_api_common::helpers::*; use garage_api_common::signature::checksum::*; use 
crate::api_server::{ReqBody, ResBody}; -use crate::encryption::{has_encryption_header, EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; use crate::put::*; use crate::xml as s3_xml; @@ -43,7 +44,7 @@ pub async fn handle_create_multipart_upload( bucket_name, .. } = &ctx; - let existing_object = garage.object_table.get(bucket_id, key).await?; + let existing_object = garage.object_table.get(&bucket_id, &key).await?; let upload_id = gen_uuid(); let timestamp = next_timestamp(existing_object.as_ref()); @@ -52,25 +53,13 @@ pub async fn handle_create_multipart_upload( let meta = ObjectVersionMetaInner { headers, checksum: None, - checksum_type: None, }; // Determine whether object should be encrypted, and if so the key - let encryption = EncryptionParams::new_from_headers( - garage, - req.headers(), - OekDerivationInfo { - bucket_id: *bucket_id, - version_id: upload_id, - object_key: key, - }, - )?; + let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?; let object_encryption = encryption.encrypt_meta(meta)?; - let checksum_algorithm = request_checksum_algorithm_and_type( - req.headers(), - request_checksum_algorithm(req.headers())?, - )?; + let checksum_algorithm = request_checksum_algorithm(req.headers())?; // Create object in object table let object_version = ObjectVersion { @@ -131,7 +120,8 @@ pub async fn handle_put_part( // Before we stream the body, configure the needed checksums. req_body.add_expected_checksums(expected_checksums.clone()); - if !has_encryption_header(&req_head.headers) { + // TODO: avoid parsing encryption headers twice... 
+ if !EncryptionParams::new_from_headers(&garage, &req_head.headers)?.is_encrypted() { // For non-encrypted objects, we need to compute the md5sum in all cases // (even if content-md5 is not set), because it is used as an etag of the // part, which is in turn used in the etag computation of the whole object @@ -144,11 +134,10 @@ pub async fn handle_put_part( let mut chunker = StreamChunker::new(stream, garage.config.block_size); // Read first chuck, and at the same time try to get object to see if it exists - let ((object, object_version, mut mpu), first_block) = + let ((_, object_version, mut mpu), first_block) = futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?; // Check encryption params - let oek_params = OekDerivationInfo::for_object(&object, &object_version); let (object_encryption, checksum_algorithm) = match object_version.state { ObjectVersionState::Uploading { encryption, @@ -158,7 +147,7 @@ pub async fn handle_put_part( _ => unreachable!(), }; let (encryption, _) = - EncryptionParams::check_decrypt(garage, &req_head.headers, &object_encryption, oek_params)?; + EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?; // Check object is valid and part can be accepted let first_block = first_block.ok_or_bad_request("Empty body")?; @@ -225,7 +214,7 @@ pub async fn handle_put_part( MpuPart { version: version_uuid, etag: Some(etag.clone()), - checksum: checksums.extract(checksum_algorithm.map(|(algo, _)| algo))?, + checksum: checksums.extract(checksum_algorithm), size: Some(total_size), }, ); @@ -287,23 +276,12 @@ pub async fn handle_complete_multipart_upload( let (req_head, req_body) = req.into_parts(); let expected_checksum = request_checksum_value(&req_head.headers)?; - let req_checksum_algorithm = request_checksum_algorithm_and_type( - &req_head.headers, - expected_checksum.map(|x| x.algorithm()), - )?; - debug!( - "CompleteMultipartUpload expected checksum: {:?}, request checksum type: {:?}", - 
expected_checksum, req_checksum_algorithm - ); let body = req_body.collect().await?; let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; - let body_list_of_parts = - parse_complete_multipart_upload_body(&body_xml).ok_or_bad_request(format!( - "Invalid CompleteMultipartUpload XML:\n{}", - String::from_utf8_lossy(&body) - ))?; + let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) + .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; debug!( "CompleteMultipartUpload list of parts: {:?}", body_list_of_parts @@ -319,7 +297,6 @@ pub async fn handle_complete_multipart_upload( return Err(Error::bad_request("No data was uploaded")); } - let oek_params = OekDerivationInfo::for_object(&object, &object_version); let (object_encryption, checksum_algorithm) = match object_version.state { ObjectVersionState::Uploading { encryption, @@ -328,17 +305,6 @@ pub async fn handle_complete_multipart_upload( } => (encryption, checksum_algorithm), _ => unreachable!(), }; - debug!( - "CompleteMultipartUpload object checksum_algorithm: {:?}", - checksum_algorithm - ); - if req_checksum_algorithm.is_some() && req_checksum_algorithm != checksum_algorithm { - return Err(Error::InvalidDigest(format!( - "checksum algorithm {:?} does not correspond to algorithm specified in CreateMultipartUpload {:?}", - req_checksum_algorithm, - checksum_algorithm - ))); - } // Check that part numbers are an increasing sequence. 
// (it doesn't need to start at 1 nor to be a continuous sequence, @@ -364,13 +330,14 @@ pub async fn handle_complete_multipart_upload( for req_part in body_list_of_parts.iter() { match have_parts.get(&req_part.part_number) { Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => { - if req_part.checksum.is_some() && part.checksum != req_part.checksum { + // alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum { + if part.checksum != req_part.checksum { return Err(Error::InvalidDigest(format!( "Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}", req_part.part_number, req_part.checksum, part.checksum ))); } - parts.push(*part); + parts.push(*part) } _ => return Err(Error::InvalidPart), } @@ -423,11 +390,7 @@ pub async fn handle_complete_multipart_upload( // https://teppen.io/2018/06/23/aws_s3_etags/ let mut checksummer = MultipartChecksummer::init(checksum_algorithm); for part in parts.iter() { - checksummer.update( - part.etag.as_ref().unwrap(), - part.checksum, - part.size.unwrap(), - )?; + checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?; } let (checksum_md5, checksum_extra) = checksummer.finalize(); @@ -454,16 +417,11 @@ pub async fn handle_complete_multipart_upload( let object_encryption = match checksum_algorithm { None => object_encryption, Some(_) => { - let (encryption, meta) = EncryptionParams::check_decrypt( - garage, - &req_head.headers, - &object_encryption, - oek_params, - )?; + let (encryption, meta) = + EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?; let new_meta = ObjectVersionMetaInner { headers: meta.into_owned().headers, checksum: checksum_extra, - checksum_type: checksum_algorithm.map(|(_, ty)| ty), }; encryption.encrypt_meta(new_meta)? 
} @@ -494,35 +452,26 @@ pub async fn handle_complete_multipart_upload( .root_domain .as_ref() .map(|rd| s3_xml::Value(format!("https://{}.{}/{}", bucket_name, rd, key))) - .or_else(|| Some(s3_xml::Value(format!("/{}/{}", bucket_name, key)))), + .or(Some(s3_xml::Value(format!("/{}/{}", bucket_name, key)))), bucket: s3_xml::Value(bucket_name.to_string()), key: s3_xml::Value(key), etag: s3_xml::Value(format!("\"{}\"", etag)), checksum_crc32: match &checksum_extra { - Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(x))), + Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))), _ => None, }, checksum_crc32c: match &checksum_extra { - Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(x))), - _ => None, - }, - checksum_crc64nvme: match &checksum_extra { - Some(ChecksumValue::Crc64Nvme(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(x))), + Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))), _ => None, }, checksum_sha1: match &checksum_extra { - Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(x))), + Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))), _ => None, }, checksum_sha256: match &checksum_extra { - Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(x))), + Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))), _ => None, }, - checksum_type: match checksum_algorithm { - Some((_, ChecksumType::Composite)) => Some(s3_xml::Value(COMPOSITE.into())), - Some((_, ChecksumType::FullObject)) => Some(s3_xml::Value(FULL_OBJECT.into())), - None => None, - }, }; let xml = s3_xml::to_xml_with_header(&result)?; @@ -548,9 +497,7 @@ pub async fn handle_abort_multipart_upload( let final_object = Object::new(*bucket_id, key.to_string(), vec![object_version]); garage.object_table.insert(&final_object).await?; - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - 
.body(empty_body())?) + Ok(Response::new(empty_body())) } // ======== helpers ============ @@ -602,32 +549,6 @@ struct CompleteMultipartUploadPart { checksum: Option, } -macro_rules! extract_checksum_from { - ($node:ident { $($name:expr => $variant:ident),* $(,)? }) => { - if false { None } - $( - else if let Some(node) = $node.children().find(|e| e.has_tag_name($name)) { - match node.last_child().map(|x| x.text()) { - // Child is text but empty post-trim, ignore it. - Some(Some(text)) if text.trim().is_empty() => None, - - // Child is non-empty text, parse it. - Some(Some(text)) => Some(ChecksumValue::$variant( - BASE64_STANDARD.decode(text).ok()?[..].try_into().ok()? - )), - - // Child is not text, reject it. - Some(None) => return None, - - // No child, ignore it. - None => None, - } - } - )* - else { None } - } -} - fn parse_complete_multipart_upload_body( xml: &roxmltree::Document, ) -> Option> { @@ -651,15 +572,37 @@ fn parse_complete_multipart_upload_body( .children() .find(|e| e.has_tag_name("PartNumber"))? .text()?; - - let checksum = extract_checksum_from!(item { - "ChecksumCRC32" => Crc32, - "ChecksumCRC32C" => Crc32c, - "ChecksumCRC64NVME" => Crc64Nvme, - "ChecksumSHA1" => Sha1, - "ChecksumSHA256" => Sha256, - }); - + let checksum = if let Some(crc32) = + item.children().find(|e| e.has_tag_name("ChecksumCRC32")) + { + Some(ChecksumValue::Crc32( + BASE64_STANDARD.decode(crc32.text()?).ok()?[..] + .try_into() + .ok()?, + )) + } else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C")) + { + Some(ChecksumValue::Crc32c( + BASE64_STANDARD.decode(crc32c.text()?).ok()?[..] + .try_into() + .ok()?, + )) + } else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) { + Some(ChecksumValue::Sha1( + BASE64_STANDARD.decode(sha1.text()?).ok()?[..] 
+ .try_into() + .ok()?, + )) + } else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256")) + { + Some(ChecksumValue::Sha256( + BASE64_STANDARD.decode(sha256.text()?).ok()?[..] + .try_into() + .ok()?, + )) + } else { + None + }; parts.push(CompleteMultipartUploadPart { etag: etag.trim_matches('"').to_string(), part_number: part_number.parse().ok()?, @@ -675,52 +618,36 @@ fn parse_complete_multipart_upload_body( // ====== checksummer ==== -pub fn request_checksum_algorithm_and_type( - headers: &HeaderMap, - algo: Option, -) -> Result, Error> { - match (headers.get(X_AMZ_CHECKSUM_TYPE), algo) { - (None, None) => Ok(None), - (None, Some(algo)) => { - let ty = match algo { - ChecksumAlgorithm::Crc64Nvme => ChecksumType::FullObject, - _ => ChecksumType::Composite, - }; - Ok(Some((algo, ty))) - } - (Some(_), None) => Err(Error::bad_request( - "Cannot specify x-amz-checksum-type when no checksum algorithm is in use.", - )), - (Some(x), Some(algo)) => { - let checksum_type = match x.as_bytes() { - x if x == COMPOSITE.as_bytes() => ChecksumType::Composite, - x if x == FULL_OBJECT.as_bytes() => ChecksumType::FullObject, - _ => return Err(Error::bad_request("Invalid x-amz-checksum-type value")), - }; - match (checksum_type, algo) { - (ChecksumType::Composite, ChecksumAlgorithm::Crc64Nvme) - | (ChecksumType::FullObject, ChecksumAlgorithm::Sha1) - | (ChecksumType::FullObject, ChecksumAlgorithm::Sha256) => Err(Error::bad_request(format!( - "checksum type {:?} is not supported for algorithm {:?}", - checksum_type, algo - ))), - (ty, algo) => Ok(Some((algo, ty))), - } - } - } -} - #[derive(Default)] pub(crate) struct MultipartChecksummer { pub md5: Md5, pub extra: Option, } +pub(crate) enum MultipartExtraChecksummer { + Crc32(Crc32), + Crc32c(Crc32c), + Sha1(Sha1), + Sha256(Sha256), +} + impl MultipartChecksummer { - pub(crate) fn init(algo: Option<(ChecksumAlgorithm, ChecksumType)>) -> Self { + pub(crate) fn init(algo: Option) -> Self { Self { md5: 
Md5::new(), - extra: algo.map(|(algo, cktype)| MultipartExtraChecksummer::init(algo, cktype)), + extra: match algo { + None => None, + Some(ChecksumAlgorithm::Crc32) => { + Some(MultipartExtraChecksummer::Crc32(Crc32::new())) + } + Some(ChecksumAlgorithm::Crc32c) => { + Some(MultipartExtraChecksummer::Crc32c(Crc32c::default())) + } + Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())), + Some(ChecksumAlgorithm::Sha256) => { + Some(MultipartExtraChecksummer::Sha256(Sha256::new())) + } + }, } } @@ -728,130 +655,59 @@ impl MultipartChecksummer { &mut self, etag: &str, checksum: Option, - part_len: u64, ) -> Result<(), Error> { self.md5 - .update(&hex::decode(etag).ok_or_message("invalid etag hex")?); - if let Some(extra) = &mut self.extra { - extra.update(checksum, part_len)?; + .update(&hex::decode(&etag).ok_or_message("invalid etag hex")?); + match (&mut self.extra, checksum) { + (None, _) => (), + ( + Some(MultipartExtraChecksummer::Crc32(ref mut crc32)), + Some(ChecksumValue::Crc32(x)), + ) => { + crc32.update(&x); + } + ( + Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)), + Some(ChecksumValue::Crc32c(x)), + ) => { + crc32c.write(&x); + } + (Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => { + sha1.update(&x); + } + ( + Some(MultipartExtraChecksummer::Sha256(ref mut sha256)), + Some(ChecksumValue::Sha256(x)), + ) => { + sha256.update(&x); + } + (Some(_), b) => { + return Err(Error::internal_error(format!( + "part checksum was not computed correctly, got: {:?}", + b + ))) + } } Ok(()) } pub(crate) fn finalize(self) -> (Md5Checksum, Option) { let md5 = self.md5.finalize()[..].try_into().unwrap(); - let extra = self.extra.map(|c| c.finalize()); + let extra = match self.extra { + None => None, + Some(MultipartExtraChecksummer::Crc32(crc32)) => { + Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize()))) + } + Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c( 
+ u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()), + )), + Some(MultipartExtraChecksummer::Sha1(sha1)) => { + Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap())) + } + Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256( + sha256.finalize()[..].try_into().unwrap(), + )), + }; (md5, extra) } } - -pub(crate) enum MultipartExtraChecksummer { - FullObjectCrc(CrcAlgorithm, Option), - CompositeCrc(ChecksumAlgorithm, CrcDigest), - CompositeSha1(Sha1), - CompositeSha256(Sha256), -} - -impl MultipartExtraChecksummer { - fn init(algo: ChecksumAlgorithm, cktype: ChecksumType) -> Self { - match (algo, cktype) { - (algo, ChecksumType::FullObject) => { - let crc_type = match algo { - ChecksumAlgorithm::Crc32 => CrcAlgorithm::Crc32IsoHdlc, - ChecksumAlgorithm::Crc32c => CrcAlgorithm::Crc32Iscsi, - ChecksumAlgorithm::Crc64Nvme => CrcAlgorithm::Crc64Nvme, - _ => unreachable!(), - }; - Self::FullObjectCrc(crc_type, None) - } - (ChecksumAlgorithm::Crc32, ChecksumType::Composite) => { - Self::CompositeCrc(ChecksumAlgorithm::Crc32, new_crc32()) - } - (ChecksumAlgorithm::Crc32c, ChecksumType::Composite) => { - Self::CompositeCrc(ChecksumAlgorithm::Crc32c, new_crc32c()) - } - (ChecksumAlgorithm::Sha1, ChecksumType::Composite) => Self::CompositeSha1(Sha1::new()), - (ChecksumAlgorithm::Sha256, ChecksumType::Composite) => { - Self::CompositeSha256(Sha256::new()) - } - _ => unreachable!(), - } - } - - fn update(&mut self, checksum: Option, part_len: u64) -> Result<(), Error> { - match (self, checksum) { - (Self::FullObjectCrc(crc_algo, crc_value), Some(ck)) => { - let ck_u64 = match ck { - ChecksumValue::Crc32(x) => u32::from_be_bytes(x) as u64, - ChecksumValue::Crc32c(x) => u32::from_be_bytes(x) as u64, - ChecksumValue::Crc64Nvme(x) => u64::from_be_bytes(x), - _ => { - return Err(Error::internal_error(format!( - "part checksum was not computed correctly, got: {:?}", - ck - ))) - } - }; - *crc_value = match *crc_value { - None => 
Some(ck_u64), - Some(prev) => Some(crc_fast::checksum_combine( - *crc_algo, prev, ck_u64, part_len, - )), - }; - } - (Self::CompositeCrc(_, digest), Some(ck)) => match ck { - ChecksumValue::Crc32(x) => digest.update(&x), - ChecksumValue::Crc32c(x) => digest.update(&x), - ChecksumValue::Crc64Nvme(x) => digest.update(&x), - _ => { - return Err(Error::internal_error(format!( - "part checksum was not computed correctly, got: {:?}", - ck - ))) - } - }, - (Self::CompositeSha1(sha1), Some(ChecksumValue::Sha1(x))) => { - sha1.update(x); - } - (Self::CompositeSha256(sha256), Some(ChecksumValue::Sha256(x))) => { - sha256.update(x); - } - _ => { - return Err(Error::internal_error(format!( - "part checksum was not computed correctly, got: {:?}", - checksum - ))) - } - } - Ok(()) - } - fn finalize(self) -> ChecksumValue { - match self { - Self::FullObjectCrc(algo, value) => match (algo, value) { - (CrcAlgorithm::Crc32IsoHdlc, Some(v)) => { - ChecksumValue::Crc32(u32::to_be_bytes(v as u32)) - } - (CrcAlgorithm::Crc32Iscsi, Some(v)) => { - ChecksumValue::Crc32c(u32::to_be_bytes(v as u32)) - } - (CrcAlgorithm::Crc64Nvme, Some(v)) => ChecksumValue::Crc64Nvme(u64::to_be_bytes(v)), - _ => unreachable!(), - }, - Self::CompositeCrc(algo, crc) => match algo { - ChecksumAlgorithm::Crc32 => { - ChecksumValue::Crc32(u32::to_be_bytes(crc.finalize() as u32)) - } - ChecksumAlgorithm::Crc32c => { - ChecksumValue::Crc32c(u32::to_be_bytes(crc.finalize() as u32)) - } - _ => unreachable!(), - }, - Self::CompositeSha1(sha1) => { - ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()) - } - Self::CompositeSha256(sha256) => { - ChecksumValue::Sha256(sha256.finalize()[..].try_into().unwrap()) - } - } - } -} diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs index e89e9ea9..09be7e7c 100644 --- a/src/api/s3/post_object.rs +++ b/src/api/s3/post_object.rs @@ -15,7 +15,6 @@ use serde::Deserialize; use garage_model::garage::Garage; use garage_model::s3::object_table::*; -use 
garage_util::data::gen_uuid; use garage_api_common::cors::*; use garage_api_common::helpers::*; @@ -23,7 +22,7 @@ use garage_api_common::signature::checksum::*; use garage_api_common::signature::payload::{verify_v4, Authorization}; use crate::api_server::ResBody; -use crate::encryption::{EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; use crate::put::{extract_metadata_headers, save_stream, ChecksumMode}; use crate::xml as s3_xml; @@ -104,18 +103,22 @@ pub async fn handle_post_object( key.to_owned() }; - let api_key = verify_v4(&garage, "s3", &authorization, policy.as_bytes())?; + let api_key = verify_v4(&garage, "s3", &authorization, policy.as_bytes()).await?; - let bucket = garage + let bucket_id = garage .bucket_helper() - .resolve_bucket_fast(&bucket_name, &api_key) + .resolve_bucket(&bucket_name, &api_key) + .await .map_err(pass_helper_error)?; - let bucket_id = bucket.id; if !api_key.allow_write(&bucket_id) { return Err(Error::forbidden("Operation is not allowed for this key.")); } + let bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; let bucket_params = bucket.state.into_option().unwrap(); let matching_cors_rule = find_matching_cors_rule( &bucket_params, @@ -244,23 +247,12 @@ pub async fn handle_post_object( .transpose()?, }; - let version_uuid = gen_uuid(); - let meta = ObjectVersionMetaInner { headers, checksum: expected_checksums.extra, - checksum_type: expected_checksums.extra.map(|_| ChecksumType::FullObject), }; - let encryption = EncryptionParams::new_from_headers( - &garage, - ¶ms, - OekDerivationInfo { - bucket_id, - version_id: version_uuid, - object_key: &key, - }, - )?; + let encryption = EncryptionParams::new_from_headers(&garage, ¶ms)?; let stream = file_field.map(|r| r.map_err(Into::into)); let ctx = ReqCtx { @@ -273,12 +265,11 @@ pub async fn handle_post_object( let res = save_stream( &ctx, - version_uuid, meta, encryption, StreamLimiter::new(stream, 
conditions.content_length), &key, - ChecksumMode::Verify(expected_checksums), + ChecksumMode::Verify(&expected_checksums), ) .await?; @@ -505,15 +496,15 @@ mod tests { let mut conditions = policy_2.into_conditions().unwrap(); assert_eq!( - conditions.params.remove("acl"), + conditions.params.remove(&"acl".to_string()), Some(vec![Operation::Equal("public-read".into())]) ); assert_eq!( - conditions.params.remove("bucket"), + conditions.params.remove(&"bucket".to_string()), Some(vec![Operation::Equal("johnsmith".into())]) ); assert_eq!( - conditions.params.remove("key"), + conditions.params.remove(&"key".to_string()), Some(vec![Operation::StartsWith("user/eric/".into())]) ); assert!(conditions.params.is_empty()); @@ -536,7 +527,7 @@ mod tests { let mut conditions = policy_2.into_conditions().unwrap(); assert_eq!( - conditions.params.remove("acl"), + conditions.params.remove(&"acl".to_string()), Some(vec![Operation::Equal("public-read".into())]) ); assert_eq!( @@ -544,7 +535,9 @@ mod tests { vec![Operation::StartsWith("image/".into())] ); assert_eq!( - conditions.params.remove("success_action_redirect"), + conditions + .params + .remove(&"success_action_redirect".to_string()), Some(vec![Operation::StartsWith("".into())]) ); assert!(conditions.params.is_empty()); diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index e6f379fa..b915f2ec 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -35,7 +35,7 @@ use garage_api_common::signature::body::StreamingChecksumReceiver; use garage_api_common::signature::checksum::*; use crate::api_server::{ReqBody, ResBody}; -use crate::encryption::{EncryptionParams, OekDerivationInfo}; +use crate::encryption::EncryptionParams; use crate::error::*; use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION; @@ -46,8 +46,8 @@ pub(crate) struct SaveStreamResult { pub(crate) etag: String, } -pub(crate) enum ChecksumMode { - Verify(ExpectedChecksums), +pub(crate) enum ChecksumMode<'a> { + Verify(&'a ExpectedChecksums), VerifyFrom { 
checksummer: StreamingChecksumReceiver, trailer_algo: Option, @@ -60,10 +60,6 @@ pub async fn handle_put( req: Request, key: &String, ) -> Result, Error> { - // Generate version uuid now, because it is necessary to compute SSE-C - // encryption parameters - let version_uuid = gen_uuid(); - // Retrieve interesting headers from request let headers = extract_metadata_headers(req.headers())?; debug!("Object headers: {:?}", headers); @@ -81,19 +77,10 @@ pub async fn handle_put( let meta = ObjectVersionMetaInner { headers, checksum: expected_checksums.extra, - checksum_type: expected_checksums.extra.map(|_| ChecksumType::FullObject), }; // Determine whether object should be encrypted, and if so the key - let encryption = EncryptionParams::new_from_headers( - &ctx.garage, - req.headers(), - OekDerivationInfo { - bucket_id: ctx.bucket_id, - version_id: version_uuid, - object_key: key, - }, - )?; + let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?; // The request body is a special ReqBody object (see garage_api_common::signature::body) // which supports calculating checksums while streaming the data. @@ -111,7 +98,6 @@ pub async fn handle_put( let res = save_stream( &ctx, - version_uuid, meta, encryption, stream, @@ -133,12 +119,11 @@ pub async fn handle_put( pub(crate) async fn save_stream> + Unpin>( ctx: &ReqCtx, - version_uuid: Uuid, mut meta: ObjectVersionMetaInner, encryption: EncryptionParams, body: S, key: &String, - checksum_mode: ChecksumMode, + checksum_mode: ChecksumMode<'_>, ) -> Result { let ReqCtx { garage, bucket_id, .. 
@@ -153,12 +138,13 @@ pub(crate) async fn save_stream> + Unpin>( let first_block = first_block_opt.unwrap_or_default(); // Generate identity of new version + let version_uuid = gen_uuid(); let version_timestamp = next_timestamp(existing_object.as_ref()); let mut checksummer = match &checksum_mode { ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()), ChecksumMode::Calculate(algo) => { - Checksummer::init(&Default::default(), !encryption.is_encrypted()).add_algorithm(*algo) + Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(*algo) } ChecksumMode::VerifyFrom { .. } => { // Checksums are calculated by the garage_api_common::signature module @@ -178,7 +164,7 @@ pub(crate) async fn save_stream> + Unpin>( checksums.verify(&expected)?; } ChecksumMode::Calculate(algo) => { - meta.checksum = checksums.extract(algo)?; + meta.checksum = checksums.extract(algo); } ChecksumMode::VerifyFrom { checksummer, @@ -189,10 +175,10 @@ pub(crate) async fn save_stream> + Unpin>( .await .ok_or_internal_error("checksum calculation")??; if let Some(algo) = trailer_algo { - meta.checksum = checksums.extract(Some(algo))?; + meta.checksum = checksums.extract(Some(algo)); } } - } + }; let size = first_block.len() as u64; check_quotas(ctx, size, existing_object.as_ref()).await?; @@ -280,7 +266,7 @@ pub(crate) async fn save_stream> + Unpin>( checksums.verify(&expected)?; } ChecksumMode::Calculate(algo) => { - meta.checksum = checksums.extract(algo)?; + meta.checksum = checksums.extract(algo); } ChecksumMode::VerifyFrom { checksummer, @@ -290,10 +276,10 @@ pub(crate) async fn save_stream> + Unpin>( .await .ok_or_internal_error("checksum calculation")??; if let Some(algo) = trailer_algo { - meta.checksum = checksums.extract(Some(algo))?; + meta.checksum = checksums.extract(Some(algo)); } } - } + }; // Verify quotas are respsected check_quotas(ctx, total_size, existing_object.as_ref()).await?; @@ -339,7 +325,7 @@ pub(crate) async fn 
check_quotas( let quotas = bucket_params.quotas.get(); if quotas.max_objects.is_none() && quotas.max_size.is_none() { return Ok(()); - } + }; let counters = garage .object_counter_table @@ -436,7 +422,7 @@ pub(crate) async fn read_and_put_blocks> + tracer.start("Hash block (md5, sha256)"), )) .await - .unwrap(); + .unwrap() } Err(e) => { block_tx2.send(Err(e)).await?; @@ -554,7 +540,6 @@ pub(crate) async fn read_and_put_blocks> + Ok((total_size, checksums, first_block_hash)) } -#[expect(clippy::too_many_arguments)] async fn put_block_and_meta( ctx: &ReqCtx, version: &Version, @@ -669,7 +654,7 @@ pub(crate) fn extract_metadata_headers( let mut ret = Vec::new(); // Preserve standard headers - let standard_header = [ + let standard_header = vec![ hyper::header::CONTENT_TYPE, hyper::header::CACHE_CONTROL, hyper::header::CONTENT_DISPOSITION, diff --git a/src/api/s3/router.rs b/src/api/s3/router.rs index 69c95876..e3f58490 100644 --- a/src/api/s3/router.rs +++ b/src/api/s3/router.rs @@ -309,7 +309,7 @@ pub enum Endpoint { impl Endpoint { /// Determine which S3 endpoint a request is for using the request, and a bucket which was /// possibly extracted from the Host header. - /// Returns Self plus bucket name, if endpoint is not `Endpoint::ListBuckets` + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets pub fn from_request( req: &Request, bucket: Option, @@ -330,7 +330,7 @@ impl Endpoint { } else { path.split_once('/') .map(|(b, p)| (b.to_owned(), p.trim_start_matches('/'))) - .unwrap_or_else(|| (path.to_owned(), "")) + .unwrap_or((path.to_owned(), "")) }; if *req.method() == Method::OPTIONS { @@ -355,7 +355,7 @@ impl Endpoint { if let Some(x_id) = query.x_id.take() { if x_id != res.name() { // I think AWS ignores the x-id parameter. - // Let's make this at least be a warning to help debugging. + // Let's make this at least be a warnin to help debugging. 
warn!( "x-id ({}) does not match parsed endpoint ({})", x_id, @@ -365,7 +365,7 @@ impl Endpoint { } if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message); + debug!("Unused query parameter: {}", message) } Ok((res, Some(bucket))) } @@ -580,7 +580,7 @@ impl Endpoint { pub fn authorization_type(&self) -> Authorization { if let Endpoint::ListBuckets = self { return Authorization::None; - } + }; let readonly = router_match! { @match self, @@ -725,7 +725,7 @@ mod tests { ) -> (Endpoint, Option) { let mut req = Request::builder().method(method).uri(uri); if let Some((k, v)) = header { - req = req.header(k, v); + req = req.header(k, v) } let req = req.body(()).unwrap(); @@ -859,7 +859,7 @@ mod tests { .body(()) .unwrap(); - assert!(Endpoint::from_request(&req, None).is_err()); + assert!(Endpoint::from_request(&req, None).is_err()) } #[test] @@ -949,7 +949,7 @@ mod tests { GET "/?uploads&delimiter=/&prefix=photos/2006/" => ListMultipartUploads GET "/?uploads&delimiter=D&encoding-type=EncodingType&key-marker=KeyMarker&max-uploads=1&prefix=Prefix&upload-id-marker=UploadIdMarker" => ListMultipartUploads GET "/" => ListObjects - GET "/?prefix=N&marker=Need&max-keys=40" => ListObjects + GET "/?prefix=N&marker=Ned&max-keys=40" => ListObjects GET "/?delimiter=/" => ListObjects GET "/?prefix=photos/2006/&delimiter=/" => ListObjects @@ -1011,7 +1011,7 @@ mod tests { // no bucket, won't work with the rest of the test suite assert!(matches!( parse("GET", "/", None, None).0, - Endpoint::ListBuckets + Endpoint::ListBuckets { .. 
} )); assert!(matches!( parse("GET", "/", None, None).0.authorization_type(), diff --git a/src/api/s3/website.rs b/src/api/s3/website.rs index 0010a300..03cc01d8 100644 --- a/src/api/s3/website.rs +++ b/src/api/s3/website.rs @@ -1,15 +1,15 @@ use quick_xml::de::from_reader; use hyper::{header::HeaderName, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; -use garage_model::bucket_table::Bucket; +use garage_model::bucket_table::*; use garage_api_common::helpers::*; -use garage_api_common::xml::website::*; use crate::api_server::{ReqBody, ResBody}; use crate::error::*; -use crate::xml::{to_xml_with_header, Value}; +use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; pub const X_AMZ_WEBSITE_REDIRECT_LOCATION: HeaderName = HeaderName::from_static("x-amz-website-redirect-location"); @@ -26,14 +26,7 @@ pub async fn handle_get_website(ctx: ReqCtx) -> Result, Error> suffix: Value(website.index_document.to_string()), }), redirect_all_requests_to: None, - routing_rules: RoutingRules { - rules: website - .routing_rules - .clone() - .into_iter() - .map(RoutingRule::from_garage_routing_rule) - .collect(), - }, + routing_rules: None, }; let xml = to_xml_with_header(&wc)?; Ok(Response::builder() @@ -93,3 +86,272 @@ pub async fn handle_put_website( .status(StatusCode::OK) .body(empty_body())?) 
} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct WebsiteConfiguration { + #[serde(serialize_with = "xmlns_tag", skip_deserializing)] + pub xmlns: (), + #[serde(rename = "ErrorDocument")] + pub error_document: Option, + #[serde(rename = "IndexDocument")] + pub index_document: Option, + #[serde(rename = "RedirectAllRequestsTo")] + pub redirect_all_requests_to: Option, + #[serde(rename = "RoutingRules")] + pub routing_rules: Option>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct RoutingRule { + #[serde(rename = "RoutingRule")] + pub inner: RoutingRuleInner, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct RoutingRuleInner { + #[serde(rename = "Condition")] + pub condition: Option, + #[serde(rename = "Redirect")] + pub redirect: Redirect, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Key { + #[serde(rename = "Key")] + pub key: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Suffix { + #[serde(rename = "Suffix")] + pub suffix: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Target { + #[serde(rename = "HostName")] + pub hostname: Value, + #[serde(rename = "Protocol")] + pub protocol: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Condition { + #[serde(rename = "HttpErrorCodeReturnedEquals")] + pub http_error_code: Option, + #[serde(rename = "KeyPrefixEquals")] + pub prefix: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Redirect { + #[serde(rename = "HostName")] + pub hostname: Option, + #[serde(rename = "Protocol")] + pub protocol: Option, + #[serde(rename = "HttpRedirectCode")] + pub http_redirect_code: Option, + #[serde(rename = "ReplaceKeyPrefixWith")] + pub replace_prefix: Option, + 
#[serde(rename = "ReplaceKeyWith")] + pub replace_full: Option, +} + +impl WebsiteConfiguration { + pub fn validate(&self) -> Result<(), Error> { + if self.redirect_all_requests_to.is_some() + && (self.error_document.is_some() + || self.index_document.is_some() + || self.routing_rules.is_some()) + { + return Err(Error::bad_request( + "Bad XML: can't have RedirectAllRequestsTo and other fields", + )); + } + if let Some(ref ed) = self.error_document { + ed.validate()?; + } + if let Some(ref id) = self.index_document { + id.validate()?; + } + if let Some(ref rart) = self.redirect_all_requests_to { + rart.validate()?; + } + if let Some(ref rrs) = self.routing_rules { + for rr in rrs { + rr.inner.validate()?; + } + } + + Ok(()) + } + + pub fn into_garage_website_config(self) -> Result { + if self.redirect_all_requests_to.is_some() { + Err(Error::NotImplemented( + "S3 website redirects are not currently implemented in Garage.".into(), + )) + } else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) { + Err(Error::NotImplemented( + "S3 routing rules are not currently implemented in Garage.".into(), + )) + } else { + Ok(WebsiteConfig { + index_document: self + .index_document + .map(|x| x.suffix.0) + .unwrap_or_else(|| "index.html".to_string()), + error_document: self.error_document.map(|x| x.key.0), + }) + } + } +} + +impl Key { + pub fn validate(&self) -> Result<(), Error> { + if self.key.0.is_empty() { + Err(Error::bad_request( + "Bad XML: error document specified but empty", + )) + } else { + Ok(()) + } + } +} + +impl Suffix { + pub fn validate(&self) -> Result<(), Error> { + if self.suffix.0.is_empty() | self.suffix.0.contains('/') { + Err(Error::bad_request( + "Bad XML: index document is empty or contains /", + )) + } else { + Ok(()) + } + } +} + +impl Target { + pub fn validate(&self) -> Result<(), Error> { + if let Some(ref protocol) = self.protocol { + if protocol.0 != "http" && protocol.0 != "https" { + return Err(Error::bad_request("Bad XML: invalid 
protocol")); + } + } + Ok(()) + } +} + +impl RoutingRuleInner { + pub fn validate(&self) -> Result<(), Error> { + let has_prefix = self + .condition + .as_ref() + .and_then(|c| c.prefix.as_ref()) + .is_some(); + self.redirect.validate(has_prefix) + } +} + +impl Redirect { + pub fn validate(&self, has_prefix: bool) -> Result<(), Error> { + if self.replace_prefix.is_some() { + if self.replace_full.is_some() { + return Err(Error::bad_request( + "Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set", + )); + } + if !has_prefix { + return Err(Error::bad_request( + "Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't", + )); + } + } + if let Some(ref protocol) = self.protocol { + if protocol.0 != "http" && protocol.0 != "https" { + return Err(Error::bad_request("Bad XML: invalid protocol")); + } + } + // TODO there are probably more invalid cases, but which ones? + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use quick_xml::de::from_str; + + #[test] + fn test_deserialize() -> Result<(), Error> { + let message = r#" + + + my-error-doc + + + my-index + + + garage.tld + https + + + + + 404 + prefix1 + + + gara.ge + http + 303 + prefix2 + fullkey + + + +"#; + let conf: WebsiteConfiguration = from_str(message).unwrap(); + let ref_value = WebsiteConfiguration { + xmlns: (), + error_document: Some(Key { + key: Value("my-error-doc".to_owned()), + }), + index_document: Some(Suffix { + suffix: Value("my-index".to_owned()), + }), + redirect_all_requests_to: Some(Target { + hostname: Value("garage.tld".to_owned()), + protocol: Some(Value("https".to_owned())), + }), + routing_rules: Some(vec![RoutingRule { + inner: RoutingRuleInner { + condition: Some(Condition { + http_error_code: Some(IntValue(404)), + prefix: Some(Value("prefix1".to_owned())), + }), + redirect: Redirect { + hostname: Some(Value("gara.ge".to_owned())), + protocol: Some(Value("http".to_owned())), + http_redirect_code: Some(IntValue(303)), + replace_prefix: 
Some(Value("prefix2".to_owned())), + replace_full: Some(Value("fullkey".to_owned())), + }, + }, + }]), + }; + assert_eq! { + ref_value, + conf + } + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + Ok(()) + } +} diff --git a/src/api/s3/xml.rs b/src/api/s3/xml.rs index 5970c964..fdb36318 100644 --- a/src/api/s3/xml.rs +++ b/src/api/s3/xml.rs @@ -1,6 +1,33 @@ -use serde::Serialize; +use quick_xml::se::to_string; +use serde::{Deserialize, Serialize, Serializer}; -pub use garage_api_common::xml::{to_xml_with_header, xmlns_tag, xmlns_xsi_tag, IntValue, Value}; +use crate::error::Error as ApiError; + +pub fn to_xml_with_header(x: &T) -> Result { + let mut xml = r#""#.to_string(); + xml.push_str(&to_string(x)?); + Ok(xml) +} + +pub fn xmlns_tag(_v: &(), s: S) -> Result { + s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") +} + +pub fn xmlns_xsi_tag(_v: &(), s: S) -> Result { + s.serialize_str("http://www.w3.org/2001/XMLSchema-instance") +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Value(#[serde(rename = "$value")] pub String); + +impl From<&str> for Value { + fn from(s: &str) -> Value { + Value(s.to_string()) + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct IntValue(#[serde(rename = "$value")] pub i64); #[derive(Debug, Serialize, PartialEq, Eq)] pub struct Bucket { @@ -34,7 +61,7 @@ pub struct ListAllMyBucketsResult { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct LocationConstraint { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "$value")] pub region: String, @@ -70,13 +97,13 @@ pub struct DeleteError { pub key: Option, #[serde(rename = "Message")] pub message: Value, - #[serde(rename = "VersionId", skip_serializing_if = "Option::is_none")] + #[serde(rename = 
"VersionId")] pub version_id: Option, } #[derive(Debug, Serialize, PartialEq, Eq)] pub struct DeleteResult { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Deleted")] pub deleted: Vec, @@ -86,7 +113,7 @@ pub struct DeleteResult { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct InitiateMultipartUploadResult { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Bucket")] pub bucket: Value, @@ -98,7 +125,7 @@ pub struct InitiateMultipartUploadResult { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct CompleteMultipartUploadResult { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Location")] pub location: Option, @@ -108,18 +135,14 @@ pub struct CompleteMultipartUploadResult { pub key: Value, #[serde(rename = "ETag")] pub etag: Value, - #[serde(rename = "ChecksumCRC32", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumCRC32")] pub checksum_crc32: Option, - #[serde(rename = "ChecksumCRC32C", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumCRC32C")] pub checksum_crc32c: Option, - #[serde(rename = "ChecksumCR64NVME", skip_serializing_if = "Option::is_none")] - pub checksum_crc64nvme: Option, - #[serde(rename = "ChecksumSHA1", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumSHA1")] pub checksum_sha1: Option, - #[serde(rename = "ChecksumSHA256", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumSHA256")] pub checksum_sha256: Option, - #[serde(rename = "ChecksumType", skip_serializing_if = "Option::is_none")] - pub checksum_type: Option, } #[derive(Debug, Serialize, PartialEq, Eq)] @@ -148,21 +171,21 @@ pub struct ListMultipartItem { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct ListMultipartUploadsResult { - #[serde(rename 
= "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Bucket")] pub bucket: Value, - #[serde(rename = "KeyMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "KeyMarker")] pub key_marker: Option, - #[serde(rename = "UploadIdMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "UploadIdMarker")] pub upload_id_marker: Option, - #[serde(rename = "NextKeyMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "NextKeyMarker")] pub next_key_marker: Option, - #[serde(rename = "NextUploadIdMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "NextUploadIdMarker")] pub next_upload_id_marker: Option, #[serde(rename = "Prefix")] pub prefix: Value, - #[serde(rename = "Delimiter", skip_serializing_if = "Option::is_none")] + #[serde(rename = "Delimiter")] pub delimiter: Option, #[serde(rename = "MaxUploads")] pub max_uploads: IntValue, @@ -172,7 +195,7 @@ pub struct ListMultipartUploadsResult { pub upload: Vec, #[serde(rename = "CommonPrefixes")] pub common_prefixes: Vec, - #[serde(rename = "EncodingType", skip_serializing_if = "Option::is_none")] + #[serde(rename = "EncodingType")] pub encoding_type: Option, } @@ -186,21 +209,19 @@ pub struct PartItem { pub part_number: IntValue, #[serde(rename = "Size")] pub size: IntValue, - #[serde(rename = "ChecksumCRC32", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumCRC32")] pub checksum_crc32: Option, - #[serde(rename = "ChecksumCRC32C", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumCRC32C")] pub checksum_crc32c: Option, - #[serde(rename = "ChecksumCRC64NVME", skip_serializing_if = "Option::is_none")] - pub checksum_crc64nvme: Option, - #[serde(rename = "ChecksumSHA1", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ChecksumSHA1")] pub checksum_sha1: Option, - #[serde(rename = "ChecksumSHA256", skip_serializing_if = "Option::is_none")] + 
#[serde(rename = "ChecksumSHA256")] pub checksum_sha256: Option, } #[derive(Debug, Serialize, PartialEq, Eq)] pub struct ListPartsResult { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Bucket")] pub bucket: Value, @@ -208,12 +229,9 @@ pub struct ListPartsResult { pub key: Value, #[serde(rename = "UploadId")] pub upload_id: Value, - #[serde(rename = "PartNumberMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "PartNumberMarker")] pub part_number_marker: Option, - #[serde( - rename = "NextPartNumberMarker", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "NextPartNumberMarker")] pub next_part_number_marker: Option, #[serde(rename = "MaxParts")] pub max_parts: IntValue, @@ -251,32 +269,29 @@ pub struct CommonPrefix { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct ListBucketResult { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Name")] pub name: Value, #[serde(rename = "Prefix")] pub prefix: Value, - #[serde(rename = "Marker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "Marker")] pub marker: Option, - #[serde(rename = "NextMarker", skip_serializing_if = "Option::is_none")] + #[serde(rename = "NextMarker")] pub next_marker: Option, - #[serde(rename = "StartAfter", skip_serializing_if = "Option::is_none")] + #[serde(rename = "StartAfter")] pub start_after: Option, - #[serde(rename = "ContinuationToken", skip_serializing_if = "Option::is_none")] + #[serde(rename = "ContinuationToken")] pub continuation_token: Option, - #[serde( - rename = "NextContinuationToken", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "NextContinuationToken")] pub next_continuation_token: Option, - #[serde(rename = "KeyCount", skip_serializing_if = "Option::is_none")] + #[serde(rename = "KeyCount")] pub key_count: Option, #[serde(rename = 
"MaxKeys")] pub max_keys: IntValue, - #[serde(rename = "Delimiter", skip_serializing_if = "Option::is_none")] + #[serde(rename = "Delimiter")] pub delimiter: Option, - #[serde(rename = "EncodingType", skip_serializing_if = "Option::is_none")] + #[serde(rename = "EncodingType")] pub encoding_type: Option, #[serde(rename = "IsTruncated")] pub is_truncated: Value, @@ -288,15 +303,15 @@ pub struct ListBucketResult { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct VersioningConfiguration { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), - #[serde(rename = "Status", skip_serializing_if = "Option::is_none")] + #[serde(rename = "Status")] pub status: Option, } #[derive(Debug, Serialize, PartialEq, Eq)] pub struct PostObject { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), #[serde(rename = "Location")] pub location: Value, @@ -310,11 +325,11 @@ pub struct PostObject { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct Grantee { - #[serde(rename = "@xmlns:xsi", serialize_with = "xmlns_xsi_tag")] + #[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")] pub xmlns_xsi: (), - #[serde(rename = "@xsi:type")] + #[serde(rename = "xsi:type")] pub typ: String, - #[serde(rename = "DisplayName", skip_serializing_if = "Option::is_none")] + #[serde(rename = "DisplayName")] pub display_name: Option, #[serde(rename = "ID")] pub id: Option, @@ -336,9 +351,9 @@ pub struct AccessControlList { #[derive(Debug, Serialize, PartialEq, Eq)] pub struct AccessControlPolicy { - #[serde(rename = "@xmlns", serialize_with = "xmlns_tag")] + #[serde(serialize_with = "xmlns_tag")] pub xmlns: (), - #[serde(rename = "Owner", skip_serializing_if = "Option::is_none")] + #[serde(rename = "Owner")] pub owner: Option, #[serde(rename = "AccessControlList")] pub acl: AccessControlList, @@ -347,7 +362,6 @@ pub struct AccessControlPolicy { #[cfg(test)] mod tests { 
use super::*; - use crate::error::Error as ApiError; use garage_util::time::*; @@ -438,7 +452,7 @@ mod tests { assert_eq!( to_xml_with_header(&get_bucket_versioning)?, "\ -" +" ); let get_bucket_versioning2 = VersioningConfiguration { xmlns: (), @@ -573,7 +587,6 @@ mod tests { #[test] fn complete_multipart_upload_result() -> Result<(), ApiError> { - use garage_api_common::signature::checksum::COMPOSITE; let result = CompleteMultipartUploadResult { xmlns: (), location: Some(Value("https://garage.tld/mybucket/a/plop".to_string())), @@ -582,10 +595,8 @@ mod tests { etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()), checksum_crc32: None, checksum_crc32c: None, - checksum_crc64nvme: None, checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())), checksum_sha256: None, - checksum_type: Some(Value(COMPOSITE.into())), }; assert_eq!( to_xml_with_header(&result)?, @@ -596,7 +607,6 @@ mod tests { a/plop\ "3858f62230ac3c915f300c664312c11f-9"\ ZJAnHyG8PeKz9tI8UTcHrJos39A=\ - COMPOSITE\ " ); Ok(()) @@ -870,7 +880,6 @@ mod tests { size: IntValue(10485760), checksum_crc32: None, checksum_crc32c: None, - checksum_crc64nvme: None, checksum_sha256: Some(Value( "5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(), )), @@ -884,7 +893,6 @@ mod tests { checksum_sha256: None, checksum_crc32c: None, checksum_crc32: Some(Value("ZJAnHyG8=".into())), - checksum_crc64nvme: None, checksum_sha1: None, }, ], diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml index ed00b683..c4dbba44 100644 --- a/src/block/Cargo.toml +++ b/src/block/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_block" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -18,6 +18,7 @@ garage_db.workspace = true garage_net.workspace = true garage_rpc.workspace = true garage_util.workspace = true +garage_table.workspace = true opentelemetry.workspace = true @@ -39,7 +40,4 @@ tokio.workspace = true tokio-util.workspace = true [features] 
-system-libs = ["zstd/pkg-config"] - -[lints] -workspace = true +system-libs = [ "zstd/pkg-config" ] diff --git a/src/block/block.rs b/src/block/block.rs index 867aa546..bd95680e 100644 --- a/src/block/block.rs +++ b/src/block/block.rs @@ -89,7 +89,7 @@ impl DataBlock { return DataBlock::compressed(data_compressed.into()); } } - DataBlock::plain(data) + DataBlock::plain(data.into()) }) .await .unwrap() diff --git a/src/block/layout.rs b/src/block/layout.rs index dd153d6e..00e3debb 100644 --- a/src/block/layout.rs +++ b/src/block/layout.rs @@ -21,7 +21,7 @@ pub(crate) struct DataLayout { pub(crate) data_dirs: Vec, markers: HashMap, - /// Primary storage location (index in `data_dirs`) for each partition + /// Primary storage location (index in data_dirs) for each partition /// = the location where the data is supposed to be, blocks are always /// written there (copies in other dirs may be deleted if they exist) pub(crate) part_prim: Vec, @@ -159,7 +159,7 @@ impl DataLayout { for (idir, parts) in dir_prim.iter().enumerate() { for part in parts.iter() { assert!(part_prim[*part].is_none()); - part_prim[*part] = Some(idir as Idx); + part_prim[*part] = Some(idir as Idx) } } @@ -262,7 +262,7 @@ impl DataLayout { pub(crate) fn primary_block_dir(&self, hash: &Hash) -> PathBuf { let ipart = self.partition_from(hash); let idir = self.part_prim[ipart] as usize; - self.block_dir_from(hash, self.data_dirs[idir].path.clone()) + self.block_dir_from(hash, &self.data_dirs[idir].path) } pub(crate) fn secondary_block_dirs<'a>( @@ -272,7 +272,7 @@ impl DataLayout { let ipart = self.partition_from(hash); self.part_sec[ipart] .iter() - .map(move |idir| self.block_dir_from(hash, self.data_dirs[*idir as usize].path.clone())) + .map(move |idir| self.block_dir_from(hash, &self.data_dirs[*idir as usize].path)) } fn partition_from(&self, hash: &Hash) -> usize { @@ -283,7 +283,8 @@ impl DataLayout { % DRIVE_NPART } - fn block_dir_from(&self, hash: &Hash, mut path: PathBuf) -> PathBuf { + fn 
block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf { + let mut path = dir.clone(); path.push(hex::encode(&hash.as_slice()[0..1])); path.push(hex::encode(&hash.as_slice()[1..2])); path @@ -325,7 +326,7 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result, Error> { let mut ok = false; for dir in dirs.iter() { let state = match &dir.capacity { - Some(cap) if !dir.read_only => { + Some(cap) if dir.read_only == false => { let capacity = cap.parse::() .ok_or_message("invalid capacity value")?.as_u64(); if capacity == 0 { @@ -336,7 +337,7 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result, Error> { capacity, } } - None if dir.read_only => { + None if dir.read_only == true => { DataDirState::ReadOnly } _ => return Err(Error::Message(format!("data directories in data_dir should have a capacity value or be marked read_only, not the case for {}", dir.path.to_string_lossy()))), @@ -358,7 +359,7 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result, Error> { } fn dir_not_empty(path: &PathBuf) -> Result { - for entry in std::fs::read_dir(path)? { + for entry in std::fs::read_dir(&path)? 
{ let dir = entry?; let ft = dir.file_type()?; let name = dir.file_name().into_string().ok(); diff --git a/src/block/manager.rs b/src/block/manager.rs index 4765f441..96ca9c90 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -33,6 +33,8 @@ use garage_rpc::rpc_helper::OrderTag; use garage_rpc::system::System; use garage_rpc::*; +use garage_table::replication::{TableReplication, TableShardedReplication}; + use crate::block::*; use crate::layout::*; use crate::metrics::*; @@ -74,8 +76,8 @@ impl Rpc for BlockRpc { /// The block manager, handling block exchange between nodes, and block storage on local node pub struct BlockManager { - /// Quorum of nodes for write operations - pub write_quorum: usize, + /// Replication strategy, allowing to find on which node blocks should be located + pub replication: TableShardedReplication, /// Data layout pub(crate) data_layout: ArcSwap, @@ -123,7 +125,7 @@ impl BlockManager { pub fn new( db: &db::Db, config: &Config, - write_quorum: usize, + replication: TableShardedReplication, system: Arc, ) -> Result, Error> { // Load or compute layout, i.e. 
assignment of data blocks to the different data directories @@ -167,13 +169,13 @@ impl BlockManager { let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info"); let block_manager = Arc::new(Self { - write_quorum, + replication, data_layout: ArcSwap::new(Arc::new(data_layout)), data_layout_persister, data_fsync: config.data_fsync, disable_scrub: config.disable_scrub, compression_level: config.compression_level, - mutation_lock: [(); MUTEX_COUNT] + mutation_lock: vec![(); MUTEX_COUNT] .iter() .map(|_| Mutex::new(BlockManagerLocked())) .collect::>(), @@ -290,7 +292,7 @@ impl BlockManager { let who = self .system .rpc_helper() - .block_read_nodes_of(hash, self.system.rpc_helper())?; + .block_read_nodes_of(hash, self.system.rpc_helper()); for node in who.iter() { let node_id = NodeID::from(*node); @@ -341,16 +343,6 @@ impl BlockManager { Err(err) } - /// Returns the set of nodes that should store a copy of a given block. - /// These are the nodes assigned to the block's hash in the current - /// layout version only: since blocks are immutable, we don't need to - /// do complex logic when several layout versions are active at once, - /// just move them directly to the new nodes. 
- pub(crate) fn storage_nodes_of(&self, hash: &Hash) -> Result, Error> { - let cluster_layout = self.system.cluster_layout(); - Ok(cluster_layout.current()?.nodes_of(hash).collect()) - } - // ---- Public interface ---- /// Ask nodes that might have a block for it, return it as a stream @@ -383,7 +375,7 @@ impl BlockManager { prevent_compression: bool, order_tag: Option, ) -> Result<(), Error> { - let who = self.storage_nodes_of(&hash)?; + let who = self.system.cluster_layout().current_storage_nodes_of(&hash); let compression_level = self.compression_level.filter(|_| !prevent_compression); let (header, bytes) = DataBlock::from_buffer(data, compression_level) @@ -413,7 +405,7 @@ impl BlockManager { put_block_rpc, RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY) .with_drop_on_completion(permit) - .with_quorum(self.write_quorum), + .with_quorum(self.replication.write_quorum()), ) .await?; @@ -569,10 +561,12 @@ impl BlockManager { async { match self.find_block(hash).await { Some(p) => self.read_block_from(hash, &p).await, - None => Err(Error::Message(format!( - "block {:?} not found on node", - hash - ))), + None => { + return Err(Error::Message(format!( + "block {:?} not found on node", + hash + ))); + } } } .bound_record_duration(&self.metrics.block_read_duration) @@ -780,7 +774,7 @@ impl BlockManagerLocked { assert!(to_delete.as_ref() != Some(&tgt_path)); let mut path_tmp = tgt_path.clone(); - let tmp_extension = format!("tmp{}", hex::encode(rand::rng().random::<[u8; 4]>())); + let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>())); path_tmp.set_extension(tmp_extension); fs::create_dir_all(&directory).await?; diff --git a/src/block/metrics.rs b/src/block/metrics.rs index 7a07018c..81021fe1 100644 --- a/src/block/metrics.rs +++ b/src/block/metrics.rs @@ -6,7 +6,7 @@ use opentelemetry::{global, metrics::*}; use garage_db as db; -/// `TableMetrics` reference all counter used for metrics +/// TableMetrics reference all counter used 
for metrics pub struct BlockManagerMetrics { pub(crate) _compression_level: ValueObserver, pub(crate) _rc_size: ValueObserver, @@ -52,7 +52,7 @@ impl BlockManagerMetrics { _rc_size: meter .u64_value_observer("block.rc_size", move |observer| { if let Ok(value) = rc_tree.approximate_len() { - observer.observe(value as u64, &[]); + observer.observe(value as u64, &[]) } }) .with_description("Number of blocks known to the reference counter") @@ -78,7 +78,7 @@ impl BlockManagerMetrics { _buffer_free_kb: meter .u64_value_observer("block.ram_buffer_free_kb", move |observer| { - observer.observe(buffer_semaphore.available_permits() as u64, &[]); + observer.observe(buffer_semaphore.available_permits() as u64, &[]) }) .with_description( "Available RAM in KiB to use for buffering data blocks to be written to remote nodes", diff --git a/src/block/rc.rs b/src/block/rc.rs index 881df68b..4a55ee29 100644 --- a/src/block/rc.rs +++ b/src/block/rc.rs @@ -37,7 +37,7 @@ impl BlockRc { match old_rc.increment().serialize() { Some(x) => tx.insert(&self.rc_table, hash, x)?, None => unreachable!(), - } + }; Ok(old_rc.is_zero()) } @@ -52,7 +52,7 @@ impl BlockRc { match new_rc.serialize() { Some(x) => tx.insert(&self.rc_table, hash, x)?, None => tx.remove(&self.rc_table, hash)?, - } + }; Ok(matches!(new_rc, RcEntry::Deletable { .. })) } @@ -72,7 +72,7 @@ impl BlockRc { tx.remove(&self.rc_table, hash)?; } _ => (), - } + }; Ok(()) })?; Ok(()) @@ -89,7 +89,7 @@ impl BlockRc { .transaction(|tx| { let mut cnt = 0; for f in recalc_fns.iter() { - cnt += f(tx, hash)?; + cnt += f(&tx, hash)?; } let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?); trace!( @@ -136,14 +136,14 @@ impl BlockRc { pub(crate) enum RcEntry { /// Present: the block has `count` references, with `count` > 0. 
/// - /// This is stored as `u64::to_be_bytes(count)` + /// This is stored as u64::to_be_bytes(count) Present { count: u64 }, /// Deletable: the block has zero references, and can be deleted - /// once time (returned by `now_msec`) is larger than `at_time` + /// once time (returned by now_msec) is larger than at_time /// (in millis since Unix epoch) /// - /// This is stored as [0u8; 8] followed by `u64::to_be_bytes(at_time)`, + /// This is stored as [0u8; 8] followed by u64::to_be_bytes(at_time), /// (this allows for the data format to be backwards compatible with /// previous Garage versions that didn't have this intermediate state) Deletable { at_time: u64 }, diff --git a/src/block/repair.rs b/src/block/repair.rs index 343574f8..ef271094 100644 --- a/src/block/repair.rs +++ b/src/block/repair.rs @@ -127,7 +127,7 @@ impl Worker for RepairWorker { self.manager .resync .put_to_resync(&hash, Duration::from_secs(0))?; - self.next_start = Some(hash); + self.next_start = Some(hash) } Ok(WorkerState::Busy) @@ -248,7 +248,7 @@ fn randomize_next_scrub_run_time(timestamp: u64) -> u64 { timestamp + SCRUB_INTERVAL .saturating_add(Duration::from_secs( - rand::rng().random_range(0..3600 * 24 * 10), + rand::thread_rng().gen_range(0..3600 * 24 * 10), )) .as_millis() as u64 } @@ -440,7 +440,7 @@ impl Worker for ScrubWorker { Ok(cmd) => self.handle_cmd(cmd).await, Err(mpsc::error::TryRecvError::Disconnected) => return Ok(WorkerState::Done), Err(mpsc::error::TryRecvError::Empty) => (), - } + }; match &mut self.work { ScrubWorkerState::Running { iterator, t_cp } => { @@ -455,7 +455,7 @@ impl Worker for ScrubWorker { } Err(e) => return Err(e), _ => (), - } + }; if now - *t_cp > 60 * 1000 { self.persister @@ -558,7 +558,7 @@ impl Worker for RebalanceWorker { } fn status(&self) -> WorkerStatus { - let t_cur = self.t_finished.unwrap_or_else(now_msec); + let t_cur = self.t_finished.unwrap_or_else(|| now_msec()); let rate = self.moved_bytes / std::cmp::max(1, (t_cur - self.t_started) / 
1000); let mut freeform = vec![ format!("Blocks moved: {}", self.moved), @@ -570,7 +570,7 @@ impl Worker for RebalanceWorker { format!("Started: {}", msec_to_rfc3339(self.t_started)), ]; if let Some(t_fin) = self.t_finished { - freeform.push(format!("Finished: {}", msec_to_rfc3339(t_fin))); + freeform.push(format!("Finished: {}", msec_to_rfc3339(t_fin))) } WorkerStatus { progress: Some(format!("{:.2}%", self.block_iter.progress() * 100.)), diff --git a/src/block/resync.rs b/src/block/resync.rs index 003c1172..7056a828 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -27,6 +27,8 @@ use garage_util::tranquilizer::Tranquilizer; use garage_rpc::system::System; use garage_rpc::*; +use garage_table::replication::TableReplication; + use crate::manager::*; // The delay between the time where a resync operation fails @@ -383,8 +385,11 @@ impl BlockResyncManager { info!("Resync block {:?}: offloading and deleting", hash); let existing_path = existing_path.unwrap(); - let mut who = manager.storage_nodes_of(hash)?; - if who.len() < manager.write_quorum { + let mut who = manager + .system + .cluster_layout() + .current_storage_nodes_of(hash); + if who.len() < manager.replication.write_quorum() { return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string())); } who.retain(|id| *id != manager.system.id); @@ -466,7 +471,10 @@ impl BlockResyncManager { // First, check whether we are still supposed to store that // block in the latest cluster layout version. 
- let storage_nodes = manager.storage_nodes_of(hash)?; + let storage_nodes = manager + .system + .cluster_layout() + .current_storage_nodes_of(&hash); if !storage_nodes.contains(&manager.system.id) { info!( @@ -588,7 +596,7 @@ impl Worker for ResyncWorker { async fn wait_for_work(&mut self) -> WorkerState { while self.index >= self.persister.get_with(|x| x.n_workers) { - self.manager.resync.notify.notified().await; + self.manager.resync.notify.notified().await } select! { diff --git a/src/db/Cargo.toml b/src/db/Cargo.toml index 474593f5..9e860e7d 100644 --- a/src/db/Cargo.toml +++ b/src/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_db" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -28,11 +28,8 @@ parking_lot = { workspace = true, optional = true } mktemp.workspace = true [features] -default = ["lmdb", "sqlite"] -bundled-libs = ["rusqlite?/bundled"] -lmdb = ["heed"] -fjall = ["dep:fjall", "dep:parking_lot"] -sqlite = ["rusqlite", "r2d2", "r2d2_sqlite"] - -[lints] -workspace = true +default = [ "lmdb", "sqlite" ] +bundled-libs = [ "rusqlite?/bundled" ] +lmdb = [ "heed" ] +fjall = [ "dep:fjall", "dep:parking_lot" ] +sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ] diff --git a/src/db/fjall_adapter.rs b/src/db/fjall_adapter.rs index 9e9efe9f..25913a1f 100644 --- a/src/db/fjall_adapter.rs +++ b/src/db/fjall_adapter.rs @@ -1,6 +1,6 @@ use core::ops::Bound; -use std::path::Path; +use std::path::PathBuf; use std::sync::Arc; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard}; @@ -20,7 +20,7 @@ pub use fjall; // -- -pub(crate) fn open_db(path: &Path, opt: &OpenOpt) -> Result { +pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result { info!("Opening Fjall database at: {}", path.display()); if opt.fsync { return Err(Error( @@ -105,14 +105,15 @@ impl IDb for FjallDb { } fn list_trees(&self) -> Result> { - self.keyspace + Ok(self + .keyspace .list_partitions() .iter() - .map(|n| 
decode_name(n)) - .collect::>>() + .map(|n| decode_name(&n)) + .collect::>>()?) } - fn snapshot(&self, base_path: &Path) -> Result<()> { + fn snapshot(&self, base_path: &PathBuf) -> Result<()> { std::fs::create_dir_all(base_path)?; let path = Engine::Fjall.db_path(base_path); @@ -271,7 +272,7 @@ impl<'a> FjallTx<'a> { fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalPartitionHandle> { self.trees.get(i).map(|tup| &tup.1).ok_or_else(|| { TxOpError(Error( - "invalid tree id (it might have been opened after the transaction started)".into(), + "invalid tree id (it might have been openned after the transaction started)".into(), )) }) } @@ -287,7 +288,7 @@ impl<'a> ITx for FjallTx<'a> { } fn len(&self, tree_idx: usize) -> TxOpResult { let tree = self.get_tree(tree_idx)?; - Ok(self.tx.len(tree)?) + Ok(self.tx.len(tree)? as usize) } fn insert(&mut self, tree_idx: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> { @@ -324,7 +325,7 @@ impl<'a> ITx for FjallTx<'a> { let high = clone_bound(high); Ok(Box::new( self.tx - .range::, ByteVecRangeBounds>(tree, (low, high)) + .range::, ByteVecRangeBounds>(&tree, (low, high)) .map(iterator_remap_tx), )) } @@ -339,7 +340,7 @@ impl<'a> ITx for FjallTx<'a> { let high = clone_bound(high); Ok(Box::new( self.tx - .range::, ByteVecRangeBounds>(tree, (low, high)) + .range::, ByteVecRangeBounds>(&tree, (low, high)) .rev() .map(iterator_remap_tx), )) diff --git a/src/db/lib.rs b/src/db/lib.rs index bb04d6cc..2a467c7c 100644 --- a/src/db/lib.rs +++ b/src/db/lib.rs @@ -17,7 +17,7 @@ use core::ops::{Bound, RangeBounds}; use std::borrow::Cow; use std::cell::Cell; -use std::path::Path; +use std::path::PathBuf; use std::sync::Arc; use thiserror::Error; @@ -133,7 +133,7 @@ impl Db { Err(TxError::Db(tx_e)) } (Err(TxError::Db(tx_e)), Some(Ok(_))) => { - // Transaction encounterred a DB error when committing the transaction, + // Transaction encounterred a DB error when commiting the transaction, // after user code was called 
Err(TxError::Db(tx_e)) } @@ -147,7 +147,7 @@ impl Db { } } - pub fn snapshot(&self, path: &Path) -> Result<()> { + pub fn snapshot(&self, path: &PathBuf) -> Result<()> { self.0.snapshot(path) } @@ -222,13 +222,9 @@ impl Tree { } #[inline] pub fn get_gt>(&self, from: T) -> Result> { - if from.as_ref().is_empty() { - self.iter()?.next().transpose() - } else { - self.range((Bound::Excluded(from), Bound::Unbounded))? - .next() - .transpose() - } + self.range((Bound::Excluded(from), Bound::Unbounded))? + .next() + .transpose() } /// Returns the old value if there was one @@ -352,7 +348,7 @@ pub(crate) trait IDb: Send + Sync { fn engine(&self) -> String; fn open_tree(&self, name: &str) -> Result; fn list_trees(&self) -> Result>; - fn snapshot(&self, path: &Path) -> Result<()>; + fn snapshot(&self, path: &PathBuf) -> Result<()>; fn get(&self, tree: usize, key: &[u8]) -> Result>; fn approximate_len(&self, tree: usize) -> Result; diff --git a/src/db/lmdb_adapter.rs b/src/db/lmdb_adapter.rs index c5360d04..ac185ae9 100644 --- a/src/db/lmdb_adapter.rs +++ b/src/db/lmdb_adapter.rs @@ -3,14 +3,12 @@ use core::ops::Bound; use std::collections::HashMap; use std::convert::TryInto; use std::marker::PhantomPinned; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::pin::Pin; use std::sync::{Arc, RwLock}; -use heed::types::Bytes; -use heed::{BytesDecode, Env, EnvFlags, RoTxn, RwTxn, WithTls}; - -type Database = heed::Database; +use heed::types::ByteSlice; +use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database}; use crate::{ open::{Engine, OpenOpt}, @@ -24,7 +22,7 @@ pub use heed; pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result { info!("Opening LMDB database at: {}", path.display()); - if let Err(e) = std::fs::create_dir_all(path) { + if let Err(e) = std::fs::create_dir_all(&path) { return Err(Error( format!("Unable to create LMDB data directory: {}", e).into(), )); @@ -39,23 +37,24 @@ pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) 
-> Result { env_builder.max_dbs(100); env_builder.map_size(map_size); env_builder.max_readers(2048); - let mut env_flags = EnvFlags::NO_READ_AHEAD | EnvFlags::NO_META_SYNC; - if !opt.fsync { - env_flags |= EnvFlags::NO_SYNC; + unsafe { + env_builder.flag(heed::flags::Flags::MdbNoRdAhead); + env_builder.flag(heed::flags::Flags::MdbNoMetaSync); + if !opt.fsync { + env_builder.flag(heed::flags::Flags::MdbNoSync); + } } - let open_res = unsafe { - env_builder.flags(env_flags); - env_builder.open(path) - }; - match open_res { - Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => Err(Error( - "OutOfMemory error while trying to open LMDB database. This can happen \ + match env_builder.open(&path) { + Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => { + return Err(Error( + "OutOfMemory error while trying to open LMDB database. This can happen \ if your operating system is not allowing you to use sufficient virtual \ memory address space. Please check that no limit is set (ulimit -v). \ You may also try to set a smaller `lmdb_map_size` configuration parameter. \ On 32-bit machines, you should probably switch to another database engine." - .into(), - )), + .into(), + )) + } Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())), Ok(db) => Ok(LmdbDb::init(db)), } @@ -112,9 +111,7 @@ impl IDb for LmdbDb { if let Some(i) = trees.1.get(name) { Ok(*i) } else { - let mut wtxn = self.db.write_txn()?; - let tree = self.db.create_database(&mut wtxn, Some(name))?; - wtxn.commit()?; + let tree = self.db.create_database(Some(name))?; let i = trees.0.len(); trees.0.push(tree); trees.1.insert(name.to_string(), i); @@ -123,37 +120,34 @@ impl IDb for LmdbDb { } fn list_trees(&self) -> Result> { - let rtxn = self.db.read_txn()?; - let tree0 = match self - .db - .open_database::(&rtxn, None)? - { + let tree0 = match self.db.open_database::(None)? 
{ Some(x) => x, None => return Ok(vec![]), }; let mut ret = vec![]; - for item in tree0.iter(&rtxn)? { + let tx = self.db.read_txn()?; + for item in tree0.iter(&tx)? { let (tree_name, _) = item?; ret.push(tree_name.to_string()); } + drop(tx); let mut ret2 = vec![]; for tree_name in ret { if self .db - .open_database::(&rtxn, Some(&tree_name))? + .open_database::(Some(&tree_name))? .is_some() { ret2.push(tree_name); } } - drop(rtxn); Ok(ret2) } - fn snapshot(&self, base_path: &Path) -> Result<()> { + fn snapshot(&self, base_path: &PathBuf) -> Result<()> { std::fs::create_dir_all(base_path)?; let path = Engine::Lmdb.db_path(base_path); self.db @@ -266,11 +260,11 @@ impl IDb for LmdbDb { Ok(on_commit) } TxFnResult::Abort => { - tx.tx.abort(); + tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?; Err(TxError::Abort(())) } TxFnResult::DbErr => { - tx.tx.abort(); + tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?; Err(TxError::Db(Error( "(this message will be discarded)".into(), ))) @@ -283,7 +277,7 @@ impl IDb for LmdbDb { struct LmdbTx<'a> { trees: &'a [Database], - tx: RwTxn<'a>, + tx: RwTxn<'a, 'a>, } impl<'a> LmdbTx<'a> { @@ -363,15 +357,15 @@ impl<'a> ITx for LmdbTx<'a> { // therefore a bit of unsafe code (it is a self-referential struct) type IteratorItem<'a> = heed::Result<( - >::DItem, - >::DItem, + >::DItem, + >::DItem, )>; struct TxAndIterator<'a, I> where I: Iterator> + 'a, { - tx: RoTxn<'a, WithTls>, + tx: RoTxn<'a>, iter: Option, _pin: PhantomPinned, } @@ -386,7 +380,7 @@ where } /// Safety: iterfun must not store its argument anywhere but in its result. - unsafe fn make(tx: RoTxn<'a, WithTls>, iterfun: F) -> Result> + unsafe fn make(tx: RoTxn<'a>, iterfun: F) -> Result> where F: FnOnce(&'a RoTxn<'a>) -> Result, { @@ -403,12 +397,9 @@ where // this reference will only be stored and accessed from the // returned ValueIter which guarantees that it is destroyed // before the tx it is pointing to. 
- #[expect(clippy::deref_addrof)] - unsafe { - &*&raw const *tx - } + unsafe { &*&raw const *tx } }; - let iter = iterfun(tx_lifetime_overextended)?; + let iter = iterfun(&tx_lifetime_overextended)?; *boxed.as_mut().iter() = Some(iter); diff --git a/src/db/open.rs b/src/db/open.rs index dad0492a..23391c61 100644 --- a/src/db/open.rs +++ b/src/db/open.rs @@ -1,4 +1,4 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use crate::{Db, Error, Result}; @@ -25,13 +25,20 @@ impl Engine { } /// Return engine-specific DB path from base path - pub fn db_path(&self, base_path: &Path) -> PathBuf { - let suffix = match self { - Self::Lmdb => "db.lmdb", - Self::Sqlite => "db.sqlite", - Self::Fjall => "db.fjall", - }; - base_path.join(suffix) + pub fn db_path(&self, base_path: &PathBuf) -> PathBuf { + let mut ret = base_path.clone(); + match self { + Self::Lmdb => { + ret.push("db.lmdb"); + } + Self::Sqlite => { + ret.push("db.sqlite"); + } + Self::Fjall => { + ret.push("db.fjall"); + } + } + ret } } @@ -61,13 +68,22 @@ impl std::str::FromStr for Engine { } } -#[derive(Default)] pub struct OpenOpt { pub fsync: bool, pub lmdb_map_size: Option, pub fjall_block_cache_size: Option, } +impl Default for OpenOpt { + fn default() -> Self { + Self { + fsync: false, + lmdb_map_size: None, + fjall_block_cache_size: None, + } + } +} + pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result { match engine { // ---- Sqlite DB ---- diff --git a/src/db/sqlite_adapter.rs b/src/db/sqlite_adapter.rs index b640e273..a03ee8ef 100644 --- a/src/db/sqlite_adapter.rs +++ b/src/db/sqlite_adapter.rs @@ -1,7 +1,7 @@ use core::ops::Bound; use std::marker::PhantomPinned; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::pin::Pin; use std::ptr::NonNull; use std::sync::{Arc, Mutex, RwLock}; @@ -23,7 +23,7 @@ pub use rusqlite; pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result { info!("Opening Sqlite database at: {}", path.display()); let manager = 
r2d2_sqlite::SqliteConnectionManager::file(path); - SqliteDb::open(manager, opt.fsync) + Ok(SqliteDb::new(manager, opt.fsync)?) } // ---- @@ -62,7 +62,7 @@ pub struct SqliteDb { } impl SqliteDb { - pub fn open(manager: SqliteConnectionManager, sync_mode: bool) -> Result { + pub fn new(manager: SqliteConnectionManager, sync_mode: bool) -> Result { let manager = manager.with_init(move |db| { db.pragma_update(None, "journal_mode", "WAL")?; if sync_mode { @@ -110,7 +110,7 @@ impl IDb for SqliteDb { let name = format!("tree_{}", name.replace(':', "_COLON_")); let mut trees = self.trees.write().unwrap(); - if let Some(i) = trees.iter().position(|x| x.as_ref() == name) { + if let Some(i) = trees.iter().position(|x| x.as_ref() == &name) { Ok(i) } else { let db = self.db.get()?; @@ -150,10 +150,10 @@ impl IDb for SqliteDb { Ok(trees) } - fn snapshot(&self, base_path: &Path) -> Result<()> { + fn snapshot(&self, base_path: &PathBuf) -> Result<()> { std::fs::create_dir_all(base_path)?; let path = Engine::Sqlite - .db_path(base_path) + .db_path(&base_path) .into_os_string() .into_string() .map_err(|_| Error("invalid sqlite path string".into()))?; @@ -308,7 +308,7 @@ impl IDb for SqliteDb { trace!("transaction done"); drop(lock); - res + return res; } } @@ -564,7 +564,7 @@ fn bounds_sql<'r>(low: Bound<&'r [u8]>, high: Bound<&'r [u8]>) -> (String, Vec (), - } + }; match high { Bound::Included(b) => { diff --git a/src/db/test.rs b/src/db/test.rs index f5f62b30..977dc965 100644 --- a/src/db/test.rs +++ b/src/db/test.rs @@ -21,7 +21,7 @@ fn test_suite(db: Db) { let res = db.transaction::<_, (), _>(|tx| { assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va); - let _: () = tx.insert(&tree, ka, vb).unwrap(); + assert_eq!(tx.insert(&tree, ka, vb).unwrap(), ()); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); @@ -33,7 +33,7 @@ fn test_suite(db: Db) { let res = db.transaction::<(), _, _>(|tx| { assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); - let _: () = tx.insert(&tree, ka, 
vc).unwrap(); + assert_eq!(tx.insert(&tree, ka, vc).unwrap(), ()); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc); @@ -130,12 +130,10 @@ fn test_lmdb_db() { use crate::lmdb_adapter::LmdbDb; let path = mktemp::Temp::new_dir().unwrap(); - let db = unsafe { - heed::EnvOpenOptions::new() - .max_dbs(100) - .open(&path) - .unwrap() - }; + let db = heed::EnvOpenOptions::new() + .max_dbs(100) + .open(&path) + .unwrap(); let db = LmdbDb::init(db); test_suite(db); drop(path); @@ -147,7 +145,7 @@ fn test_sqlite_db() { use crate::sqlite_adapter::SqliteDb; let manager = r2d2_sqlite::SqliteConnectionManager::memory(); - let db = SqliteDb::open(manager, false).unwrap(); + let db = SqliteDb::new(manager, false).unwrap(); test_suite(db); } diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index 0c7eb89b..a4f695a4 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -26,7 +26,6 @@ garage_db.workspace = true garage_api_admin.workspace = true garage_api_s3.workspace = true garage_api_k2v = { workspace = true, optional = true } -garage_api_common.workspace = true garage_block.workspace = true garage_model.workspace = true garage_net.workspace = true @@ -38,7 +37,6 @@ garage_web.workspace = true backtrace.workspace = true bytes.workspace = true bytesize.workspace = true -chrono.workspace = true timeago.workspace = true parse_duration.workspace = true hex.workspace = true @@ -49,8 +47,8 @@ sha1.workspace = true sodiumoxide.workspace = true structopt.workspace = true git-version.workspace = true -utoipa.workspace = true -serde_json.workspace = true + +serde.workspace = true futures.workspace = true tokio.workspace = true @@ -80,45 +78,38 @@ static_init.workspace = true assert-json-diff.workspace = true serde_json.workspace = true base64.workspace = true -crc-fast.workspace = true +crc32fast.workspace = true 
k2v-client.workspace = true [features] -default = ["bundled-libs", "metrics", "lmdb", "sqlite", "k2v"] +default = [ "bundled-libs", "metrics", "lmdb", "sqlite", "k2v" ] -k2v = ["garage_util/k2v", "garage_api_k2v", "garage_api_admin/k2v"] +k2v = [ "garage_util/k2v", "garage_api_k2v" ] # Database engines -lmdb = ["garage_model/lmdb"] -sqlite = ["garage_model/sqlite"] -fjall = ["garage_model/fjall"] +lmdb = [ "garage_model/lmdb" ] +sqlite = [ "garage_model/sqlite" ] +fjall = [ "garage_model/fjall" ] # Automatic registration and discovery via Consul API -consul-discovery = ["garage_rpc/consul-discovery"] +consul-discovery = [ "garage_rpc/consul-discovery" ] # Automatic registration and discovery via Kubernetes API -kubernetes-discovery = ["garage_rpc/kubernetes-discovery"] +kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ] # Prometheus exporter (/metrics endpoint). -metrics = ["garage_api_admin/metrics", "opentelemetry-prometheus"] +metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ] # Exporter for the OpenTelemetry Collector. -telemetry-otlp = ["opentelemetry-otlp"] +telemetry-otlp = [ "opentelemetry-otlp" ] # Logging to syslog -syslog = ["syslog-tracing"] +syslog = [ "syslog-tracing" ] # Logging to journald -journald = ["tracing-journald"] +journald = [ "tracing-journald" ] # NOTE: bundled-libs and system-libs should be treat as mutually exclusive; # exactly one of them should be enabled. # Use bundled libsqlite instead of linking against system-provided. -bundled-libs = ["garage_db/bundled-libs"] +bundled-libs = [ "garage_db/bundled-libs" ] # Link against system-provided libsodium and libzstd. 
-system-libs = [ - "garage_block/system-libs", - "garage_rpc/system-libs", - "sodiumoxide/use-pkg-config", -] - -[lints] -workspace = true +system-libs = [ "garage_block/system-libs", "garage_rpc/system-libs", "sodiumoxide/use-pkg-config" ] diff --git a/src/garage/admin/block.rs b/src/garage/admin/block.rs new file mode 100644 index 00000000..5f908ce4 --- /dev/null +++ b/src/garage/admin/block.rs @@ -0,0 +1,243 @@ +use garage_util::data::*; + +use garage_table::*; + +use garage_model::helper::error::{Error, OkOrBadRequest}; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::cli::*; + +use super::*; + +impl AdminRpcHandler { + pub(super) async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result { + match cmd { + BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList( + self.garage.block_manager.list_resync_errors()?, + )), + BlockOperation::Info { hash } => self.handle_block_info(hash).await, + BlockOperation::RetryNow { all, blocks } => { + self.handle_block_retry_now(*all, blocks).await + } + BlockOperation::Purge { yes, blocks } => self.handle_block_purge(*yes, blocks).await, + } + } + + async fn handle_block_info(&self, hash: &String) -> Result { + let hash = self.find_block_hash_by_prefix(hash)?; + let refcount = self.garage.block_manager.get_block_rc(&hash)?; + let block_refs = self + .garage + .block_ref_table + .get_range(&hash, None, None, 10000, Default::default()) + .await?; + let mut versions = vec![]; + let mut uploads = vec![]; + for br in block_refs { + if let Some(v) = self + .garage + .version_table + .get(&br.version, &EmptyKey) + .await? + { + if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink { + if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? 
{ + uploads.push(u); + } + } + versions.push(Ok(v)); + } else { + versions.push(Err(br.version)); + } + } + Ok(AdminRpc::BlockInfo { + hash, + refcount, + versions, + uploads, + }) + } + + async fn handle_block_retry_now( + &self, + all: bool, + blocks: &[String], + ) -> Result { + if all { + if !blocks.is_empty() { + return Err(Error::BadRequest( + "--all was specified, cannot also specify blocks".into(), + )); + } + let blocks = self.garage.block_manager.list_resync_errors()?; + for b in blocks.iter() { + self.garage.block_manager.resync.clear_backoff(&b.hash)?; + } + Ok(AdminRpc::Ok(format!( + "{} blocks returned in queue for a retry now (check logs to see results)", + blocks.len() + ))) + } else { + for hash in blocks { + let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; + let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; + self.garage.block_manager.resync.clear_backoff(&hash)?; + } + Ok(AdminRpc::Ok(format!( + "{} blocks returned in queue for a retry now (check logs to see results)", + blocks.len() + ))) + } + } + + async fn handle_block_purge(&self, yes: bool, blocks: &[String]) -> Result { + if !yes { + return Err(Error::BadRequest( + "Pass the --yes flag to confirm block purge operation.".into(), + )); + } + + let mut obj_dels = 0; + let mut mpu_dels = 0; + let mut ver_dels = 0; + let mut br_dels = 0; + + for hash in blocks { + let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; + let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; + let block_refs = self + .garage + .block_ref_table + .get_range(&hash, None, None, 10000, Default::default()) + .await?; + + for br in block_refs { + if let Some(version) = self + .garage + .version_table + .get(&br.version, &EmptyKey) + .await? 
+ { + self.handle_block_purge_version_backlink( + &version, + &mut obj_dels, + &mut mpu_dels, + ) + .await?; + + if !version.deleted.get() { + let deleted_version = Version::new(version.uuid, version.backlink, true); + self.garage.version_table.insert(&deleted_version).await?; + ver_dels += 1; + } + } + if !br.deleted.get() { + let mut br = br; + br.deleted.set(); + self.garage.block_ref_table.insert(&br).await?; + br_dels += 1; + } + } + } + + Ok(AdminRpc::Ok(format!( + "Purged {} blocks: marked {} block refs, {} versions, {} objects and {} multipart uploads as deleted", + blocks.len(), + br_dels, + ver_dels, + obj_dels, + mpu_dels, + ))) + } + + async fn handle_block_purge_version_backlink( + &self, + version: &Version, + obj_dels: &mut usize, + mpu_dels: &mut usize, + ) -> Result<(), Error> { + let (bucket_id, key, ov_id) = match &version.backlink { + VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid), + VersionBacklink::MultipartUpload { upload_id } => { + if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? { + if !mpu.deleted.get() { + mpu.parts.clear(); + mpu.deleted.set(); + self.garage.mpu_table.insert(&mpu).await?; + *mpu_dels += 1; + } + (mpu.bucket_id, mpu.key.clone(), *upload_id) + } else { + return Ok(()); + } + } + }; + + if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? 
{ + let ov = object.versions().iter().rev().find(|v| v.is_complete()); + if let Some(ov) = ov { + if ov.uuid == ov_id { + let del_uuid = gen_uuid(); + let deleted_object = Object::new( + bucket_id, + key, + vec![ObjectVersion { + uuid: del_uuid, + timestamp: ov.timestamp + 1, + state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), + }], + ); + self.garage.object_table.insert(&deleted_object).await?; + *obj_dels += 1; + } + } + } + + Ok(()) + } + + // ---- helper function ---- + fn find_block_hash_by_prefix(&self, prefix: &str) -> Result { + if prefix.len() < 4 { + return Err(Error::BadRequest( + "Please specify at least 4 characters of the block hash".into(), + )); + } + + let prefix_bin = + hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?; + + let iter = self + .garage + .block_ref_table + .data + .store + .range(&prefix_bin[..]..) + .map_err(GarageError::from)?; + let mut found = None; + for item in iter { + let (k, _v) = item.map_err(GarageError::from)?; + let hash = Hash::try_from(&k[..32]).unwrap(); + if &hash.as_slice()[..prefix_bin.len()] != prefix_bin { + break; + } + if hex::encode(hash.as_slice()).starts_with(prefix) { + match &found { + Some(x) if *x == hash => (), + Some(_) => { + return Err(Error::BadRequest(format!( + "Several blocks match prefix `{}`", + prefix + ))); + } + None => { + found = Some(hash); + } + } + } + } + + found.ok_or_else(|| Error::BadRequest("No matching block found".into())) + } +} diff --git a/src/garage/admin/bucket.rs b/src/garage/admin/bucket.rs new file mode 100644 index 00000000..073329c1 --- /dev/null +++ b/src/garage/admin/bucket.rs @@ -0,0 +1,500 @@ +use std::collections::HashMap; +use std::fmt::Write; + +use garage_util::crdt::*; +use garage_util::time::*; + +use garage_table::*; + +use garage_model::bucket_alias_table::*; +use garage_model::bucket_table::*; +use garage_model::helper::error::{Error, OkOrBadRequest}; +use garage_model::permission::*; + +use crate::cli::*; + 
+use super::*; + +impl AdminRpcHandler { + pub(super) async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result { + match cmd { + BucketOperation::List => self.handle_list_buckets().await, + BucketOperation::Info(query) => self.handle_bucket_info(query).await, + BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await, + BucketOperation::Delete(query) => self.handle_delete_bucket(query).await, + BucketOperation::Alias(query) => self.handle_alias_bucket(query).await, + BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await, + BucketOperation::Allow(query) => self.handle_bucket_allow(query).await, + BucketOperation::Deny(query) => self.handle_bucket_deny(query).await, + BucketOperation::Website(query) => self.handle_bucket_website(query).await, + BucketOperation::SetQuotas(query) => self.handle_bucket_set_quotas(query).await, + BucketOperation::CleanupIncompleteUploads(query) => { + self.handle_bucket_cleanup_incomplete_uploads(query).await + } + } + } + + async fn handle_list_buckets(&self) -> Result { + let buckets = self + .garage + .bucket_table + .get_range( + &EmptyKey, + None, + Some(DeletedFilter::NotDeleted), + 10000, + EnumerationOrder::Forward, + ) + .await?; + + Ok(AdminRpc::BucketList(buckets)) + } + + async fn handle_bucket_info(&self, query: &BucketOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .admin_get_existing_matching_bucket(&query.name) + .await?; + + let bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let counters = self + .garage + .object_counter_table + .table + .get(&bucket_id, &EmptyKey) + .await? + .map(|x| x.filtered_values(&self.garage.system.cluster_layout())) + .unwrap_or_default(); + + let mpu_counters = self + .garage + .mpu_counter_table + .table + .get(&bucket_id, &EmptyKey) + .await? 
+ .map(|x| x.filtered_values(&self.garage.system.cluster_layout())) + .unwrap_or_default(); + + let mut relevant_keys = HashMap::new(); + for (k, _) in bucket + .state + .as_option() + .unwrap() + .authorized_keys + .items() + .iter() + { + if let Some(key) = self + .garage + .key_table + .get(&EmptyKey, k) + .await? + .filter(|k| !k.is_deleted()) + { + relevant_keys.insert(k.clone(), key); + } + } + for ((k, _), _, _) in bucket + .state + .as_option() + .unwrap() + .local_aliases + .items() + .iter() + { + if relevant_keys.contains_key(k) { + continue; + } + if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? { + relevant_keys.insert(k.clone(), key); + } + } + + Ok(AdminRpc::BucketInfo { + bucket, + relevant_keys, + counters, + mpu_counters, + }) + } + + #[allow(clippy::ptr_arg)] + async fn handle_create_bucket(&self, name: &String) -> Result { + if !is_valid_bucket_name(name, self.garage.config.allow_punycode) { + return Err(Error::BadRequest(format!( + "{}: {}", + name, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let helper = self.garage.locked_helper().await; + + if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? { + if alias.state.get().is_some() { + return Err(Error::BadRequest(format!("Bucket {} already exists", name))); + } + } + + // ---- done checking, now commit ---- + + let bucket = Bucket::new(); + self.garage.bucket_table.insert(&bucket).await?; + + helper.set_global_bucket_alias(bucket.id, name).await?; + + Ok(AdminRpc::Ok(format!("Bucket {} was created.", name))) + } + + async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result { + let helper = self.garage.locked_helper().await; + + let bucket_id = helper + .bucket() + .admin_get_existing_matching_bucket(&query.name) + .await?; + + // Get the alias, but keep in minde here the bucket name + // given in parameter can also be directly the bucket's ID. 
+ // In that case bucket_alias will be None, and + // we can still delete the bucket if it has zero aliases + // (a condition which we try to prevent but that could still happen somehow). + // We just won't try to delete an alias entry because there isn't one. + let bucket_alias = self + .garage + .bucket_alias_table + .get(&EmptyKey, &query.name) + .await?; + + // Check bucket doesn't have other aliases + let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?; + let bucket_state = bucket.state.as_option().unwrap(); + if bucket_state + .aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .any(|(name, _, _)| name != &query.name) + { + return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name))); + } + if bucket_state + .local_aliases + .items() + .iter() + .any(|(_, _, active)| *active) + { + return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name))); + } + + // Check bucket is empty + if !helper.bucket().is_bucket_empty(bucket_id).await? { + return Err(Error::BadRequest(format!( + "Bucket {} is not empty", + query.name + ))); + } + + if !query.yes { + return Err(Error::BadRequest( + "Add --yes flag to really perform this operation".to_string(), + )); + } + + // --- done checking, now commit --- + // 1. delete authorization from keys that had access + for (key_id, _) in bucket.authorized_keys() { + helper + .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) + .await?; + } + + // 2. delete bucket alias + if bucket_alias.is_some() { + helper + .purge_global_bucket_alias(bucket_id, &query.name) + .await?; + } + + // 3. 
delete bucket + bucket.state = Deletable::delete(); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name))) + } + + async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result { + let helper = self.garage.locked_helper().await; + + let bucket_id = helper + .bucket() + .admin_get_existing_matching_bucket(&query.existing_bucket) + .await?; + + if let Some(key_pattern) = &query.local { + let key = helper.key().get_existing_matching_key(key_pattern).await?; + + helper + .set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name) + .await?; + Ok(AdminRpc::Ok(format!( + "Alias {} now points to bucket {:?} in namespace of key {}", + query.new_name, bucket_id, key.key_id + ))) + } else { + helper + .set_global_bucket_alias(bucket_id, &query.new_name) + .await?; + Ok(AdminRpc::Ok(format!( + "Alias {} now points to bucket {:?}", + query.new_name, bucket_id + ))) + } + } + + async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result { + let helper = self.garage.locked_helper().await; + + if let Some(key_pattern) = &query.local { + let key = helper.key().get_existing_matching_key(key_pattern).await?; + + let bucket_id = key + .state + .as_option() + .unwrap() + .local_aliases + .get(&query.name) + .cloned() + .flatten() + .ok_or_bad_request("Bucket not found")?; + + helper + .unset_local_bucket_alias(bucket_id, &key.key_id, &query.name) + .await?; + + Ok(AdminRpc::Ok(format!( + "Alias {} no longer points to bucket {:?} in namespace of key {}", + &query.name, bucket_id, key.key_id + ))) + } else { + let bucket_id = helper + .bucket() + .resolve_global_bucket_name(&query.name) + .await? 
+ .ok_or_bad_request("Bucket not found")?; + + helper + .unset_global_bucket_alias(bucket_id, &query.name) + .await?; + + Ok(AdminRpc::Ok(format!( + "Alias {} no longer points to bucket {:?}", + &query.name, bucket_id + ))) + } + } + + async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result { + let helper = self.garage.locked_helper().await; + + let bucket_id = helper + .bucket() + .admin_get_existing_matching_bucket(&query.bucket) + .await?; + let key = helper + .key() + .get_existing_matching_key(&query.key_pattern) + .await?; + + let allow_read = query.read || key.allow_read(&bucket_id); + let allow_write = query.write || key.allow_write(&bucket_id); + let allow_owner = query.owner || key.allow_owner(&bucket_id); + + helper + .set_bucket_key_permissions( + bucket_id, + &key.key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read, + allow_write, + allow_owner, + }, + ) + .await?; + + Ok(AdminRpc::Ok(format!( + "New permissions for {} on {}: read {}, write {}, owner {}.", + &key.key_id, &query.bucket, allow_read, allow_write, allow_owner + ))) + } + + async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result { + let helper = self.garage.locked_helper().await; + + let bucket_id = helper + .bucket() + .admin_get_existing_matching_bucket(&query.bucket) + .await?; + let key = helper + .key() + .get_existing_matching_key(&query.key_pattern) + .await?; + + let allow_read = !query.read && key.allow_read(&bucket_id); + let allow_write = !query.write && key.allow_write(&bucket_id); + let allow_owner = !query.owner && key.allow_owner(&bucket_id); + + helper + .set_bucket_key_permissions( + bucket_id, + &key.key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read, + allow_write, + allow_owner, + }, + ) + .await?; + + Ok(AdminRpc::Ok(format!( + "New permissions for {} on {}: read {}, write {}, owner {}.", + &key.key_id, &query.bucket, allow_read, allow_write, allow_owner + ))) + } + + async fn handle_bucket_website(&self, query: 
&WebsiteOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .admin_get_existing_matching_bucket(&query.bucket) + .await?; + + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let bucket_state = bucket.state.as_option_mut().unwrap(); + + if !(query.allow ^ query.deny) { + return Err(Error::BadRequest( + "You must specify exactly one flag, either --allow or --deny".to_string(), + )); + } + + let website = if query.allow { + Some(WebsiteConfig { + index_document: query.index_document.clone(), + error_document: query.error_document.clone(), + }) + } else { + None + }; + + bucket_state.website_config.update(website); + self.garage.bucket_table.insert(&bucket).await?; + + let msg = if query.allow { + format!("Website access allowed for {}", &query.bucket) + } else { + format!("Website access denied for {}", &query.bucket) + }; + + Ok(AdminRpc::Ok(msg)) + } + + async fn handle_bucket_set_quotas(&self, query: &SetQuotasOpt) -> Result { + let bucket_id = self + .garage + .bucket_helper() + .admin_get_existing_matching_bucket(&query.bucket) + .await?; + + let mut bucket = self + .garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let bucket_state = bucket.state.as_option_mut().unwrap(); + + if query.max_size.is_none() && query.max_objects.is_none() { + return Err(Error::BadRequest( + "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(), + )); + } + + let mut quotas = bucket_state.quotas.get().clone(); + + match query.max_size.as_ref().map(String::as_ref) { + Some("none") => quotas.max_size = None, + Some(v) => { + let bs = v + .parse::() + .ok_or_bad_request(format!("Invalid size specified: {}", v))?; + quotas.max_size = Some(bs.as_u64()); + } + _ => (), + } + + match query.max_objects.as_ref().map(String::as_ref) { + Some("none") => quotas.max_objects = None, + Some(v) => { + let mo = v + .parse::() + 
.ok_or_bad_request(format!("Invalid number specified: {}", v))?; + quotas.max_objects = Some(mo); + } + _ => (), + } + + bucket_state.quotas.update(quotas); + self.garage.bucket_table.insert(&bucket).await?; + + Ok(AdminRpc::Ok(format!( + "Quotas updated for {}", + &query.bucket + ))) + } + + async fn handle_bucket_cleanup_incomplete_uploads( + &self, + query: &CleanupIncompleteUploadsOpt, + ) -> Result { + let mut bucket_ids = vec![]; + for b in query.buckets.iter() { + bucket_ids.push( + self.garage + .bucket_helper() + .admin_get_existing_matching_bucket(b) + .await?, + ); + } + + let duration = parse_duration::parse::parse(&query.older_than) + .ok_or_bad_request("Invalid duration passed for --older-than parameter")?; + + let mut ret = String::new(); + for bucket in bucket_ids { + let count = self + .garage + .bucket_helper() + .cleanup_incomplete_uploads(&bucket, duration) + .await?; + writeln!( + &mut ret, + "Bucket {:?}: {} incomplete uploads aborted", + bucket, count + ) + .unwrap(); + } + + Ok(AdminRpc::Ok(ret)) + } +} diff --git a/src/garage/admin/key.rs b/src/garage/admin/key.rs new file mode 100644 index 00000000..bd010d2c --- /dev/null +++ b/src/garage/admin/key.rs @@ -0,0 +1,161 @@ +use std::collections::HashMap; + +use garage_table::*; + +use garage_model::helper::error::*; +use garage_model::key_table::*; + +use crate::cli::*; + +use super::*; + +impl AdminRpcHandler { + pub(super) async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result { + match cmd { + KeyOperation::List => self.handle_list_keys().await, + KeyOperation::Info(query) => self.handle_key_info(query).await, + KeyOperation::Create(query) => self.handle_create_key(query).await, + KeyOperation::Rename(query) => self.handle_rename_key(query).await, + KeyOperation::Delete(query) => self.handle_delete_key(query).await, + KeyOperation::Allow(query) => self.handle_allow_key(query).await, + KeyOperation::Deny(query) => self.handle_deny_key(query).await, + KeyOperation::Import(query) => 
self.handle_import_key(query).await, + } + } + + async fn handle_list_keys(&self) -> Result { + let key_ids = self + .garage + .key_table + .get_range( + &EmptyKey, + None, + Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), + 10000, + EnumerationOrder::Forward, + ) + .await? + .iter() + .map(|k| (k.key_id.to_string(), k.params().unwrap().name.get().clone())) + .collect::>(); + Ok(AdminRpc::KeyList(key_ids)) + } + + async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result { + let mut key = self + .garage + .key_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; + + if !query.show_secret { + key.state.as_option_mut().unwrap().secret_key = "(redacted)".into(); + } + + self.key_info_result(key).await + } + + async fn handle_create_key(&self, query: &KeyNewOpt) -> Result { + let key = Key::new(&query.name); + self.garage.key_table.insert(&key).await?; + self.key_info_result(key).await + } + + async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result { + let mut key = self + .garage + .key_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; + key.params_mut() + .unwrap() + .name + .update(query.new_name.clone()); + self.garage.key_table.insert(&key).await?; + self.key_info_result(key).await + } + + async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result { + let helper = self.garage.locked_helper().await; + + let mut key = helper + .key() + .get_existing_matching_key(&query.key_pattern) + .await?; + + if !query.yes { + return Err(Error::BadRequest( + "Add --yes flag to really perform this operation".to_string(), + )); + } + + helper.delete_key(&mut key).await?; + + Ok(AdminRpc::Ok(format!( + "Key {} was deleted successfully.", + key.key_id + ))) + } + + async fn handle_allow_key(&self, query: &KeyPermOpt) -> Result { + let mut key = self + .garage + .key_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; + if query.create_bucket { + key.params_mut().unwrap().allow_create_bucket.update(true); + 
} + self.garage.key_table.insert(&key).await?; + self.key_info_result(key).await + } + + async fn handle_deny_key(&self, query: &KeyPermOpt) -> Result { + let mut key = self + .garage + .key_helper() + .get_existing_matching_key(&query.key_pattern) + .await?; + if query.create_bucket { + key.params_mut().unwrap().allow_create_bucket.update(false); + } + self.garage.key_table.insert(&key).await?; + self.key_info_result(key).await + } + + async fn handle_import_key(&self, query: &KeyImportOpt) -> Result { + if !query.yes { + return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string())); + } + + let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; + if prev_key.is_some() { + return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); + } + + let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name) + .ok_or_bad_request("Invalid key format")?; + self.garage.key_table.insert(&imported_key).await?; + + self.key_info_result(imported_key).await + } + + async fn key_info_result(&self, key: Key) -> Result { + let mut relevant_buckets = HashMap::new(); + + for (id, _) in key + .state + .as_option() + .unwrap() + .authorized_buckets + .items() + .iter() + { + if let Some(b) = self.garage.bucket_table.get(&EmptyKey, id).await? 
{ + relevant_buckets.insert(*id, b); + } + } + + Ok(AdminRpc::KeyInfo(key, relevant_buckets)) + } +} diff --git a/src/garage/admin/mod.rs b/src/garage/admin/mod.rs new file mode 100644 index 00000000..6ae8fa88 --- /dev/null +++ b/src/garage/admin/mod.rs @@ -0,0 +1,545 @@ +mod block; +mod bucket; +mod key; + +use std::collections::HashMap; +use std::fmt::Write; +use std::future::Future; +use std::sync::Arc; + +use futures::future::FutureExt; + +use serde::{Deserialize, Serialize}; + +use format_table::format_table_to_string; + +use garage_util::background::BackgroundRunner; +use garage_util::data::*; +use garage_util::error::Error as GarageError; + +use garage_table::replication::*; +use garage_table::*; + +use garage_rpc::layout::PARTITION_BITS; +use garage_rpc::*; + +use garage_block::manager::BlockResyncErrorInfo; + +use garage_model::bucket_table::*; +use garage_model::garage::Garage; +use garage_model::helper::error::{Error, OkOrBadRequest}; +use garage_model::key_table::*; +use garage_model::s3::mpu_table::MultipartUpload; +use garage_model::s3::version_table::Version; + +use crate::cli::*; +use crate::repair::online::launch_online_repair; + +pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc"; + +#[derive(Debug, Serialize, Deserialize)] +#[allow(clippy::large_enum_variant)] +pub enum AdminRpc { + BucketOperation(BucketOperation), + KeyOperation(KeyOperation), + LaunchRepair(RepairOpt), + Stats(StatsOpt), + Worker(WorkerOperation), + BlockOperation(BlockOperation), + MetaOperation(MetaOperation), + + // Replies + Ok(String), + BucketList(Vec), + BucketInfo { + bucket: Bucket, + relevant_keys: HashMap, + counters: HashMap, + mpu_counters: HashMap, + }, + KeyList(Vec<(String, String)>), + KeyInfo(Key, HashMap), + WorkerList( + HashMap, + WorkerListOpt, + ), + WorkerVars(Vec<(Uuid, String, String)>), + WorkerInfo(usize, garage_util::background::WorkerInfo), + BlockErrorList(Vec), + BlockInfo { + hash: Hash, + refcount: u64, + versions: Vec>, + uploads: 
Vec, + }, +} + +impl Rpc for AdminRpc { + type Response = Result; +} + +pub struct AdminRpcHandler { + garage: Arc, + background: Arc, + endpoint: Arc>, +} + +impl AdminRpcHandler { + pub fn new(garage: Arc, background: Arc) -> Arc { + let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into()); + let admin = Arc::new(Self { + garage, + background, + endpoint, + }); + admin.endpoint.set_handler(admin.clone()); + admin + } + + // ================ REPAIR COMMANDS ==================== + + async fn handle_launch_repair(self: &Arc, opt: RepairOpt) -> Result { + if !opt.yes { + return Err(Error::BadRequest( + "Please provide the --yes flag to initiate repair operations.".to_string(), + )); + } + if opt.all_nodes { + let mut opt_to_send = opt.clone(); + opt_to_send.all_nodes = false; + + let mut failures = vec![]; + let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec(); + for node in all_nodes.iter() { + let node = (*node).into(); + let resp = self + .endpoint + .call( + &node, + AdminRpc::LaunchRepair(opt_to_send.clone()), + PRIO_NORMAL, + ) + .await; + if !matches!(resp, Ok(Ok(_))) { + failures.push(node); + } + } + if failures.is_empty() { + Ok(AdminRpc::Ok("Repair launched on all nodes".to_string())) + } else { + Err(Error::BadRequest(format!( + "Could not launch repair on nodes: {:?} (launched successfully on other nodes)", + failures + ))) + } + } else { + launch_online_repair(&self.garage, &self.background, opt).await?; + Ok(AdminRpc::Ok(format!( + "Repair launched on {:?}", + self.garage.system.id + ))) + } + } + + // ================ STATS COMMANDS ==================== + + async fn handle_stats(&self, opt: StatsOpt) -> Result { + if opt.all_nodes { + let mut ret = String::new(); + let mut all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec(); + for node in self.garage.system.get_known_nodes().iter() { + if node.is_up && !all_nodes.contains(&node.id) { + all_nodes.push(node.id); + } + } + + for node in all_nodes.iter() { + 
let mut opt = opt.clone(); + opt.all_nodes = false; + opt.skip_global = true; + + writeln!(&mut ret, "\n======================").unwrap(); + writeln!(&mut ret, "Stats for node {:?}:", node).unwrap(); + + let node_id = (*node).into(); + match self + .endpoint + .call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL) + .await + { + Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(), + Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(), + Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(), + Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(), + } + } + + writeln!(&mut ret, "\n======================").unwrap(); + write!( + &mut ret, + "Cluster statistics:\n\n{}", + self.gather_cluster_stats() + ) + .unwrap(); + + Ok(AdminRpc::Ok(ret)) + } else { + Ok(AdminRpc::Ok(self.gather_stats_local(opt)?)) + } + } + + fn gather_stats_local(&self, opt: StatsOpt) -> Result { + let mut ret = String::new(); + writeln!( + &mut ret, + "\nGarage version: {} [features: {}]\nRust compiler version: {}", + garage_util::version::garage_version(), + garage_util::version::garage_features() + .map(|list| list.join(", ")) + .unwrap_or_else(|| "(unknown)".into()), + garage_util::version::rust_version(), + ) + .unwrap(); + + writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap(); + + // Gather table statistics + let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()]; + table.push(self.gather_table_stats(&self.garage.bucket_table)?); + table.push(self.gather_table_stats(&self.garage.key_table)?); + table.push(self.gather_table_stats(&self.garage.object_table)?); + table.push(self.gather_table_stats(&self.garage.version_table)?); + table.push(self.gather_table_stats(&self.garage.block_ref_table)?); + write!( + &mut ret, + "\nTable stats:\n{}", + format_table_to_string(table) + ) + .unwrap(); + + // Gather block manager statistics + writeln!(&mut ret, "\nBlock manager stats:").unwrap(); + let rc_len = 
self.garage.block_manager.rc_approximate_len()?.to_string(); + + writeln!( + &mut ret, + " number of RC entries (~= number of blocks): {}", + rc_len + ) + .unwrap(); + writeln!( + &mut ret, + " resync queue length: {}", + self.garage.block_manager.resync.queue_approximate_len()? + ) + .unwrap(); + writeln!( + &mut ret, + " blocks with resync errors: {}", + self.garage.block_manager.resync.errors_approximate_len()? + ) + .unwrap(); + + if !opt.skip_global { + write!(&mut ret, "\n{}", self.gather_cluster_stats()).unwrap(); + } + + Ok(ret) + } + + fn gather_cluster_stats(&self) -> String { + let mut ret = String::new(); + + // Gather storage node and free space statistics for current nodes + let layout = &self.garage.system.cluster_layout(); + let mut node_partition_count = HashMap::::new(); + for short_id in layout.current().ring_assignment_data.iter() { + let id = layout.current().node_id_vec[*short_id as usize]; + *node_partition_count.entry(id).or_default() += 1; + } + let node_info = self + .garage + .system + .get_known_nodes() + .into_iter() + .map(|n| (n.id, n)) + .collect::>(); + + let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()]; + for (id, parts) in node_partition_count.iter() { + let info = node_info.get(id); + let status = info.map(|x| &x.status); + let role = layout.current().roles.get(id).and_then(|x| x.0.as_ref()); + let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?"); + let zone = role.map(|x| x.zone.as_str()).unwrap_or("?"); + let capacity = role + .map(|x| x.capacity_string()) + .unwrap_or_else(|| "?".into()); + let avail_str = |x| match x { + Some((avail, total)) => { + let pct = (avail as f64) / (total as f64) * 100.; + let avail = bytesize::ByteSize::b(avail); + let total = bytesize::ByteSize::b(total); + format!("{}/{} ({:.1}%)", avail, total, pct) + } + None => "?".into(), + }; + let data_avail = avail_str(status.and_then(|x| x.data_disk_avail)); + let meta_avail = 
avail_str(status.and_then(|x| x.meta_disk_avail)); + table.push(format!( + " {:?}\t{}\t{}\t{}\t{}\t{}\t{}", + id, hostname, zone, capacity, parts, data_avail, meta_avail + )); + } + write!( + &mut ret, + "Storage nodes:\n{}", + format_table_to_string(table) + ) + .unwrap(); + + let meta_part_avail = node_partition_count + .iter() + .filter_map(|(id, parts)| { + node_info + .get(id) + .and_then(|x| x.status.meta_disk_avail) + .map(|c| c.0 / *parts) + }) + .collect::>(); + let data_part_avail = node_partition_count + .iter() + .filter_map(|(id, parts)| { + node_info + .get(id) + .and_then(|x| x.status.data_disk_avail) + .map(|c| c.0 / *parts) + }) + .collect::>(); + if !meta_part_avail.is_empty() && !data_part_avail.is_empty() { + let meta_avail = + bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS)); + let data_avail = + bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS)); + writeln!( + &mut ret, + "\nEstimated available storage space cluster-wide (might be lower in practice):" + ) + .unwrap(); + if meta_part_avail.len() < node_partition_count.len() + || data_part_avail.len() < node_partition_count.len() + { + writeln!(&mut ret, " data: < {}", data_avail).unwrap(); + writeln!(&mut ret, " metadata: < {}", meta_avail).unwrap(); + writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap(); + } else { + writeln!(&mut ret, " data: {}", data_avail).unwrap(); + writeln!(&mut ret, " metadata: {}", meta_avail).unwrap(); + } + } + + ret + } + + fn gather_table_stats(&self, t: &Arc>) -> Result + where + F: TableSchema + 'static, + R: TableReplication + 'static, + { + let data_len = t + .data + .store + .approximate_len() + .map_err(GarageError::from)? 
+ .to_string(); + let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string(); + + Ok(format!( + " {}\t{}\t{}\t{}\t{}", + F::TABLE_NAME, + data_len, + mkl_len, + t.merkle_updater.todo_approximate_len()?, + t.data.gc_todo_approximate_len()? + )) + } + + // ================ WORKER COMMANDS ==================== + + async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result { + match cmd { + WorkerOperation::List { opt } => { + let workers = self.background.get_worker_info(); + Ok(AdminRpc::WorkerList(workers, *opt)) + } + WorkerOperation::Info { tid } => { + let info = self + .background + .get_worker_info() + .get(tid) + .ok_or_bad_request(format!("No worker with TID {}", tid))? + .clone(); + Ok(AdminRpc::WorkerInfo(*tid, info)) + } + WorkerOperation::Get { + all_nodes, + variable, + } => self.handle_get_var(*all_nodes, variable).await, + WorkerOperation::Set { + all_nodes, + variable, + value, + } => self.handle_set_var(*all_nodes, variable, value).await, + } + } + + async fn handle_get_var( + &self, + all_nodes: bool, + variable: &Option, + ) -> Result { + if all_nodes { + let mut ret = vec![]; + let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec(); + for node in all_nodes.iter() { + let node = (*node).into(); + match self + .endpoint + .call( + &node, + AdminRpc::Worker(WorkerOperation::Get { + all_nodes: false, + variable: variable.clone(), + }), + PRIO_NORMAL, + ) + .await?? 
+ { + AdminRpc::WorkerVars(v) => ret.extend(v), + m => return Err(GarageError::unexpected_rpc_message(m).into()), + } + } + Ok(AdminRpc::WorkerVars(ret)) + } else { + #[allow(clippy::collapsible_else_if)] + if let Some(v) = variable { + Ok(AdminRpc::WorkerVars(vec![( + self.garage.system.id, + v.clone(), + self.garage.bg_vars.get(v)?, + )])) + } else { + let mut vars = self.garage.bg_vars.get_all(); + vars.sort(); + Ok(AdminRpc::WorkerVars( + vars.into_iter() + .map(|(k, v)| (self.garage.system.id, k.to_string(), v)) + .collect(), + )) + } + } + } + + async fn handle_set_var( + &self, + all_nodes: bool, + variable: &str, + value: &str, + ) -> Result { + if all_nodes { + let mut ret = vec![]; + let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec(); + for node in all_nodes.iter() { + let node = (*node).into(); + match self + .endpoint + .call( + &node, + AdminRpc::Worker(WorkerOperation::Set { + all_nodes: false, + variable: variable.to_string(), + value: value.to_string(), + }), + PRIO_NORMAL, + ) + .await?? + { + AdminRpc::WorkerVars(v) => ret.extend(v), + m => return Err(GarageError::unexpected_rpc_message(m).into()), + } + } + Ok(AdminRpc::WorkerVars(ret)) + } else { + self.garage.bg_vars.set(variable, value)?; + Ok(AdminRpc::WorkerVars(vec![( + self.garage.system.id, + variable.to_string(), + value.to_string(), + )])) + } + } + + // ================ META DB COMMANDS ==================== + + async fn handle_meta_cmd(self: &Arc, mo: &MetaOperation) -> Result { + match mo { + MetaOperation::Snapshot { all: true } => { + let to = self.garage.system.cluster_layout().all_nodes().to_vec(); + + let resps = futures::future::join_all(to.iter().map(|to| async move { + let to = (*to).into(); + self.endpoint + .call( + &to, + AdminRpc::MetaOperation(MetaOperation::Snapshot { all: false }), + PRIO_NORMAL, + ) + .await? 
+ })) + .await; + + let mut ret = vec![]; + for (to, resp) in to.iter().zip(resps.iter()) { + let res_str = match resp { + Ok(_) => "ok".to_string(), + Err(e) => format!("error: {}", e), + }; + ret.push(format!("{:?}\t{}", to, res_str)); + } + + if resps.iter().any(Result::is_err) { + Err(GarageError::Message(format_table_to_string(ret)).into()) + } else { + Ok(AdminRpc::Ok(format_table_to_string(ret))) + } + } + MetaOperation::Snapshot { all: false } => { + garage_model::snapshot::async_snapshot_metadata(&self.garage).await?; + Ok(AdminRpc::Ok("Snapshot has been saved.".into())) + } + } + } +} + +impl EndpointHandler for AdminRpcHandler { + fn handle( + self: &Arc, + message: &AdminRpc, + _from: NodeID, + ) -> impl Future> + Send { + let self2 = self.clone(); + async move { + match message { + AdminRpc::BucketOperation(bo) => self2.handle_bucket_cmd(bo).await, + AdminRpc::KeyOperation(ko) => self2.handle_key_cmd(ko).await, + AdminRpc::LaunchRepair(opt) => self2.handle_launch_repair(opt.clone()).await, + AdminRpc::Stats(opt) => self2.handle_stats(opt.clone()).await, + AdminRpc::Worker(wo) => self2.handle_worker_cmd(wo).await, + AdminRpc::BlockOperation(bo) => self2.handle_block_cmd(bo).await, + AdminRpc::MetaOperation(mo) => self2.handle_meta_cmd(mo).await, + m => Err(GarageError::unexpected_rpc_message(m).into()), + } + } + .boxed() + } +} diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs new file mode 100644 index 00000000..44d3d96c --- /dev/null +++ b/src/garage/cli/cmd.rs @@ -0,0 +1,280 @@ +use std::collections::{HashMap, HashSet}; +use std::time::Duration; + +use format_table::format_table; +use garage_util::error::*; + +use garage_rpc::layout::*; +use garage_rpc::system::*; +use garage_rpc::*; + +use garage_model::helper::error::Error as HelperError; + +use crate::admin::*; +use crate::cli::*; + +pub async fn cli_command_dispatch( + cmd: Command, + system_rpc_endpoint: &Endpoint, + admin_rpc_endpoint: &Endpoint, + rpc_host: NodeID, +) -> Result<(), 
HelperError> { + match cmd { + Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?), + Command::Node(NodeOperation::Connect(connect_opt)) => { + Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?) + } + Command::Layout(layout_opt) => { + Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?) + } + Command::Bucket(bo) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await + } + Command::Key(ko) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await + } + Command::Repair(ro) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await + } + Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await, + Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await, + Command::Block(bo) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await + } + Command::Meta(mo) => { + cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await + } + _ => unreachable!(), + } +} + +pub async fn cmd_status(rpc_cli: &Endpoint, rpc_host: NodeID) -> Result<(), Error> { + let status = fetch_status(rpc_cli, rpc_host).await?; + let layout = fetch_layout(rpc_cli, rpc_host).await?; + + println!("==== HEALTHY NODES ===="); + let mut healthy_nodes = + vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()]; + for adv in status.iter().filter(|adv| adv.is_up) { + let host = adv.status.hostname.as_deref().unwrap_or("?"); + let addr = match adv.addr { + Some(addr) => addr.to_string(), + None => "N/A".to_string(), + }; + if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) { + let data_avail = match &adv.status.data_disk_avail { + _ if cfg.capacity.is_none() => "N/A".into(), + Some((avail, total)) => { + let pct = (*avail as f64) / (*total as f64) * 100.; + let avail = bytesize::ByteSize::b(*avail); + format!("{} ({:.1}%)", avail, 
pct) + } + None => "?".into(), + }; + healthy_nodes.push(format!( + "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}", + id = adv.id, + host = host, + addr = addr, + tags = cfg.tags.join(","), + zone = cfg.zone, + capacity = cfg.capacity_string(), + data_avail = data_avail, + )); + } else { + let prev_role = layout + .versions + .iter() + .rev() + .find_map(|x| match x.roles.get(&adv.id) { + Some(NodeRoleV(Some(cfg))) => Some(cfg), + _ => None, + }); + if let Some(cfg) = prev_role { + healthy_nodes.push(format!( + "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...", + id = adv.id, + host = host, + addr = addr, + tags = cfg.tags.join(","), + zone = cfg.zone, + )); + } else { + let new_role = match layout.staging.get().roles.get(&adv.id) { + Some(NodeRoleV(Some(_))) => "pending...", + _ => "NO ROLE ASSIGNED", + }; + healthy_nodes.push(format!( + "{id:?}\t{h}\t{addr}\t\t\t{new_role}", + id = adv.id, + h = host, + addr = addr, + new_role = new_role, + )); + } + } + } + format_table(healthy_nodes); + + // Determine which nodes are unhealthy and print that to stdout + let status_map = status + .iter() + .map(|adv| (adv.id, adv)) + .collect::>(); + + let tf = timeago::Formatter::new(); + let mut drain_msg = false; + let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()]; + let mut listed = HashSet::new(); + for ver in layout.versions.iter().rev() { + for (node, _, role) in ver.roles.items().iter() { + let cfg = match role { + NodeRoleV(Some(role)) if role.capacity.is_some() => role, + _ => continue, + }; + + if listed.contains(node) { + continue; + } + listed.insert(*node); + + let adv = status_map.get(node); + if adv.map(|x| x.is_up).unwrap_or(false) { + continue; + } + + // Node is in a layout version, is not a gateway node, and is not up: + // it is in a failed state, add proper line to the output + let (host, last_seen) = match adv { + Some(adv) => ( + adv.status.hostname.as_deref().unwrap_or("?"), + 
adv.last_seen_secs_ago + .map(|s| tf.convert(Duration::from_secs(s))) + .unwrap_or_else(|| "never seen".into()), + ), + None => ("??", "never seen".into()), + }; + let capacity = if ver.version == layout.current().version { + cfg.capacity_string() + } else { + drain_msg = true; + "draining metadata...".to_string() + }; + failed_nodes.push(format!( + "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", + id = node, + host = host, + tags = cfg.tags.join(","), + zone = cfg.zone, + capacity = capacity, + last_seen = last_seen, + )); + } + } + + if failed_nodes.len() > 1 { + println!("\n==== FAILED NODES ===="); + format_table(failed_nodes); + if drain_msg { + println!(); + println!("Your cluster is expecting to drain data from nodes that are currently unavailable."); + println!("If these nodes are definitely dead, please review the layout history with"); + println!( + "`garage layout history` and use `garage layout skip-dead-nodes` to force progress." + ); + } + } + + if print_staging_role_changes(&layout) { + println!(); + println!("Please use `garage layout show` to check the proposed new layout and apply it."); + println!(); + } + + Ok(()) +} + +pub async fn cmd_connect( + rpc_cli: &Endpoint, + rpc_host: NodeID, + args: ConnectNodeOpt, +) -> Result<(), Error> { + match rpc_cli + .call(&rpc_host, SystemRpc::Connect(args.node), PRIO_NORMAL) + .await?? + { + SystemRpc::Ok => { + println!("Success."); + Ok(()) + } + m => Err(Error::unexpected_rpc_message(m)), + } +} + +pub async fn cmd_admin( + rpc_cli: &Endpoint, + rpc_host: NodeID, + args: AdminRpc, +) -> Result<(), HelperError> { + match rpc_cli.call(&rpc_host, args, PRIO_NORMAL).await?? 
{ + AdminRpc::Ok(msg) => { + println!("{}", msg); + } + AdminRpc::BucketList(bl) => { + print_bucket_list(bl); + } + AdminRpc::BucketInfo { + bucket, + relevant_keys, + counters, + mpu_counters, + } => { + print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters); + } + AdminRpc::KeyList(kl) => { + print_key_list(kl); + } + AdminRpc::KeyInfo(key, rb) => { + print_key_info(&key, &rb); + } + AdminRpc::WorkerList(wi, wlo) => { + print_worker_list(wi, wlo); + } + AdminRpc::WorkerVars(wv) => { + print_worker_vars(wv); + } + AdminRpc::WorkerInfo(tid, wi) => { + print_worker_info(tid, wi); + } + AdminRpc::BlockErrorList(el) => { + print_block_error_list(el); + } + AdminRpc::BlockInfo { + hash, + refcount, + versions, + uploads, + } => { + print_block_info(hash, refcount, versions, uploads); + } + r => { + error!("Unexpected response: {:?}", r); + } + } + Ok(()) +} + +// ---- utility ---- + +pub async fn fetch_status( + rpc_cli: &Endpoint, + rpc_host: NodeID, +) -> Result, Error> { + match rpc_cli + .call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL) + .await?? 
+ { + SystemRpc::ReturnKnownNodes(nodes) => Ok(nodes), + resp => Err(Error::unexpected_rpc_message(resp)), + } +} diff --git a/src/garage/cli/local/convert_db.rs b/src/garage/cli/convert_db.rs similarity index 90% rename from src/garage/cli/local/convert_db.rs rename to src/garage/cli/convert_db.rs index e1a67d02..a40fb61f 100644 --- a/src/garage/cli/local/convert_db.rs +++ b/src/garage/cli/convert_db.rs @@ -7,8 +7,8 @@ use garage_db::*; /// K2V command line interface #[derive(StructOpt, Debug)] pub struct ConvertDbOpt { - /// Input database path (not the same as `metadata_dir`, see - /// + /// Input database path (not the same as metadata_dir, see + /// https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0) #[structopt(short = "i")] input_path: PathBuf, /// Input database engine (lmdb or sqlite; limited by db engines diff --git a/src/garage/cli/local/init.rs b/src/garage/cli/init.rs similarity index 86% rename from src/garage/cli/local/init.rs rename to src/garage/cli/init.rs index 683930ca..43ca5c09 100644 --- a/src/garage/cli/local/init.rs +++ b/src/garage/cli/init.rs @@ -36,6 +36,16 @@ pub fn node_id_command(config_file: PathBuf, quiet: bool) -> Result<(), Error> { ); eprintln!(" garage [-c ] node connect {}", idstr); eprintln!(); + eprintln!("Or instruct them to connect from here by running:"); + eprintln!( + " garage -c {} -h node connect {}", + config_file.to_string_lossy(), + idstr + ); + eprintln!( + "where is their own node identifier in the format: @:" + ); + eprintln!(); eprintln!("This node identifier can also be added as a bootstrap node in other node's garage.toml files:"); eprintln!(" bootstrap_peers = ["); eprintln!(" \"{}\",", idstr); diff --git a/src/garage/cli/layout.rs b/src/garage/cli/layout.rs new file mode 100644 index 00000000..f053eef4 --- /dev/null +++ b/src/garage/cli/layout.rs @@ -0,0 +1,584 @@ +use bytesize::ByteSize; + +use format_table::format_table; +use garage_util::crdt::Crdt; +use 
garage_util::error::*; + +use garage_rpc::layout::*; +use garage_rpc::system::*; +use garage_rpc::*; + +use crate::cli::*; + +pub async fn cli_layout_command_dispatch( + cmd: LayoutOperation, + system_rpc_endpoint: &Endpoint, + rpc_host: NodeID, +) -> Result<(), Error> { + match cmd { + LayoutOperation::Assign(assign_opt) => { + cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await + } + LayoutOperation::Remove(remove_opt) => { + cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await + } + LayoutOperation::Show => cmd_show_layout(system_rpc_endpoint, rpc_host).await, + LayoutOperation::Apply(apply_opt) => { + cmd_apply_layout(system_rpc_endpoint, rpc_host, apply_opt).await + } + LayoutOperation::Revert(revert_opt) => { + cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await + } + LayoutOperation::Config(config_opt) => { + cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await + } + LayoutOperation::History => cmd_layout_history(system_rpc_endpoint, rpc_host).await, + LayoutOperation::SkipDeadNodes(assume_sync_opt) => { + cmd_layout_skip_dead_nodes(system_rpc_endpoint, rpc_host, assume_sync_opt).await + } + } +} + +pub async fn cmd_assign_role( + rpc_cli: &Endpoint, + rpc_host: NodeID, + args: AssignRoleOpt, +) -> Result<(), Error> { + let status = match rpc_cli + .call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL) + .await?? 
+ { + SystemRpc::ReturnKnownNodes(nodes) => nodes, + resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))), + }; + + let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + let all_nodes = layout.get_all_nodes(); + + let added_nodes = args + .node_ids + .iter() + .map(|node_id| { + find_matching_node( + status + .iter() + .map(|adv| adv.id) + .chain(all_nodes.iter().cloned()), + node_id, + ) + }) + .collect::, _>>()?; + + let mut roles = layout.current().roles.clone(); + roles.merge(&layout.staging.get().roles); + + for replaced in args.replace.iter() { + let replaced_node = find_matching_node(all_nodes.iter().cloned(), replaced)?; + match roles.get(&replaced_node) { + Some(NodeRoleV(Some(_))) => { + layout + .staging + .get_mut() + .roles + .merge(&roles.update_mutator(replaced_node, NodeRoleV(None))); + } + _ => { + return Err(Error::Message(format!( + "Cannot replace node {:?} as it is not currently in planned layout", + replaced_node + ))); + } + } + } + + if args.capacity.is_some() && args.gateway { + return Err(Error::Message( + "-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into())); + } + if args.capacity == Some(ByteSize::b(0)) { + return Err(Error::Message("Invalid capacity value: 0".into())); + } + + for added_node in added_nodes { + let new_entry = match roles.get(&added_node) { + Some(NodeRoleV(Some(old))) => { + let capacity = match args.capacity { + Some(c) => Some(c.as_u64()), + None if args.gateway => None, + None => old.capacity, + }; + let tags = if args.tags.is_empty() { + old.tags.clone() + } else { + args.tags.clone() + }; + NodeRole { + zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()), + capacity, + tags, + } + } + _ => { + let capacity = match args.capacity { + Some(c) => Some(c.as_u64()), + None if args.gateway => None, + None => return Err(Error::Message( + "Please specify a capacity with the -c flag, or set 
node explicitly as gateway with -g".into())), + }; + NodeRole { + zone: args + .zone + .clone() + .ok_or("Please specify a zone with the -z flag")?, + capacity, + tags: args.tags.clone(), + } + } + }; + + layout + .staging + .get_mut() + .roles + .merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry)))); + } + + send_layout(rpc_cli, rpc_host, layout).await?; + + println!("Role changes are staged but not yet committed."); + println!("Use `garage layout show` to view staged role changes,"); + println!("and `garage layout apply` to enact staged changes."); + Ok(()) +} + +pub async fn cmd_remove_role( + rpc_cli: &Endpoint, + rpc_host: NodeID, + args: RemoveRoleOpt, +) -> Result<(), Error> { + let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + + let mut roles = layout.current().roles.clone(); + roles.merge(&layout.staging.get().roles); + + let deleted_node = + find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?; + + layout + .staging + .get_mut() + .roles + .merge(&roles.update_mutator(deleted_node, NodeRoleV(None))); + + send_layout(rpc_cli, rpc_host, layout).await?; + + println!("Role removal is staged but not yet committed."); + println!("Use `garage layout show` to view staged role changes,"); + println!("and `garage layout apply` to enact staged changes."); + Ok(()) +} + +pub async fn cmd_show_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, +) -> Result<(), Error> { + let layout = fetch_layout(rpc_cli, rpc_host).await?; + + println!("==== CURRENT CLUSTER LAYOUT ===="); + print_cluster_layout(layout.current(), "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes."); + println!(); + println!( + "Current cluster layout version: {}", + layout.current().version + ); + + let has_role_changes = print_staging_role_changes(&layout); + if has_role_changes { + let v = layout.current().version; + let res_apply = layout.apply_staged_changes(Some(v + 1)); + + // this will print the stats of 
what partitions + // will move around when we apply + match res_apply { + Ok((layout, msg)) => { + println!(); + println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ===="); + print_cluster_layout(layout.current(), "No nodes have a role in the new layout."); + println!(); + + for line in msg.iter() { + println!("{}", line); + } + println!("To enact the staged role changes, type:"); + println!(); + println!(" garage layout apply --version {}", v + 1); + println!(); + println!("You can also revert all proposed changes with: garage layout revert"); + } + Err(e) => { + println!("Error while trying to compute the assignment: {}", e); + println!("This new layout cannot yet be applied."); + println!("You can also revert all proposed changes with: garage layout revert"); + } + } + } + + Ok(()) +} + +pub async fn cmd_apply_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, + apply_opt: ApplyLayoutOpt, +) -> Result<(), Error> { + let layout = fetch_layout(rpc_cli, rpc_host).await?; + + let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?; + for line in msg.iter() { + println!("{}", line); + } + + send_layout(rpc_cli, rpc_host, layout).await?; + + println!("New cluster layout with updated role assignment has been applied in cluster."); + println!("Data will now be moved around between nodes accordingly."); + + Ok(()) +} + +pub async fn cmd_revert_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, + revert_opt: RevertLayoutOpt, +) -> Result<(), Error> { + if !revert_opt.yes { + return Err(Error::Message( + "Please add the --yes flag to run the layout revert operation".into(), + )); + } + + let layout = fetch_layout(rpc_cli, rpc_host).await?; + + let layout = layout.revert_staged_changes()?; + + send_layout(rpc_cli, rpc_host, layout).await?; + + println!("All proposed role changes in cluster layout have been canceled."); + Ok(()) +} + +pub async fn cmd_config_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, + config_opt: ConfigLayoutOpt, +) -> Result<(), 
Error> { + let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + + let mut did_something = false; + match config_opt.redundancy { + None => (), + Some(r_str) => { + let r = r_str + .parse::() + .ok_or_message("invalid zone redundancy value")?; + if let ZoneRedundancy::AtLeast(r_int) = r { + if r_int > layout.current().replication_factor { + return Err(Error::Message(format!( + "The zone redundancy must be smaller or equal to the \ + replication factor ({}).", + layout.current().replication_factor + ))); + } else if r_int < 1 { + return Err(Error::Message( + "The zone redundancy must be at least 1.".into(), + )); + } + } + + layout + .staging + .get_mut() + .parameters + .update(LayoutParameters { zone_redundancy: r }); + println!("The zone redundancy parameter has been set to '{}'.", r); + did_something = true; + } + } + + if !did_something { + return Err(Error::Message( + "Please specify an action for `garage layout config`".into(), + )); + } + + send_layout(rpc_cli, rpc_host, layout).await?; + Ok(()) +} + +pub async fn cmd_layout_history( + rpc_cli: &Endpoint, + rpc_host: NodeID, +) -> Result<(), Error> { + let layout = fetch_layout(rpc_cli, rpc_host).await?; + let min_stored = layout.min_stored(); + + println!("==== LAYOUT HISTORY ===="); + let mut table = vec!["Version\tStatus\tStorage nodes\tGateway nodes".to_string()]; + for ver in layout + .versions + .iter() + .rev() + .chain(layout.old_versions.iter().rev()) + { + let status = if ver.version == layout.current().version { + "current" + } else if ver.version >= min_stored { + "draining" + } else { + "historical" + }; + table.push(format!( + "#{}\t{}\t{}\t{}", + ver.version, + status, + ver.roles + .items() + .iter() + .filter(|(_, _, x)| matches!(x, NodeRoleV(Some(c)) if c.capacity.is_some())) + .count(), + ver.roles + .items() + .iter() + .filter(|(_, _, x)| matches!(x, NodeRoleV(Some(c)) if c.capacity.is_none())) + .count(), + )); + } + format_table(table); + println!(); + + if layout.versions.len() > 
1 { + println!("==== UPDATE TRACKERS ===="); + println!("Several layout versions are currently live in the cluster, and data is being migrated."); + println!( + "This is the internal data that Garage stores to know which nodes have what data." + ); + println!(); + let mut table = vec!["Node\tAck\tSync\tSync_ack".to_string()]; + let all_nodes = layout.get_all_nodes(); + for node in all_nodes.iter() { + table.push(format!( + "{:?}\t#{}\t#{}\t#{}", + node, + layout.update_trackers.ack_map.get(node, min_stored), + layout.update_trackers.sync_map.get(node, min_stored), + layout.update_trackers.sync_ack_map.get(node, min_stored), + )); + } + table[1..].sort(); + format_table(table); + + let min_ack = layout + .update_trackers + .ack_map + .min_among(&all_nodes, layout.min_stored()); + + println!(); + println!( + "If some nodes are not catching up to the latest layout version in the update trackers," + ); + println!("it might be because they are offline or unable to complete a sync successfully."); + if min_ack < layout.current().version { + println!( + "You may force progress using `garage layout skip-dead-nodes --version {}`", + layout.current().version + ); + } else { + println!( + "You may force progress using `garage layout skip-dead-nodes --version {} --allow-missing-data`.", + layout.current().version + ); + } + } else { + println!("Your cluster is currently in a stable state with a single live layout version."); + println!("No metadata migration is in progress. Note that the migration of data blocks is not tracked,"); + println!( + "so you might want to keep old nodes online until their data directories become empty." 
+ ); + } + + Ok(()) +} + +pub async fn cmd_layout_skip_dead_nodes( + rpc_cli: &Endpoint, + rpc_host: NodeID, + opt: SkipDeadNodesOpt, +) -> Result<(), Error> { + let status = fetch_status(rpc_cli, rpc_host).await?; + let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + + if layout.versions.len() == 1 { + return Err(Error::Message( + "This command cannot be called when there is only one live cluster layout version" + .into(), + )); + } + + let min_v = layout.min_stored(); + if opt.version <= min_v || opt.version > layout.current().version { + return Err(Error::Message(format!( + "Invalid version, you may use the following version numbers: {}", + (min_v + 1..=layout.current().version) + .map(|x| x.to_string()) + .collect::>() + .join(" ") + ))); + } + + let all_nodes = layout.get_all_nodes(); + let mut did_something = false; + for node in all_nodes.iter() { + // Update ACK tracker for dead nodes or for all nodes if --allow-missing-data + if opt.allow_missing_data || !status.iter().any(|x| x.id == *node && x.is_up) { + if layout.update_trackers.ack_map.set_max(*node, opt.version) { + println!("Increased the ACK tracker for node {:?}", node); + did_something = true; + } + } + + // If --allow-missing-data, update SYNC tracker for all nodes. + if opt.allow_missing_data { + if layout.update_trackers.sync_map.set_max(*node, opt.version) { + println!("Increased the SYNC tracker for node {:?}", node); + did_something = true; + } + } + } + + if did_something { + send_layout(rpc_cli, rpc_host, layout).await?; + println!("Success."); + Ok(()) + } else if !opt.allow_missing_data { + Err(Error::Message("Nothing was done, try passing the `--allow-missing-data` flag to force progress even when not enough nodes can complete a metadata sync.".into())) + } else { + Err(Error::Message( + "Sorry, there is nothing I can do for you. Please wait patiently. 
If you ask for help, please send the output of the `garage layout history` command.".into(), + )) + } +} + +// --- utility --- + +pub async fn fetch_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, +) -> Result { + match rpc_cli + .call(&rpc_host, SystemRpc::PullClusterLayout, PRIO_NORMAL) + .await?? + { + SystemRpc::AdvertiseClusterLayout(t) => Ok(t), + resp => Err(Error::unexpected_rpc_message(resp)), + } +} + +pub async fn send_layout( + rpc_cli: &Endpoint, + rpc_host: NodeID, + layout: LayoutHistory, +) -> Result<(), Error> { + rpc_cli + .call( + &rpc_host, + SystemRpc::AdvertiseClusterLayout(layout), + PRIO_NORMAL, + ) + .await??; + Ok(()) +} + +pub fn print_cluster_layout(layout: &LayoutVersion, empty_msg: &str) { + let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()]; + for (id, _, role) in layout.roles.items().iter() { + let role = match &role.0 { + Some(r) => r, + _ => continue, + }; + let tags = role.tags.join(","); + let usage = layout.get_node_usage(id).unwrap_or(0); + let capacity = layout.get_node_capacity(id).unwrap_or(0); + if capacity > 0 { + table.push(format!( + "{:?}\t{}\t{}\t{}\t{} ({:.1}%)", + id, + tags, + role.zone, + role.capacity_string(), + ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false), + (100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32) + )); + } else { + table.push(format!( + "{:?}\t{}\t{}\t{}", + id, + tags, + role.zone, + role.capacity_string() + )); + }; + } + if table.len() > 1 { + format_table(table); + println!(); + println!("Zone redundancy: {}", layout.parameters.zone_redundancy); + } else { + println!("{}", empty_msg); + } +} + +pub fn print_staging_role_changes(layout: &LayoutHistory) -> bool { + let staging = layout.staging.get(); + let has_role_changes = staging + .roles + .items() + .iter() + .any(|(k, _, v)| layout.current().roles.get(k) != Some(v)); + let has_layout_changes = *staging.parameters.get() != layout.current().parameters; + + if 
has_role_changes || has_layout_changes { + println!(); + println!("==== STAGED ROLE CHANGES ===="); + if has_role_changes { + let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; + for (id, _, role) in staging.roles.items().iter() { + if layout.current().roles.get(id) == Some(role) { + continue; + } + if let Some(role) = &role.0 { + let tags = role.tags.join(","); + table.push(format!( + "{:?}\t{}\t{}\t{}", + id, + tags, + role.zone, + role.capacity_string() + )); + } else { + table.push(format!("{:?}\tREMOVED", id)); + } + } + format_table(table); + println!(); + } + if has_layout_changes { + println!( + "Zone redundancy: {}", + staging.parameters.get().zone_redundancy + ); + } + true + } else { + false + } +} diff --git a/src/garage/cli/local/completions.rs b/src/garage/cli/local/completions.rs deleted file mode 100644 index 58f58a40..00000000 --- a/src/garage/cli/local/completions.rs +++ /dev/null @@ -1,10 +0,0 @@ -use structopt::{clap::Shell, StructOpt}; - -use crate::Opt; - -pub(crate) fn generate_completions(shell: Shell) { - let mut command = Opt::clap(); - - let command_name = command.get_name().to_string(); - command.gen_completions_to(command_name, shell, &mut std::io::stdout()); -} diff --git a/src/garage/cli/local/mod.rs b/src/garage/cli/local/mod.rs deleted file mode 100644 index edd5e66c..00000000 --- a/src/garage/cli/local/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub(crate) mod completions; -pub(crate) mod convert_db; -pub(crate) mod init; -pub(crate) mod repair; diff --git a/src/garage/cli/mod.rs b/src/garage/cli/mod.rs index 60e9a5de..e131f62c 100644 --- a/src/garage/cli/mod.rs +++ b/src/garage/cli/mod.rs @@ -1,4 +1,13 @@ -pub mod structs; +pub(crate) mod cmd; +pub(crate) mod init; +pub(crate) mod layout; +pub(crate) mod structs; +pub(crate) mod util; -pub mod local; -pub mod remote; +pub(crate) mod convert_db; + +pub(crate) use cmd::*; +pub(crate) use init::*; +pub(crate) use layout::*; +pub(crate) use structs::*; +pub(crate) use util::*; 
diff --git a/src/garage/cli/remote/admin_token.rs b/src/garage/cli/remote/admin_token.rs deleted file mode 100644 index 144c11c7..00000000 --- a/src/garage/cli/remote/admin_token.rs +++ /dev/null @@ -1,256 +0,0 @@ -use std::borrow::Cow; - -use format_table::format_table; - -use chrono::Local; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_admin_token(&self, cmd: AdminTokenOperation) -> Result<(), Error> { - match cmd { - AdminTokenOperation::List => self.cmd_list_admin_tokens().await, - AdminTokenOperation::Info { api_token } => self.cmd_admin_token_info(api_token).await, - AdminTokenOperation::Create(opt) => self.cmd_create_admin_token(opt).await, - AdminTokenOperation::Rename { - api_token, - new_name, - } => self.cmd_rename_admin_token(api_token, new_name).await, - AdminTokenOperation::Set(opt) => self.cmd_update_admin_token(opt).await, - AdminTokenOperation::Delete { api_token, yes } => { - self.cmd_delete_admin_token(api_token, yes).await - } - AdminTokenOperation::DeleteExpired { yes } => { - self.cmd_delete_expired_admin_tokens(yes).await - } - } - } - - pub async fn cmd_list_admin_tokens(&self) -> Result<(), Error> { - let mut list = self.api_request(ListAdminTokensRequest).await?; - - list.0.sort_by_key(|x| x.created); - - let mut table = vec!["ID\tCreated\tName\tExpiration\tScope".to_string()]; - for tok in list.0.iter() { - let scope = if tok.expired { - String::new() - } else { - table_list_abbr(&tok.scope) - }; - let exp = if tok.expired { - Cow::Borrowed("expired") - } else { - tok.expiration - .map(|x| x.with_timezone(&Local).to_string().into()) - .unwrap_or(Cow::Borrowed("never")) - }; - table.push(format!( - "{}\t{}\t{}\t{}\t{}", - tok.id.as_deref().unwrap_or("-"), - tok.created - .map(|x| x.with_timezone(&Local).date_naive().to_string().into()) - .unwrap_or(Cow::Borrowed("-")), - tok.name, - exp, - scope, - )); - } - format_table(table); - - 
Ok(()) - } - - pub async fn cmd_admin_token_info(&self, search: String) -> Result<(), Error> { - let info = self - .api_request(GetAdminTokenInfoRequest { - id: None, - search: Some(search), - }) - .await?; - - print_token_info(&info); - - Ok(()) - } - - pub async fn cmd_create_admin_token(&self, opt: AdminTokenCreateOp) -> Result<(), Error> { - let res = self - .api_request(CreateAdminTokenRequest(UpdateAdminTokenRequestBody { - name: opt.name, - expiration: parse_expires_in(&opt.expires_in)?, - never_expires: false, - scope: opt.scope.map(|s| { - s.split(",") - .map(|x| x.trim().to_string()) - .collect::>() - }), - })) - .await?; - - if opt.quiet { - println!("{}", res.secret_token); - } else { - println!("This is your secret bearer token, it will not be shown again by Garage:"); - println!("\n {}\n", res.secret_token); - print_token_info(&res.info); - } - - Ok(()) - } - - pub async fn cmd_rename_admin_token(&self, old: String, new: String) -> Result<(), Error> { - let token = self - .api_request(GetAdminTokenInfoRequest { - id: None, - search: Some(old), - }) - .await?; - - let info = self - .api_request(UpdateAdminTokenRequest { - id: token.id.unwrap(), - body: UpdateAdminTokenRequestBody { - name: Some(new), - expiration: None, - never_expires: false, - scope: None, - }, - }) - .await?; - - print_token_info(&info.0); - - Ok(()) - } - - pub async fn cmd_update_admin_token(&self, opt: AdminTokenSetOp) -> Result<(), Error> { - let token = self - .api_request(GetAdminTokenInfoRequest { - id: None, - search: Some(opt.api_token), - }) - .await?; - - let info = self - .api_request(UpdateAdminTokenRequest { - id: token.id.unwrap(), - body: UpdateAdminTokenRequestBody { - name: None, - expiration: parse_expires_in(&opt.expires_in)?, - never_expires: opt.never_expires, - scope: opt.scope.map({ - let mut new_scope = token.scope; - |scope_str| { - if let Some(add) = scope_str.strip_prefix("+") { - for a in add.split(",").map(|x| x.trim().to_string()) { - if 
!new_scope.contains(&a) { - new_scope.push(a); - } - } - new_scope - } else if let Some(sub) = scope_str.strip_prefix("-") { - for r in sub.split(",").map(|x| x.trim()) { - new_scope.retain(|x| x != r); - } - new_scope - } else { - scope_str - .split(",") - .map(|x| x.trim().to_string()) - .collect::>() - } - } - }), - }, - }) - .await?; - - print_token_info(&info.0); - - Ok(()) - } - - pub async fn cmd_delete_admin_token(&self, token: String, yes: bool) -> Result<(), Error> { - let token = self - .api_request(GetAdminTokenInfoRequest { - id: None, - search: Some(token), - }) - .await?; - - let id = token.id.unwrap(); - - if !yes { - return Err(Error::Message(format!( - "Add the --yes flag to delete API token `{}` ({})", - token.name, id - ))); - } - - self.api_request(DeleteAdminTokenRequest { id }).await?; - - println!("Admin API token has been deleted."); - - Ok(()) - } - - pub async fn cmd_delete_expired_admin_tokens(&self, yes: bool) -> Result<(), Error> { - let mut list = self.api_request(ListAdminTokensRequest).await?.0; - - list.retain(|tok| tok.expired); - - if !yes { - return Err(Error::Message(format!( - "This would delete {} admin API tokens, add the --yes flag to proceed.", - list.len(), - ))); - } - - for token in list.iter() { - let id = token.id.clone().unwrap(); - println!("Deleting token `{}` ({})", token.name, id); - self.api_request(DeleteAdminTokenRequest { id }).await?; - } - - println!("{} admin API tokens have been deleted.", list.len()); - - Ok(()) - } -} - -fn print_token_info(token: &GetAdminTokenInfoResponse) { - println!("==== ADMINISTRATION TOKEN INFORMATION ===="); - let mut table = vec![ - format!("Token ID:\t{}", token.id.as_ref().unwrap()), - format!("Token name:\t{}", token.name), - format!("Created:\t{}", token.created.unwrap().with_timezone(&Local)), - format!( - "Validity:\t{}", - if token.expired { "EXPIRED" } else { "valid" } - ), - format!( - "Expiration:\t{}", - token - .expiration - .map(|x| 
x.with_timezone(&Local).to_string().into()) - .unwrap_or(Cow::Borrowed("never")) - ), - String::new(), - ]; - - for (i, scope) in token.scope.iter().enumerate() { - if i == 0 { - table.push(format!("Scope:\t{}", scope)); - } else { - table.push(format!("\t{}", scope)); - } - } - - format_table(table); -} diff --git a/src/garage/cli/remote/block.rs b/src/garage/cli/remote/block.rs deleted file mode 100644 index 613a1a16..00000000 --- a/src/garage/cli/remote/block.rs +++ /dev/null @@ -1,174 +0,0 @@ -//use bytesize::ByteSize; -use format_table::format_table; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_block(&self, cmd: BlockOperation) -> Result<(), Error> { - match cmd { - BlockOperation::ListErrors => self.cmd_list_block_errors().await, - BlockOperation::Info { hash } => self.cmd_get_block_info(hash).await, - BlockOperation::RetryNow { all, blocks } => self.cmd_block_retry_now(all, blocks).await, - BlockOperation::Purge { yes, blocks } => self.cmd_block_purge(yes, blocks).await, - } - } - - pub async fn cmd_list_block_errors(&self) -> Result<(), Error> { - let errors = self.local_api_request(LocalListBlockErrorsRequest).await?.0; - - let tf = timeago::Formatter::new(); - let mut tf2 = timeago::Formatter::new(); - tf2.ago(""); - - let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()]; - for e in errors { - let next_try = if e.next_try_in_secs > 0 { - tf2.convert(Duration::from_secs(e.next_try_in_secs)) - } else { - "asap".to_string() - }; - table.push(format!( - "{}\t{}\t{}\t{}\tin {}", - e.block_hash, - e.refcount, - e.error_count, - tf.convert(Duration::from_secs(e.last_try_secs_ago)), - next_try - )); - } - format_table(table); - - Ok(()) - } - - pub async fn cmd_get_block_info(&self, hash: String) -> Result<(), Error> { - let info = self - .local_api_request(LocalGetBlockInfoRequest { block_hash: hash }) - .await?; - - println!("==== BLOCK 
INFORMATION ===="); - format_table(vec![ - format!("Block hash:\t{}", info.block_hash), - format!("Refcount:\t{}", info.refcount), - ]); - println!(); - - println!("==== REFERENCES TO THIS BLOCK ===="); - let mut table = vec!["Status\tVersion\tBucket\tKey\tMPU".into()]; - let mut nondeleted_count = 0; - let mut inconsistent_refs = false; - for ver in info.versions.iter() { - match &ver.backlink { - Some(BlockVersionBacklink::Object { bucket_id, key }) => { - table.push(format!( - "{}\t{:.16}{}\t{:.16}\t{}", - if ver.ref_deleted { "deleted" } else { "active" }, - ver.version_id, - deleted_to_str(ver.version_deleted), - bucket_id, - key - )); - } - Some(BlockVersionBacklink::Upload { - upload_id, - upload_deleted, - upload_garbage_collected: _, - bucket_id, - key, - }) => { - table.push(format!( - "{}\t{:.16}{}\t{:.16}\t{}\t{:.16}{}", - if ver.ref_deleted { "deleted" } else { "active" }, - ver.version_id, - deleted_to_str(ver.version_deleted), - bucket_id.as_deref().unwrap_or(""), - key.as_deref().unwrap_or(""), - upload_id, - deleted_to_str(*upload_deleted), - )); - } - None => { - table.push(format!("{:.16}\t\t\tyes", ver.version_id)); - } - } - if ver.ref_deleted != ver.version_deleted { - inconsistent_refs = true; - } - if !ver.ref_deleted { - nondeleted_count += 1; - } - } - format_table(table); - - if inconsistent_refs { - println!(); - println!("There are inconsistencies between the block_ref and the version tables."); - println!("Fix them by running `garage repair block-refs`"); - } - - if info.refcount != nondeleted_count { - println!(); - println!( - "Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`." 
- ); - } - - Ok(()) - } - - pub async fn cmd_block_retry_now(&self, all: bool, blocks: Vec) -> Result<(), Error> { - let req = match (all, blocks.len()) { - (true, 0) => LocalRetryBlockResyncRequest::All { all: true }, - (false, n) if n > 0 => LocalRetryBlockResyncRequest::Blocks { - block_hashes: blocks, - }, - _ => { - return Err(Error::Message( - "Please specify block hashes or --all (not both)".into(), - )) - } - }; - - let res = self.local_api_request(req).await?; - - println!( - "{} blocks returned in queue for a retry now (check logs to see results)", - res.count - ); - - Ok(()) - } - - pub async fn cmd_block_purge(&self, yes: bool, blocks: Vec) -> Result<(), Error> { - if !yes { - return Err(Error::Message( - "Pass the --yes flag to confirm block purge operation.".into(), - )); - } - - let res = self - .local_api_request(LocalPurgeBlocksRequest(blocks)) - .await?; - - println!( - "Purged {} blocks: deleted {} block refs, {} versions, {} objects, {} multipart uploads", - res.blocks_purged, res.block_refs_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted, - ); - - Ok(()) - } -} - -#[must_use] -const fn deleted_to_str(deleted: bool) -> &'static str { - if deleted { - " (deleted)" - } else { - "" - } -} diff --git a/src/garage/cli/remote/bucket.rs b/src/garage/cli/remote/bucket.rs deleted file mode 100644 index edfeaca5..00000000 --- a/src/garage/cli/remote/bucket.rs +++ /dev/null @@ -1,592 +0,0 @@ -//use bytesize::ByteSize; -use format_table::format_table; - -use chrono::Local; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> { - match cmd { - BucketOperation::List => self.cmd_list_buckets().await, - BucketOperation::Info(query) => self.cmd_bucket_info(query).await, - BucketOperation::Create(query) => self.cmd_create_bucket(query).await, - BucketOperation::Delete(query) => 
self.cmd_delete_bucket(query).await, - BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await, - BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await, - BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await, - BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await, - BucketOperation::Website(query) => self.cmd_bucket_website(query).await, - BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await, - BucketOperation::CleanupIncompleteUploads(query) => { - self.cmd_cleanup_incomplete_uploads(query).await - } - BucketOperation::InspectObject(query) => self.cmd_inspect_object(query).await, - } - } - - pub async fn cmd_list_buckets(&self) -> Result<(), Error> { - let mut buckets = self.api_request(ListBucketsRequest).await?; - - buckets.0.sort_by_key(|x| x.created); - - let mut table = vec!["ID\tCreated\tGlobal aliases\tLocal aliases".to_string()]; - for bucket in buckets.0.iter() { - table.push(format!( - "{:.16}\t{}\t{}\t{}", - bucket.id, - bucket.created.with_timezone(&Local).date_naive(), - table_list_abbr(&bucket.global_aliases), - table_list_abbr( - bucket - .local_aliases - .iter() - .map(|x| format!("{}:{}", x.access_key_id, x.alias)) - ), - )); - } - format_table(table); - - Ok(()) - } - - pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.name), - }) - .await?; - - print_bucket_info(&bucket); - - Ok(()) - } - - pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(CreateBucketRequest { - global_alias: Some(opt.name.clone()), - local_alias: None, - }) - .await?; - - print_bucket_info(&bucket.0); - - Ok(()) - } - - pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: 
Some(opt.name.clone()), - }) - .await?; - - // CLI-only checks: the bucket must not have other aliases - if bucket.global_aliases.iter().any(|a| *a != opt.name) { - return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name))); - } - - if bucket - .keys - .iter() - .any(|k| !k.bucket_local_aliases.is_empty()) - { - return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name))); - } - - if !opt.yes { - println!("About to delete bucket {}.", bucket.id); - return Err(Error::Message( - "Add --yes flag to really perform this operation".to_string(), - )); - } - - self.api_request(DeleteBucketRequest { - id: bucket.id.clone(), - }) - .await?; - - println!("Bucket {} has been deleted.", bucket.id); - - Ok(()) - } - - pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.existing_bucket.clone()), - }) - .await?; - - let res = if let Some(key_pat) = &opt.local { - let key = self - .api_request(GetKeyInfoRequest { - search: Some(key_pat.clone()), - id: None, - show_secret_key: false, - }) - .await?; - - self.api_request(AddBucketAliasRequest { - bucket_id: bucket.id.clone(), - alias: BucketAliasEnum::Local { - local_alias: opt.new_name.clone(), - access_key_id: key.access_key_id.clone(), - }, - }) - .await? - } else { - self.api_request(AddBucketAliasRequest { - bucket_id: bucket.id.clone(), - alias: BucketAliasEnum::Global { - global_alias: opt.new_name.clone(), - }, - }) - .await? 
- }; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> { - let res = if let Some(key_pat) = &opt.local { - let key = self - .api_request(GetKeyInfoRequest { - search: Some(key_pat.clone()), - id: None, - show_secret_key: false, - }) - .await?; - - let bucket = key - .buckets - .iter() - .find(|x| x.local_aliases.contains(&opt.name)) - .ok_or_message(format!( - "No bucket called {} in namespace of key {}", - opt.name, key.access_key_id - ))?; - - self.api_request(RemoveBucketAliasRequest { - bucket_id: bucket.id.clone(), - alias: BucketAliasEnum::Local { - access_key_id: key.access_key_id.clone(), - local_alias: opt.name.clone(), - }, - }) - .await? - } else { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: Some(opt.name.clone()), - search: None, - }) - .await?; - - self.api_request(RemoveBucketAliasRequest { - bucket_id: bucket.id.clone(), - alias: BucketAliasEnum::Global { - global_alias: opt.name.clone(), - }, - }) - .await? 
- }; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.bucket.clone()), - }) - .await?; - - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern.clone()), - show_secret_key: false, - }) - .await?; - - let res = self - .api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest { - bucket_id: bucket.id.clone(), - access_key_id: key.access_key_id.clone(), - permissions: ApiBucketKeyPerm { - read: opt.read, - write: opt.write, - owner: opt.owner, - }, - })) - .await?; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.bucket.clone()), - }) - .await?; - - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern.clone()), - show_secret_key: false, - }) - .await?; - - let res = self - .api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest { - bucket_id: bucket.id.clone(), - access_key_id: key.access_key_id.clone(), - permissions: ApiBucketKeyPerm { - read: opt.read, - write: opt.write, - owner: opt.owner, - }, - })) - .await?; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.bucket.clone()), - }) - .await?; - - if !(opt.allow ^ opt.deny) { - return Err(Error::Message( - "You must specify exactly one flag, either --allow or --deny".to_string(), - )); - } - - // Destructure becket info to allow separate use of `id` and `website_config` - let GetBucketInfoResponse { - id: bucket_id, - website_config: bucket_website_config, - .. 
- } = bucket; - - let wa = if opt.allow { - UpdateBucketWebsiteAccess { - enabled: true, - index_document: Some(opt.index_document.clone()), - error_document: opt - .error_document - .or_else(|| bucket_website_config.and_then(|x| x.error_document.clone())), - routing_rules: None, - } - } else { - UpdateBucketWebsiteAccess { - enabled: false, - index_document: None, - error_document: None, - routing_rules: None, - } - }; - - let res = self - .api_request(UpdateBucketRequest { - id: bucket_id, - body: UpdateBucketRequestBody { - website_access: Some(wa), - quotas: None, - cors_rules: None, - lifecycle_rules: None, - }, - }) - .await?; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.bucket.clone()), - }) - .await?; - - if opt.max_size.is_none() && opt.max_objects.is_none() { - return Err(Error::Message( - "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(), - )); - } - - let new_quotas = ApiBucketQuotas { - max_size: match opt.max_size.as_deref() { - Some("none") => None, - Some(v) => Some( - v.parse::() - .ok_or_message(format!("Invalid size specified: {}", v))? 
- .as_u64(), - ), - None => bucket.quotas.max_size, - }, - max_objects: match opt.max_objects.as_deref() { - Some("none") => None, - Some(v) => Some( - v.parse::() - .ok_or_message(format!("Invalid number: {}", v))?, - ), - None => bucket.quotas.max_objects, - }, - }; - - let res = self - .api_request(UpdateBucketRequest { - id: bucket.id.clone(), - body: UpdateBucketRequestBody { - website_access: None, - quotas: Some(new_quotas), - cors_rules: None, - lifecycle_rules: None, - }, - }) - .await?; - - print_bucket_info(&res.0); - - Ok(()) - } - - pub async fn cmd_cleanup_incomplete_uploads( - &self, - opt: CleanupIncompleteUploadsOpt, - ) -> Result<(), Error> { - let older_than = parse_duration::parse::parse(&opt.older_than) - .ok_or_message("Invalid duration passed for --older-than parameter")?; - - for b in opt.buckets.iter() { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(b.clone()), - }) - .await?; - - let res = self - .api_request(CleanupIncompleteUploadsRequest { - bucket_id: bucket.id.clone(), - older_than_secs: older_than.as_secs(), - }) - .await?; - - if res.uploads_deleted > 0 { - println!("{:.16}: {} uploads deleted", bucket.id, res.uploads_deleted); - } else { - println!("{:.16}: no uploads deleted", bucket.id); - } - } - - Ok(()) - } - - pub async fn cmd_inspect_object(&self, opt: InspectObjectOpt) -> Result<(), Error> { - let bucket = self - .api_request(GetBucketInfoRequest { - id: None, - global_alias: None, - search: Some(opt.bucket), - }) - .await?; - - let info = self - .api_request(InspectObjectRequest { - bucket_id: bucket.id, - key: opt.key, - }) - .await?; - - for ver in info.versions { - println!("==== OBJECT VERSION ===="); - let mut tab = vec![ - format!("Bucket ID:\t{}", info.bucket_id), - format!("Key:\t{}", info.key), - format!("Version ID:\t{}", ver.uuid), - format!("Timestamp:\t{}", ver.timestamp), - ]; - if let Some(size) = ver.size { - let bs = bytesize::ByteSize::b(size); 
- tab.push(format!( - "Size:\t{} ({})", - bs.display().si(), - bs.display().iec() - )); - tab.push(format!("Size (exact):\t{}", size)); - if !ver.blocks.is_empty() { - tab.push(format!("Number of blocks:\t{:?}", ver.blocks.len())); - } - } - if let Some(etag) = ver.etag { - tab.push(format!("Etag:\t{}", etag)); - } - tab.extend([ - format!("Encrypted:\t{}", ver.encrypted), - format!("Uploading:\t{}", ver.uploading), - format!("Aborted:\t{}", ver.aborted), - format!("Delete marker:\t{}", ver.delete_marker), - format!("Inline data:\t{}", ver.inline), - ]); - if !ver.headers.is_empty() { - tab.push(String::new()); - tab.extend(ver.headers.iter().map(|(k, v)| format!("{}\t{}", k, v))); - } - format_table(tab); - - if !ver.blocks.is_empty() { - let mut tab = vec!["Part#\tOffset\tBlock hash\tSize".to_string()]; - tab.extend(ver.blocks.iter().map(|b| { - format!( - "{:4}\t{:9}\t{}\t{:9}", - b.part_number, b.offset, b.hash, b.size - ) - })); - println!(); - format_table(tab); - } - println!(); - } - - Ok(()) - } -} - -fn print_bucket_info(bucket: &GetBucketInfoResponse) { - println!("==== BUCKET INFORMATION ===="); - - let mut info = vec![ - format!("Bucket:\t{}", bucket.id), - format!("Created:\t{}", bucket.created.with_timezone(&Local)), - String::new(), - { - let size = bytesize::ByteSize::b(bucket.bytes as u64); - format!("Size:\t{} ({})", size.display().si(), size.display().iec()) - }, - format!("Objects:\t{}", bucket.objects), - ]; - - if bucket.unfinished_uploads > 0 { - info.extend([ - format!( - "Unfinished uploads:\t{} multipart uploads", - bucket.unfinished_multipart_uploads - ), - format!("\t{} including regular uploads", bucket.unfinished_uploads), - { - let mpu_size = - bytesize::ByteSize::b(bucket.unfinished_multipart_upload_bytes as u64); - format!( - "Size of unfinished multipart uploads:\t{} ({})", - mpu_size.display().si(), - mpu_size.display().iec(), - ) - }, - ]); - } - - info.extend([ - String::new(), - format!("Website access:\t{}", 
bucket.website_access), - ]); - - if let Some(wc) = &bucket.website_config { - info.extend([ - format!(" index document:\t{}", wc.index_document), - format!( - " error document:\t{}", - wc.error_document.as_deref().unwrap_or("(not defined)") - ), - ]); - } - - if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() { - info.push(String::new()); - info.push("Quotas:\tenabled".into()); - if let Some(ms) = bucket.quotas.max_size { - let ms = bytesize::ByteSize::b(ms); - info.push(format!( - " maximum size:\t{} ({})", - ms.display().si(), - ms.display().iec() - )); - } - if let Some(mo) = bucket.quotas.max_objects { - info.push(format!(" maximum number of objects:\t{}", mo)); - } - } - - if !bucket.global_aliases.is_empty() { - info.push(String::new()); - for (i, alias) in bucket.global_aliases.iter().enumerate() { - if i == 0 && bucket.global_aliases.len() > 1 { - info.push(format!("Global aliases:\t{}", alias)); - } else if i == 0 { - info.push(format!("Global alias:\t{}", alias)); - } else { - info.push(format!("\t{}", alias)); - } - } - } - - format_table(info); - - println!(); - println!("==== KEYS FOR THIS BUCKET ===="); - let mut key_info = vec!["Permissions\tAccess key\t\tLocal aliases".to_string()]; - key_info.extend(bucket.keys.iter().map(|key| { - let rflag = if key.permissions.read { "R" } else { " " }; - let wflag = if key.permissions.write { "W" } else { " " }; - let oflag = if key.permissions.owner { "O" } else { " " }; - format!( - "{}{}{}\t{}\t{}\t{}", - rflag, - wflag, - oflag, - key.access_key_id, - key.name, - key.bucket_local_aliases.to_vec().join(","), - ) - })); - format_table(key_info); -} diff --git a/src/garage/cli/remote/cluster.rs b/src/garage/cli/remote/cluster.rs deleted file mode 100644 index 284e3690..00000000 --- a/src/garage/cli/remote/cluster.rs +++ /dev/null @@ -1,160 +0,0 @@ -use format_table::format_table; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::layout::*; -use 
crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_status(&self) -> Result<(), Error> { - let status = self.api_request(GetClusterStatusRequest).await?; - let layout = self.api_request(GetClusterLayoutRequest).await?; - - println!("==== HEALTHY NODES ===="); - - let mut healthy_nodes = - vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail\tVersion".to_string()]; - - for adv in status.nodes.iter().filter(|adv| adv.is_up) { - let host = adv.hostname.as_deref().unwrap_or("?"); - let addr = match adv.addr { - Some(addr) => addr.to_string(), - None => "N/A".to_string(), - }; - if let Some(cfg) = &adv.role { - let data_avail = match &adv.data_partition { - _ if cfg.capacity.is_none() => "N/A".into(), - Some(FreeSpaceResp { available, total }) => { - let pct = (*available as f64) / (*total as f64) * 100.; - let avail_str = bytesize::ByteSize::b(*available); - format!("{} ({:.1}%)", avail_str, pct) - } - None => "?".into(), - }; - healthy_nodes.push(format!( - "{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}\t{version}", - id = adv.id, - host = host, - addr = addr, - tags = cfg.tags.join(","), - zone = cfg.zone, - capacity = capacity_string(cfg.capacity), - data_avail = data_avail, - version = adv.garage_version.as_deref().unwrap_or_default(), - )); - } else { - let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { - Some(NodeRoleChange { - action: NodeRoleChangeEnum::Update { .. }, - .. 
- }) => "pending...", - _ if adv.draining => "draining metadata..", - _ => "NO ROLE ASSIGNED", - }; - healthy_nodes.push(format!( - "{id:.16}\t{h}\t{addr}\t\t\t{status}\t\t{version}", - id = adv.id, - h = host, - addr = addr, - status = status, - version = adv.garage_version.as_deref().unwrap_or_default(), - )); - } - } - format_table(healthy_nodes); - - let tf = timeago::Formatter::new(); - let mut drain_msg = false; - let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()]; - for adv in status.nodes.iter().filter(|x| !x.is_up) { - let node = &adv.id; - - let host = adv.hostname.as_deref().unwrap_or("?"); - let last_seen = adv - .last_seen_secs_ago - .map(|s| tf.convert(Duration::from_secs(s))) - .unwrap_or_else(|| "never seen".into()); - - if let Some(cfg) = &adv.role { - let capacity = capacity_string(cfg.capacity); - - failed_nodes.push(format!( - "{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", - id = node, - host = host, - tags = cfg.tags.join(","), - zone = cfg.zone, - capacity = capacity, - last_seen = last_seen, - )); - } else { - let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { - Some(NodeRoleChange { - action: NodeRoleChangeEnum::Update { .. }, - .. - }) => "pending...", - _ if adv.draining => { - drain_msg = true; - "draining metadata.." - } - _ => continue, - }; - - failed_nodes.push(format!( - "{id:.16}\t{host}\t\t\t{status}\t{last_seen}", - id = node, - host = host, - status = status, - last_seen = last_seen, - )); - } - } - - if failed_nodes.len() > 1 { - println!("\n==== FAILED NODES ===="); - format_table(failed_nodes); - if drain_msg { - println!(); - println!("Your cluster is expecting to drain data from nodes that are currently unavailable."); - println!( - "If these nodes are definitely dead, please review the layout history with" - ); - println!( - "`garage layout history` and use `garage layout skip-dead-nodes` to force progress." 
- ); - } - } - - if print_staging_role_changes(&layout) { - println!(); - println!( - "Please use `garage layout show` to check the proposed new layout and apply it." - ); - println!(); - } - - Ok(()) - } - - pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> { - let res = self - .api_request(ConnectClusterNodesRequest(vec![opt.node])) - .await?; - if res.0.len() != 1 { - return Err(Error::Message(format!("unexpected response: {:?}", res))); - } - let res = res.0.into_iter().next().unwrap(); - if res.success { - println!("Success."); - Ok(()) - } else { - Err(Error::Message(format!( - "Failure: {}", - res.error.unwrap_or_default() - ))) - } - } -} diff --git a/src/garage/cli/remote/key.rs b/src/garage/cli/remote/key.rs deleted file mode 100644 index 68de358f..00000000 --- a/src/garage/cli/remote/key.rs +++ /dev/null @@ -1,320 +0,0 @@ -use std::borrow::Cow; - -use format_table::format_table; - -use chrono::Local; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> { - match cmd { - KeyOperation::List => self.cmd_list_keys().await, - KeyOperation::Info(query) => self.cmd_key_info(query).await, - KeyOperation::Create(query) => self.cmd_create_key(query).await, - KeyOperation::Rename(query) => self.cmd_rename_key(query).await, - KeyOperation::Set(opt) => self.cmd_update_key(opt).await, - KeyOperation::Delete(query) => self.cmd_delete_key(query).await, - KeyOperation::Allow(query) => self.cmd_allow_key(query).await, - KeyOperation::Deny(query) => self.cmd_deny_key(query).await, - KeyOperation::Import(query) => self.cmd_import_key(query).await, - KeyOperation::DeleteExpired { yes } => self.cmd_delete_expired_keys(yes).await, - } - } - - pub async fn cmd_list_keys(&self) -> Result<(), Error> { - let mut keys = self.api_request(ListKeysRequest).await?; - - keys.0.sort_by_key(|x| x.created); - - let 
mut table = vec!["ID\tCreated\tName\tExpiration".to_string()]; - for key in keys.0.iter() { - let exp = if key.expired { - Cow::from("expired") - } else { - key.expiration - .map(|x| x.with_timezone(&Local).to_string().into()) - .unwrap_or(Cow::Borrowed("never")) - }; - table.push(format!( - "{}\t{}\t{}\t{}", - key.id, - key.created - .map(|x| x.with_timezone(&Local).date_naive().to_string()) - .unwrap_or_default(), - key.name, - exp - )); - } - format_table(table); - - Ok(()) - } - - pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: opt.show_secret, - }) - .await?; - - print_key_info(&key); - - Ok(()) - } - - pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> { - let key = self - .api_request(CreateKeyRequest(UpdateKeyRequestBody { - name: Some(opt.name), - expiration: parse_expires_in(&opt.expires_in)?, - never_expires: false, - allow: None, - deny: None, - })) - .await?; - - print_key_info(&key.0); - - Ok(()) - } - - pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: false, - }) - .await?; - - let new_key = self - .api_request(UpdateKeyRequest { - id: key.access_key_id, - body: UpdateKeyRequestBody { - name: Some(opt.new_name), - expiration: None, - never_expires: false, - allow: None, - deny: None, - }, - }) - .await?; - - print_key_info(&new_key.0); - - Ok(()) - } - - pub async fn cmd_update_key(&self, opt: KeySetOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: false, - }) - .await?; - - let new_key = self - .api_request(UpdateKeyRequest { - id: key.access_key_id, - body: UpdateKeyRequestBody { - name: None, - expiration: parse_expires_in(&opt.expires_in)?, - 
never_expires: opt.never_expires, - allow: None, - deny: None, - }, - }) - .await?; - - print_key_info(&new_key.0); - - Ok(()) - } - - pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: false, - }) - .await?; - - if !opt.yes { - println!("About to delete key {}...", key.access_key_id); - return Err(Error::Message( - "Add --yes flag to really perform this operation".to_string(), - )); - } - - self.api_request(DeleteKeyRequest { - id: key.access_key_id.clone(), - }) - .await?; - - println!("Access key {} has been deleted.", key.access_key_id); - - Ok(()) - } - - pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: false, - }) - .await?; - - let new_key = self - .api_request(UpdateKeyRequest { - id: key.access_key_id, - body: UpdateKeyRequestBody { - name: None, - expiration: None, - never_expires: false, - allow: Some(KeyPerm { - create_bucket: opt.create_bucket, - }), - deny: None, - }, - }) - .await?; - - print_key_info(&new_key.0); - - Ok(()) - } - - pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> { - let key = self - .api_request(GetKeyInfoRequest { - id: None, - search: Some(opt.key_pattern), - show_secret_key: false, - }) - .await?; - - let new_key = self - .api_request(UpdateKeyRequest { - id: key.access_key_id, - body: UpdateKeyRequestBody { - name: None, - expiration: None, - never_expires: false, - allow: None, - deny: Some(KeyPerm { - create_bucket: opt.create_bucket, - }), - }, - }) - .await?; - - print_key_info(&new_key.0); - - Ok(()) - } - - pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> { - if !opt.yes { - return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. 
If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string())); - } - - let new_key = self - .api_request(ImportKeyRequest { - name: Some(opt.name), - access_key_id: opt.key_id, - secret_access_key: opt.secret_key, - }) - .await?; - - print_key_info(&new_key.0); - - Ok(()) - } - - pub async fn cmd_delete_expired_keys(&self, yes: bool) -> Result<(), Error> { - let mut list = self.api_request(ListKeysRequest).await?.0; - - list.retain(|key| key.expired); - - if !yes { - return Err(Error::Message(format!( - "This would delete {} access keys, add the --yes flag to proceed.", - list.len(), - ))); - } - - for key in list.iter() { - let id = key.id.clone(); - println!("Deleting access key `{}` ({})", key.name, id); - self.api_request(DeleteKeyRequest { id }).await?; - } - - println!("{} access keys have been deleted.", list.len()); - - Ok(()) - } -} - -fn print_key_info(key: &GetKeyInfoResponse) { - println!("==== ACCESS KEY INFORMATION ===="); - - let mut table = vec![ - format!("Key ID:\t{}", key.access_key_id), - format!("Key name:\t{}", key.name), - format!( - "Secret key:\t{}", - key.secret_access_key.as_deref().unwrap_or("(redacted)") - ), - ]; - - if let Some(c) = key.created { - table.push(format!("Created:\t{}", c.with_timezone(&Local))); - } - - table.extend([ - format!( - "Validity:\t{}", - if key.expired { "EXPIRED" } else { "valid" } - ), - format!( - "Expiration:\t{}", - key.expiration - .map(|x| x.with_timezone(&Local).to_string().into()) - .unwrap_or(Cow::Borrowed("never")) - ), - String::new(), - format!("Can create buckets:\t{}", key.permissions.create_bucket), - ]); - format_table(table); - - println!(); - println!("==== BUCKETS FOR THIS KEY ===="); - let mut bucket_info = vec!["Permissions\tID\tGlobal aliases\tLocal aliases".to_string()]; - bucket_info.extend(key.buckets.iter().map(|bucket| { - let rflag = if bucket.permissions.read { "R" } else { " " }; - let wflag = if 
bucket.permissions.write { "W" } else { " " }; - let oflag = if bucket.permissions.owner { "O" } else { " " }; - format!( - "{}{}{}\t{:.16}\t{}\t{}", - rflag, - wflag, - oflag, - bucket.id, - table_list_abbr(&bucket.global_aliases), - bucket.local_aliases.join(","), - ) - })); - - format_table(bucket_info); -} diff --git a/src/garage/cli/remote/layout.rs b/src/garage/cli/remote/layout.rs deleted file mode 100644 index 10be1029..00000000 --- a/src/garage/cli/remote/layout.rs +++ /dev/null @@ -1,474 +0,0 @@ -use bytesize::ByteSize; -use format_table::format_table; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> { - match cmd { - LayoutOperation::Show => self.cmd_show_layout().await, - LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await, - LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await, - LayoutOperation::Config(config_opt) => self.cmd_config_layout(config_opt).await, - LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await, - LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await, - LayoutOperation::History => self.cmd_layout_history().await, - LayoutOperation::SkipDeadNodes(opt) => self.cmd_skip_dead_nodes(opt).await, - } - } - - pub async fn cmd_show_layout(&self) -> Result<(), Error> { - let layout = self.api_request(GetClusterLayoutRequest).await?; - - println!("==== CURRENT CLUSTER LAYOUT ===="); - print_cluster_layout(&layout, "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes."); - println!(); - println!("Current cluster layout version: {}", layout.version); - - let has_role_changes = print_staging_role_changes(&layout); - if has_role_changes { - let res_apply = self.api_request(PreviewClusterLayoutChangesRequest).await?; - - // this will print 
the stats of what partitions - // will move around when we apply - match res_apply { - PreviewClusterLayoutChangesResponse::Success { - message, - new_layout, - } => { - println!(); - println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ===="); - print_cluster_layout(&new_layout, "No nodes have a role in the new layout."); - println!(); - - for line in message.iter() { - println!("{}", line); - } - println!("To enact the staged role changes, type:"); - println!(); - println!(" garage layout apply --version {}", new_layout.version); - println!(); - println!("You can also revert all proposed changes with: garage layout revert"); - } - PreviewClusterLayoutChangesResponse::Error { error } => { - println!("Error while trying to compute the assignment: {}", error); - println!("This new layout cannot yet be applied."); - println!("You can also revert all proposed changes with: garage layout revert"); - } - } - } - - Ok(()) - } - - pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> { - let status = self.api_request(GetClusterStatusRequest).await?; - let layout = self.api_request(GetClusterLayoutRequest).await?; - - let mut actions = vec![]; - - for node in opt.replace.iter() { - let id = find_matching_node(&status, &layout, node)?; - - actions.push(NodeRoleChange { - id, - action: NodeRoleChangeEnum::Remove { remove: true }, - }); - } - - for node in opt.node_ids.iter() { - let id = find_matching_node(&status, &layout, node)?; - - let current = get_staged_or_current_role(&id, &layout); - - let zone = opt - .zone - .clone() - .or_else(|| current.as_ref().map(|c| c.zone.clone())) - .ok_or_message("Please specify a zone with the -z flag")?; - - let capacity = if opt.gateway { - if opt.capacity.is_some() { - return Err(Error::Message("Please specify only -c or -g".into())); - } - None - } else if let Some(cap) = opt.capacity { - Some(cap.as_u64()) - } else { - current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node 
explicitly as gateway with -g")?.capacity - }; - - let tags = if !opt.tags.is_empty() { - opt.tags.clone() - } else if let Some(cur) = current.as_ref() { - cur.tags.clone() - } else { - vec![] - }; - - actions.push(NodeRoleChange { - id, - action: NodeRoleChangeEnum::Update(NodeAssignedRole { - zone, - capacity, - tags, - }), - }); - } - - self.api_request(UpdateClusterLayoutRequest { - roles: actions, - parameters: None, - }) - .await?; - - println!("Role changes are staged but not yet committed."); - println!("Use `garage layout show` to view staged role changes,"); - println!("and `garage layout apply` to enact staged changes."); - Ok(()) - } - - pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> { - let status = self.api_request(GetClusterStatusRequest).await?; - let layout = self.api_request(GetClusterLayoutRequest).await?; - - let id = find_matching_node(&status, &layout, &opt.node_id)?; - - let actions = vec![NodeRoleChange { - id, - action: NodeRoleChangeEnum::Remove { remove: true }, - }]; - - self.api_request(UpdateClusterLayoutRequest { - roles: actions, - parameters: None, - }) - .await?; - - println!("Role removal is staged but not yet committed."); - println!("Use `garage layout show` to view staged role changes,"); - println!("and `garage layout apply` to enact staged changes."); - Ok(()) - } - - pub async fn cmd_config_layout(&self, config_opt: ConfigLayoutOpt) -> Result<(), Error> { - let mut did_something = false; - match config_opt.redundancy { - None => (), - Some(r_str) => { - let r = parse_zone_redundancy(&r_str)?; - - self.api_request(UpdateClusterLayoutRequest { - roles: vec![], - parameters: Some(LayoutParameters { zone_redundancy: r }), - }) - .await?; - println!( - "The zone redundancy parameter has been set to '{}'.", - display_zone_redundancy(r) - ); - did_something = true; - } - } - - if !did_something { - return Err(Error::Message( - "Please specify an action for `garage layout config`".into(), - )); - } - - 
Ok(()) - } - - pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> { - let missing_version_error = r#" -Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout. -To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes. - "#; - - let req = ApplyClusterLayoutRequest { - version: apply_opt.version.ok_or_message(missing_version_error)?, - }; - let res = self.api_request(req).await?; - - for line in res.message.iter() { - println!("{}", line); - } - - println!("New cluster layout with updated role assignment has been applied in cluster."); - println!("Data will now be moved around between nodes accordingly."); - - Ok(()) - } - - pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> { - if !revert_opt.yes { - return Err(Error::Message( - "Please add the --yes flag to run the layout revert operation".into(), - )); - } - - self.api_request(RevertClusterLayoutRequest).await?; - - println!("All proposed role changes in cluster layout have been canceled."); - Ok(()) - } - - pub async fn cmd_layout_history(&self) -> Result<(), Error> { - let history = self.api_request(GetClusterLayoutHistoryRequest).await?; - - println!("==== LAYOUT HISTORY ===="); - let mut table = vec!["Version\tStatus\tStorage nodes\tGateway nodes".to_string()]; - for ver in history.versions.iter() { - table.push(format!( - "#{}\t{:?}\t{}\t{}", - ver.version, ver.status, ver.storage_nodes, ver.gateway_nodes, - )); - } - format_table(table); - println!(); - - if let Some(update_trackers) = history.update_trackers { - println!("==== UPDATE TRACKERS ===="); - println!("Several layout versions are currently live in the cluster, and data is being migrated."); - println!( - "This is the internal data that Garage stores to know which nodes have what data." 
- ); - println!(); - let mut table = vec!["Node\tAck\tSync\tSync_ack".to_string()]; - for (node, trackers) in update_trackers.iter() { - table.push(format!( - "{:.16}\t#{}\t#{}\t#{}", - node, trackers.ack, trackers.sync, trackers.sync_ack, - )); - } - table[1..].sort(); - format_table(table); - - println!(); - println!( - "If some nodes are not catching up to the latest layout version in the update trackers," - ); - println!( - "it might be because they are offline or unable to complete a sync successfully." - ); - if history.min_ack < history.current_version { - println!( - "You may force progress using `garage layout skip-dead-nodes --version {}`", - history.current_version - ); - } else { - println!( - "You may force progress using `garage layout skip-dead-nodes --version {} --allow-missing-data`.", - history.current_version - ); - } - } else { - println!( - "Your cluster is currently in a stable state with a single live layout version." - ); - println!("No metadata migration is in progress. Note that the migration of data blocks is not tracked,"); - println!( - "so you might want to keep old nodes online until their data directories become empty." 
- ); - } - - Ok(()) - } - - pub async fn cmd_skip_dead_nodes(&self, opt: SkipDeadNodesOpt) -> Result<(), Error> { - let res = self - .api_request(ClusterLayoutSkipDeadNodesRequest { - version: opt.version, - allow_missing_data: opt.allow_missing_data, - }) - .await?; - - if !res.sync_updated.is_empty() || !res.ack_updated.is_empty() { - for node in res.ack_updated.iter() { - println!("Increased the ACK tracker for node {:.16}", node); - } - for node in res.sync_updated.iter() { - println!("Increased the SYNC tracker for node {:.16}", node); - } - Ok(()) - } else if !opt.allow_missing_data { - Err(Error::Message("Nothing was done, try passing the `--allow-missing-data` flag to force progress even when not enough nodes can complete a metadata sync.".into())) - } else { - Err(Error::Message( - "Sorry, there is nothing I can do for you. Please wait patiently. If you ask for help, please send the output of the `garage layout history` command.".into(), - )) - } - } -} - -// -------------------------- -// ---- helper functions ---- -// -------------------------- - -pub fn capacity_string(v: Option) -> String { - match v { - Some(c) => ByteSize::b(c).display().iec().to_string(), - None => "gateway".to_string(), - } -} - -pub fn get_staged_or_current_role( - id: &str, - layout: &GetClusterLayoutResponse, -) -> Option { - for node in layout.staged_role_changes.iter() { - if node.id == id { - return match &node.action { - NodeRoleChangeEnum::Remove { .. 
} => None, - NodeRoleChangeEnum::Update(role) => Some(role.clone()), - }; - } - } - - for node in layout.roles.iter() { - if node.id == id { - return Some(NodeAssignedRole { - zone: node.zone.clone(), - capacity: node.capacity, - tags: node.tags.clone(), - }); - } - } - - None -} - -pub fn find_matching_node( - status: &GetClusterStatusResponse, - layout: &GetClusterLayoutResponse, - pattern: &str, -) -> Result { - let all_node_ids_iter = status - .nodes - .iter() - .map(|x| x.id.as_str()) - .chain(layout.roles.iter().map(|x| x.id.as_str())); - - let mut candidates = vec![]; - for c in all_node_ids_iter { - if c.starts_with(pattern) && !candidates.contains(&c) { - candidates.push(c); - } - } - if candidates.len() != 1 { - Err(Error::Message(format!( - "{} nodes match '{}'", - candidates.len(), - pattern, - ))) - } else { - Ok(candidates[0].to_string()) - } -} - -pub fn print_cluster_layout(layout: &GetClusterLayoutResponse, empty_msg: &str) { - let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()]; - for role in layout.roles.iter() { - let tags = role.tags.join(","); - if let (Some(capacity), Some(usable_capacity)) = (role.capacity, role.usable_capacity) { - table.push(format!( - "{:.16}\t[{}]\t{}\t{}\t{} ({:.1}%)", - role.id, - tags, - role.zone, - capacity_string(role.capacity), - ByteSize::b(usable_capacity).display().iec(), - (100.0 * usable_capacity as f32) / (capacity as f32) - )); - } else { - table.push(format!( - "{:.16}\t[{}]\t{}\t{}", - role.id, - tags, - role.zone, - capacity_string(role.capacity), - )); - } - } - if table.len() > 1 { - format_table(table); - println!(); - println!( - "Zone redundancy: {}", - display_zone_redundancy(layout.parameters.zone_redundancy), - ); - } else { - println!("{}", empty_msg); - } -} - -pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool { - let has_role_changes = !layout.staged_role_changes.is_empty(); - - let has_layout_changes = layout.staged_parameters.is_some(); - 
- if has_role_changes || has_layout_changes { - println!(); - println!("==== STAGED ROLE CHANGES ===="); - if has_role_changes { - let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; - for change in layout.staged_role_changes.iter() { - match &change.action { - NodeRoleChangeEnum::Update(NodeAssignedRole { - tags, - zone, - capacity, - }) => { - let tags = tags.join(","); - table.push(format!( - "{:.16}\t[{}]\t{}\t{}", - change.id, - tags, - zone, - capacity_string(*capacity), - )); - } - NodeRoleChangeEnum::Remove { .. } => { - table.push(format!("{:.16}\tREMOVED", change.id)); - } - } - } - format_table(table); - println!(); - } - if let Some(p) = layout.staged_parameters.as_ref() { - println!( - "Zone redundancy: {}", - display_zone_redundancy(p.zone_redundancy) - ); - } - true - } else { - false - } -} - -pub fn display_zone_redundancy(z: ZoneRedundancy) -> String { - match z { - ZoneRedundancy::Maximum => "maximum".into(), - ZoneRedundancy::AtLeast(x) => x.to_string(), - } -} - -pub fn parse_zone_redundancy(s: &str) -> Result { - match s { - "none" | "max" | "maximum" => Ok(ZoneRedundancy::Maximum), - x => { - let v = x.parse::().map_err(|_| { - Error::Message("zone redundancy must be 'none'/'max' or an integer".into()) - })?; - Ok(ZoneRedundancy::AtLeast(v)) - } - } -} diff --git a/src/garage/cli/remote/mod.rs b/src/garage/cli/remote/mod.rs deleted file mode 100644 index d1a20989..00000000 --- a/src/garage/cli/remote/mod.rs +++ /dev/null @@ -1,174 +0,0 @@ -pub mod admin_token; -pub mod bucket; -pub mod cluster; -pub mod key; -pub mod layout; - -pub mod block; -pub mod node; -pub mod worker; - -use std::convert::TryFrom; -use std::sync::Arc; -use std::time::Duration; - -use chrono::{DateTime, Utc}; - -use garage_util::error::*; - -use garage_rpc::*; - -use garage_api_admin::api::*; -use garage_api_admin::api_server::{AdminRpc as ProxyRpc, AdminRpcResponse as ProxyRpcResponse}; -use garage_api_admin::RequestHandler; - -use crate::cli::structs::*; - 
-pub struct Cli { - pub proxy_rpc_endpoint: Arc>, - pub rpc_host: NodeID, -} - -impl Cli { - pub async fn handle(&self, cmd: Command) -> Result<(), Error> { - match cmd { - Command::Status => self.cmd_status().await, - Command::Node(NodeOperation::Connect(connect_opt)) => { - self.cmd_connect(connect_opt).await - } - Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await, - Command::Bucket(bo) => self.cmd_bucket(bo).await, - Command::AdminToken(to) => self.cmd_admin_token(to).await, - Command::Key(ko) => self.cmd_key(ko).await, - Command::Worker(wo) => self.cmd_worker(wo).await, - Command::Block(bo) => self.cmd_block(bo).await, - Command::Meta(mo) => self.cmd_meta(mo).await, - Command::Stats(so) => self.cmd_stats(so).await, - Command::Repair(ro) => self.cmd_repair(ro).await, - Command::JsonApi { endpoint, payload } => self.cmd_json_api(endpoint, payload).await, - - _ => unreachable!(), - } - } - - pub async fn api_request(&self, req: T) -> Result<::Response, Error> - where - T: RequestHandler, - AdminApiRequest: From, - ::Response: TryFrom, - { - let req = AdminApiRequest::from(req); - let req_name = req.name(); - match self - .proxy_rpc_endpoint - .call(&self.rpc_host, ProxyRpc::Proxy(req), PRIO_NORMAL) - .await?? 
- { - ProxyRpcResponse::ProxyApiOkResponse(resp) => { - ::Response::try_from(resp).map_err(|_| { - Error::Message(format!("{} returned unexpected response", req_name)) - }) - } - ProxyRpcResponse::ApiErrorResponse { - http_code, - error_code, - message, - } => Err(Error::Message(format!( - "{} returned {} ({}): {}", - req_name, error_code, http_code, message - ))), - m => Err(Error::unexpected_rpc_message(m)), - } - } - - pub async fn local_api_request( - &self, - req: T, - ) -> Result<::Response, Error> - where - T: RequestHandler, - MultiRequest: RequestHandler::Response>>, - AdminApiRequest: From>, - as RequestHandler>::Response: TryFrom, - { - let req = MultiRequest { - node: hex::encode(self.rpc_host), - body: req, - }; - let resp = self.api_request(req).await?; - - if let Some((_, e)) = resp.error.into_iter().next() { - return Err(Error::Message(e)); - } - if resp.success.len() != 1 { - return Err(Error::Message(format!( - "{} responses returned, expected 1", - resp.success.len() - ))); - } - Ok(resp.success.into_iter().next().unwrap().1) - } - - pub async fn cmd_json_api(&self, endpoint: String, payload: String) -> Result<(), Error> { - let payload: serde_json::Value = if payload == "-" { - serde_json::from_reader(&std::io::stdin())? - } else { - serde_json::from_str(&payload)? - }; - - let request: AdminApiRequest = serde_json::from_value(serde_json::json!({ - endpoint.clone(): payload, - }))?; - - let resp = match self - .proxy_rpc_endpoint - .call(&self.rpc_host, ProxyRpc::Proxy(request), PRIO_NORMAL) - .await?? - { - ProxyRpcResponse::ProxyApiOkResponse(resp) => resp, - ProxyRpcResponse::ApiErrorResponse { - http_code, - error_code, - message, - } => { - return Err(Error::Message(format!( - "{} ({}): {}", - error_code, http_code, message - ))) - } - m => return Err(Error::unexpected_rpc_message(m)), - }; - - if let serde_json::Value::Object(map) = serde_json::to_value(&resp)? 
{ - if let Some(inner) = map.get(&endpoint) { - serde_json::to_writer_pretty(std::io::stdout(), &inner)?; - return Ok(()); - } - } - - Err(Error::Message(format!( - "Invalid response: {}", - serde_json::to_string(&resp)? - ))) - } -} - -pub fn table_list_abbr, S: AsRef>(values: T) -> String { - let mut iter = values.into_iter(); - - match iter.next() { - Some(first) => match iter.count() { - 0 => first.as_ref().to_string(), - n => format!("{}, ... ({})", first.as_ref(), n + 1), - }, - None => String::new(), - } -} - -pub fn parse_expires_in(expires_in: &Option) -> Result>, Error> { - expires_in - .as_ref() - .map(|x| parse_duration::parse::parse(x).map(|dur| Utc::now() + dur)) - .transpose() - .ok_or_message("Invalid duration passed for --expires-in parameter") -} diff --git a/src/garage/cli/remote/node.rs b/src/garage/cli/remote/node.rs deleted file mode 100644 index 482f239d..00000000 --- a/src/garage/cli/remote/node.rs +++ /dev/null @@ -1,120 +0,0 @@ -use format_table::format_table; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_meta(&self, cmd: MetaOperation) -> Result<(), Error> { - let MetaOperation::Snapshot { all } = cmd; - - let res = self - .api_request(CreateMetadataSnapshotRequest { - node: if all { - "*".to_string() - } else { - hex::encode(self.rpc_host) - }, - body: LocalCreateMetadataSnapshotRequest, - }) - .await?; - - let mut table = vec!["Node\tResult".to_string()]; - for (node, _) in res.success.iter() { - table.push(format!("{:.16}\tSnapshot created", node)); - } - for (node, err) in res.error.iter() { - table.push(format!("{:.16}\tError: {}", node, err)); - } - format_table(table); - - if !res.error.is_empty() { - return Err(Error::Message(format!( - "{} nodes returned an error", - res.error.len() - ))); - } - - Ok(()) - } - - pub async fn cmd_stats(&self, cmd: StatsOpt) -> Result<(), Error> { - let res = self - 
.api_request(GetNodeStatisticsRequest { - node: if cmd.all_nodes { - "*".to_string() - } else { - hex::encode(self.rpc_host) - }, - body: LocalGetNodeStatisticsRequest, - }) - .await?; - - for (node, res) in res.success.iter() { - println!("==== NODE [{:.16}] ====", node); - println!("{}\n", res.freeform); - } - - for (node, err) in res.error.iter() { - println!("==== NODE [{:.16}] ====", node); - println!("Error: {}\n", err); - } - - let res = self.api_request(GetClusterStatisticsRequest).await?; - println!("==== CLUSTER STATISTICS ===="); - println!("{}\n", res.freeform); - - Ok(()) - } - - pub async fn cmd_repair(&self, cmd: RepairOpt) -> Result<(), Error> { - if !cmd.yes { - return Err(Error::Message( - "Please add --yes to start the repair operation".into(), - )); - } - - let repair_type = match cmd.what { - RepairWhat::Tables => RepairType::Tables, - RepairWhat::Blocks => RepairType::Blocks, - RepairWhat::Versions => RepairType::Versions, - RepairWhat::MultipartUploads => RepairType::MultipartUploads, - RepairWhat::BlockRefs => RepairType::BlockRefs, - RepairWhat::BlockRc => RepairType::BlockRc, - RepairWhat::Rebalance => RepairType::Rebalance, - RepairWhat::Scrub { cmd } => RepairType::Scrub(match cmd { - ScrubCmd::Start => ScrubCommand::Start, - ScrubCmd::Cancel => ScrubCommand::Cancel, - ScrubCmd::Pause => ScrubCommand::Pause, - ScrubCmd::Resume => ScrubCommand::Resume, - }), - RepairWhat::Aliases => RepairType::Aliases, - RepairWhat::ClearResyncQueue => RepairType::ClearResyncQueue, - }; - - let res = self - .api_request(LaunchRepairOperationRequest { - node: if cmd.all_nodes { - "*".to_string() - } else { - hex::encode(self.rpc_host) - }, - body: LocalLaunchRepairOperationRequest { repair_type }, - }) - .await?; - - let mut table = vec![]; - for (node, err) in res.error.iter() { - table.push(format!("{:.16}\tError: {}", node, err)); - } - for (node, _) in res.success.iter() { - table.push(format!("{:.16}\tRepair launched", node)); - } - 
format_table(table); - - Ok(()) - } -} diff --git a/src/garage/cli/remote/worker.rs b/src/garage/cli/remote/worker.rs deleted file mode 100644 index f1cf1636..00000000 --- a/src/garage/cli/remote/worker.rs +++ /dev/null @@ -1,213 +0,0 @@ -use format_table::format_table; - -use garage_util::error::*; - -use garage_api_admin::api::*; - -use crate::cli::remote::*; -use crate::cli::structs::*; - -impl Cli { - pub async fn cmd_worker(&self, cmd: WorkerOperation) -> Result<(), Error> { - match cmd { - WorkerOperation::List { opt } => self.cmd_list_workers(opt).await, - WorkerOperation::Info { tid } => self.cmd_worker_info(tid).await, - WorkerOperation::Get { - all_nodes, - variable, - } => self.cmd_get_var(all_nodes, variable).await, - WorkerOperation::Set { - all_nodes, - variable, - value, - } => self.cmd_set_var(all_nodes, variable, value).await, - } - } - - pub async fn cmd_list_workers(&self, opt: WorkerListOpt) -> Result<(), Error> { - let mut list = self - .local_api_request(LocalListWorkersRequest { - busy_only: opt.busy, - error_only: opt.errors, - }) - .await? - .0; - - list.sort_by_key(|info| { - ( - match info.state { - WorkerStateResp::Busy | WorkerStateResp::Throttled { .. 
} => 0, - WorkerStateResp::Idle => 1, - WorkerStateResp::Done => 2, - }, - info.id, - ) - }); - - let mut table = - vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()]; - let tf = timeago::Formatter::new(); - for info in list.iter() { - let err_ago = info - .last_error - .as_ref() - .map(|x| tf.convert(Duration::from_secs(x.secs_ago))) - .unwrap_or_default(); - let (total_err, consec_err) = if info.errors > 0 { - (info.errors.to_string(), info.consecutive_errors.to_string()) - } else { - ("-".into(), "-".into()) - }; - - table.push(format!( - "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}", - info.id, - format_worker_state(&info.state), - info.name, - info.tranquility - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "-".into()), - info.progress.as_deref().unwrap_or("-"), - info.queue_length - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "-".into()), - total_err, - consec_err, - err_ago, - )); - } - format_table(table); - - Ok(()) - } - - pub async fn cmd_worker_info(&self, tid: usize) -> Result<(), Error> { - let info = self - .local_api_request(LocalGetWorkerInfoRequest { id: tid as u64 }) - .await? 
- .0; - - let mut table = vec![]; - table.push(format!("Task id:\t{}", info.id)); - table.push(format!("Worker name:\t{}", info.name)); - match &info.state { - WorkerStateResp::Throttled { duration_secs } => { - table.push(format!( - "Worker state:\tBusy (throttled, paused for {:.3}s)", - duration_secs - )); - } - s => { - table.push(format!("Worker state:\t{}", format_worker_state(s))); - } - } - if let Some(tql) = info.tranquility { - table.push(format!("Tranquility:\t{}", tql)); - } - - table.push("".into()); - table.push(format!("Total errors:\t{}", info.errors)); - table.push(format!("Consecutive errs:\t{}", info.consecutive_errors)); - if let Some(err) = info.last_error { - table.push(format!("Last error:\t{}", err.message)); - let tf = timeago::Formatter::new(); - table.push(format!( - "Last error time:\t{}", - tf.convert(Duration::from_secs(err.secs_ago)) - )); - } - - table.push("".into()); - if let Some(p) = info.progress { - table.push(format!("Progress:\t{}", p)); - } - if let Some(ql) = info.queue_length { - table.push(format!("Queue length:\t{}", ql)); - } - if let Some(pe) = info.persistent_errors { - table.push(format!("Persistent errors:\t{}", pe)); - } - - for (i, s) in info.freeform.iter().enumerate() { - if i == 0 { - if table.last() != Some(&"".into()) { - table.push("".into()); - } - table.push(format!("Message:\t{}", s)); - } else { - table.push(format!("\t{}", s)); - } - } - format_table(table); - - Ok(()) - } - - pub async fn cmd_get_var(&self, all: bool, var: Option) -> Result<(), Error> { - let res = self - .api_request(GetWorkerVariableRequest { - node: if all { - "*".to_string() - } else { - hex::encode(self.rpc_host) - }, - body: LocalGetWorkerVariableRequest { variable: var }, - }) - .await?; - - let mut table = vec![]; - for (node, vars) in res.success.iter() { - for (key, val) in vars.0.iter() { - table.push(format!("{:.16}\t{}\t{}", node, key, val)); - } - } - format_table(table); - - for (node, err) in res.error.iter() { - 
eprintln!("{:.16}: error: {}", node, err); - } - - Ok(()) - } - - pub async fn cmd_set_var( - &self, - all: bool, - variable: String, - value: String, - ) -> Result<(), Error> { - let res = self - .api_request(SetWorkerVariableRequest { - node: if all { - "*".to_string() - } else { - hex::encode(self.rpc_host) - }, - body: LocalSetWorkerVariableRequest { variable, value }, - }) - .await?; - - let mut table = vec![]; - for (node, kv) in res.success.iter() { - table.push(format!("{:.16}\t{}\t{}", node, kv.variable, kv.value)); - } - format_table(table); - - for (node, err) in res.error.iter() { - eprintln!("{:.16}: error: {}", node, err); - } - - Ok(()) - } -} - -fn format_worker_state(s: &WorkerStateResp) -> &'static str { - match s { - WorkerStateResp::Busy => "Busy", - WorkerStateResp::Throttled { .. } => "Busy*", - WorkerStateResp::Idle => "Idle", - WorkerStateResp::Done => "Done", - } -} diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index f623c60d..386a213b 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -1,14 +1,15 @@ -use structopt::{clap::Shell, StructOpt}; +use serde::{Deserialize, Serialize}; +use structopt::StructOpt; use garage_util::version::garage_version; -use crate::cli::local::convert_db; +use crate::cli::convert_db; #[derive(StructOpt, Debug)] pub enum Command { /// Run Garage server #[structopt(name = "server", version = garage_version())] - Server(ServerOpt), + Server, /// Get network status #[structopt(name = "status", version = garage_version())] @@ -30,10 +31,6 @@ pub enum Command { #[structopt(name = "key", version = garage_version())] Key(KeyOperation), - /// Operations on admin API tokens - #[structopt(name = "admin-token", version = garage_version())] - AdminToken(AdminTokenOperation), - /// Start repair of node data on remote node #[structopt(name = "repair", version = garage_version())] Repair(RepairOpt), @@ -62,51 +59,8 @@ pub enum Command { /// Convert metadata db between database engine 
formats #[structopt(name = "convert-db", version = garage_version())] ConvertDb(convert_db::ConvertDbOpt), - - /// Output openapi JSON schema for admin api - #[structopt(name = "admin-api-schema", version = garage_version(), setting(structopt::clap::AppSettings::Hidden))] - AdminApiSchema, - - /// Directly invoke the admin API using a JSON payload. - /// The result is printed to `stdout` in JSON format. - #[structopt(name = "json-api", version = garage_version())] - JsonApi { - /// The admin API endpoint to invoke, e.g. `GetClusterStatus` - endpoint: String, - /// The JSON payload, or `-` to read from `stdin` - #[structopt(default_value = "null")] - payload: String, - }, - - /// Generate completions for a shell - #[structopt(name = "completions", version = garage_version())] - Completions { shell: Shell }, } -// --------------------------- -// ---- garage server ... ---- -// --------------------------- - -#[derive(StructOpt, Debug)] -pub struct ServerOpt { - /// Automatically configure a single-node layout in the cluster. - /// Garage will refuse to run if the cluster already has other nodes. - #[structopt(long = "single-node")] - pub(crate) single_node: bool, - /// Configure a default S3 API key using environment variables `GARAGE_DEFAULT_ACCESS_KEY` and - /// `GARAGE_DEFAULT_SECRET_KEY`. Requires `--single-node`. - #[structopt(long = "default-access-key")] - pub(crate) default_access_key: bool, - /// Configure a default bucket using environment variable `GARAGE_DEFAULT_BUCKET`. - /// Implies `--default-access-key`. Requires `--single-node`. - #[structopt(long = "default-bucket")] - pub(crate) default_bucket: bool, -} - -// ------------------------- -// ---- garage node ... 
---- -// ------------------------- - #[derive(StructOpt, Debug)] pub enum NodeOperation { /// Print the full node ID (public key) of this Garage node, and its publicly reachable IP @@ -134,10 +88,6 @@ pub struct ConnectNodeOpt { pub(crate) node: String, } -// --------------------------- -// ---- garage layout ... ---- -// --------------------------- - #[derive(StructOpt, Debug)] pub enum LayoutOperation { /// Assign role to Garage node @@ -240,11 +190,7 @@ pub struct SkipDeadNodesOpt { pub(crate) allow_missing_data: bool, } -// --------------------------- -// ---- garage bucket ... ---- -// --------------------------- - -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub enum BucketOperation { /// List buckets #[structopt(name = "list", version = garage_version())] @@ -289,13 +235,9 @@ pub enum BucketOperation { /// Clean up (abort) old incomplete multipart uploads #[structopt(name = "cleanup-incomplete-uploads", version = garage_version())] CleanupIncompleteUploads(CleanupIncompleteUploadsOpt), - - /// Inspect an object in a bucket - #[structopt(name = "inspect-object", version = garage_version())] - InspectObject(InspectObjectOpt), } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct WebsiteOpt { /// Create #[structopt(long = "allow")] @@ -317,13 +259,13 @@ pub struct WebsiteOpt { pub error_document: Option, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct BucketOpt { /// Bucket name pub name: String, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct DeleteBucketOpt { /// Bucket name pub name: String, @@ -333,7 +275,7 @@ pub struct DeleteBucketOpt { pub yes: bool, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct AliasBucketOpt { /// Existing bucket name (its alias in global namespace or its full hex uuid) pub existing_bucket: String, @@ -346,7 +288,7 @@ 
pub struct AliasBucketOpt { pub local: Option, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct UnaliasBucketOpt { /// Bucket name pub name: String, @@ -356,7 +298,7 @@ pub struct UnaliasBucketOpt { pub local: Option, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct PermBucketOpt { /// Access key name or ID #[structopt(long = "key")] @@ -379,7 +321,7 @@ pub struct PermBucketOpt { pub bucket: String, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct SetQuotasOpt { /// Bucket name pub bucket: String, @@ -394,7 +336,7 @@ pub struct SetQuotasOpt { pub max_objects: Option, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct CleanupIncompleteUploadsOpt { /// Abort multipart uploads older than this value #[structopt(long = "older-than", default_value = "1d")] @@ -405,19 +347,7 @@ pub struct CleanupIncompleteUploadsOpt { pub buckets: Vec, } -#[derive(StructOpt, Debug)] -pub struct InspectObjectOpt { - /// Name or ID of bucket - pub bucket: String, - /// Key of object to inspect - pub key: String, -} - -// ------------------------ -// ---- garage key ... 
---- -// ------------------------ - -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub enum KeyOperation { /// List keys #[structopt(name = "list", version = garage_version())] @@ -450,21 +380,9 @@ pub enum KeyOperation { /// Import key #[structopt(name = "import", version = garage_version())] Import(KeyImportOpt), - - /// Set parameters for an access key - #[structopt(name = "set", version = garage_version())] - Set(KeySetOpt), - - /// Delete all expired access keys - #[structopt(name = "delete-expired", version = garage_version())] - DeleteExpired { - /// Confirm deletion - #[structopt(long = "yes")] - yes: bool, - }, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyInfoOpt { /// ID or name of the key pub key_pattern: String, @@ -473,32 +391,14 @@ pub struct KeyInfoOpt { pub show_secret: bool, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyNewOpt { /// Name of the key #[structopt(default_value = "Unnamed key")] pub name: String, - /// Set an expiration time for the access key - /// (see `docs.rs/parse_duration` for date format) - #[structopt(long = "expires-in")] - pub expires_in: Option, } -#[derive(StructOpt, Debug)] -pub struct KeySetOpt { - /// ID or name of the key - pub key_pattern: String, - - /// Set an expiration time for the access key - /// (see `docs.rs/parse_duration` for date format) - #[structopt(long = "expires-in")] - pub expires_in: Option, - /// Set the access key to never expire - #[structopt(long = "never-expires")] - pub never_expires: bool, -} - -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyRenameOpt { /// ID or name of the key pub key_pattern: String, @@ -507,7 +407,7 @@ pub struct KeyRenameOpt { pub new_name: String, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyDeleteOpt { /// ID or name of the key pub 
key_pattern: String, @@ -517,17 +417,17 @@ pub struct KeyDeleteOpt { pub yes: bool, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyPermOpt { /// ID or name of the key pub key_pattern: String, - /// Flag that allows key to create buckets using S3's `CreateBucket` call + /// Flag that allows key to create buckets using S3's CreateBucket call #[structopt(long = "create-bucket")] pub create_bucket: bool, } -#[derive(StructOpt, Debug)] +#[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyImportOpt { /// Access key ID pub key_id: String, @@ -544,110 +444,7 @@ pub struct KeyImportOpt { pub yes: bool, } -// -------------------------------- -// ---- garage admin-token ... ---- -// -------------------------------- - -#[derive(StructOpt, Debug)] -pub enum AdminTokenOperation { - /// List all admin API tokens - #[structopt(name = "list", version = garage_version())] - List, - - /// Fetch info about a specific admin API token - #[structopt(name = "info", version = garage_version())] - Info { - /// Name or prefix of the ID of the token to look up - api_token: String, - }, - - /// Create new admin API token - #[structopt(name = "create", version = garage_version())] - Create(AdminTokenCreateOp), - - /// Rename an admin API token - #[structopt(name = "rename", version = garage_version())] - Rename { - /// Name or prefix of the ID of the token to rename - api_token: String, - /// New name of the admintoken - new_name: String, - }, - - /// Set parameters for an admin API token - #[structopt(name = "set", version = garage_version())] - Set(AdminTokenSetOp), - - /// Delete an admin API token - #[structopt(name = "delete", version = garage_version())] - Delete { - /// Name or prefix of the ID of the token to delete - api_token: String, - /// Confirm deletion - #[structopt(long = "yes")] - yes: bool, - }, - - /// Delete all expired admin API tokens - #[structopt(name = "delete-expired", version = garage_version())] - 
DeleteExpired { - /// Confirm deletion - #[structopt(long = "yes")] - yes: bool, - }, -} - -#[derive(StructOpt, Debug, Clone)] -pub struct AdminTokenCreateOp { - /// Set a name for the token - pub name: Option, - /// Set an expiration time for the token (see `docs.rs/parse_duration` for date - /// format) - #[structopt(long = "expires-in")] - pub expires_in: Option, - /// Set a limited scope for the token, as a comma-separated list of - /// admin API functions (e.g. `GetClusterStatus`, etc.). The default scope - /// is `*`, which allows access to all admin API functions. - /// Note that granting a scope that allows `CreateAdminToken` or - /// `UpdateAdminToken` allows for privilege escalation, and is therefore - /// equivalent to `*`. - #[structopt(long = "scope")] - pub scope: Option, - /// Print only the newly generated API token to stdout - #[structopt(short = "q", long = "quiet")] - pub quiet: bool, -} - -#[derive(StructOpt, Debug, Clone)] -pub struct AdminTokenSetOp { - /// Name or prefix of the ID of the token to modify - pub api_token: String, - - /// Set an expiration time for the token (see `docs.rs/parse_duration` for date - /// format) - #[structopt(long = "expires-in")] - pub expires_in: Option, - /// Set the token to never expire - #[structopt(long = "never-expires")] - pub never_expires: bool, - - /// Set a limited scope for the token, as a comma-separated list of - /// admin API functions (e.g. `GetClusterStatus`, etc.), or `*` to allow - /// all admin API functions. - /// Use `--scope=+Scope1,Scope2` to add scopes to the existing list, - /// and `--scope=-Scope1,Scope2` to remove scopes from the existing list. - /// Note that granting a scope that allows `CreateAdminToken` or - /// `UpdateAdminToken` allows for privilege escalation, and is therefore - /// equivalent to `*`. - #[structopt(long = "scope")] - pub scope: Option, -} - -// --------------------------- -// ---- garage repair ... 
---- -// --------------------------- - -#[derive(StructOpt, Debug, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] pub struct RepairOpt { /// Launch repair operation on all nodes #[structopt(short = "a", long = "all-nodes")] @@ -661,7 +458,7 @@ pub struct RepairOpt { pub what: RepairWhat, } -#[derive(StructOpt, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum RepairWhat { /// Do a full sync of metadata tables #[structopt(name = "tables", version = garage_version())] @@ -699,7 +496,7 @@ pub enum RepairWhat { Rebalance, } -#[derive(StructOpt, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum ScrubCmd { /// Start scrub #[structopt(name = "start", version = garage_version())] @@ -713,13 +510,15 @@ pub enum ScrubCmd { /// Cancel scrub in progress #[structopt(name = "cancel", version = garage_version())] Cancel, + /// Set tranquility level for in-progress and future scrubs + #[structopt(name = "set-tranquility", version = garage_version())] + SetTranquility { + #[structopt()] + tranquility: u32, + }, } -// ----------------------------------- -// ---- garage offline-repair ... ---- -// ----------------------------------- - -#[derive(StructOpt, Debug, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] pub struct OfflineRepairOpt { /// Confirm the launch of the repair operation #[structopt(long = "yes")] @@ -729,7 +528,7 @@ pub struct OfflineRepairOpt { pub what: OfflineRepairWhat, } -#[derive(StructOpt, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum OfflineRepairWhat { /// Repair K2V item counters #[cfg(feature = "k2v")] @@ -740,22 +539,19 @@ pub enum OfflineRepairWhat { ObjectCounters, } -// -------------------------- -// ---- garage stats ... 
---- -// -------------------------- - -#[derive(StructOpt, Debug, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] pub struct StatsOpt { /// Gather statistics from all nodes #[structopt(short = "a", long = "all-nodes")] pub all_nodes: bool, + + /// Don't show global cluster stats (internal use in RPC) + #[structopt(skip)] + #[serde(default)] + pub skip_global: bool, } -// --------------------------- -// ---- garage worker ... ---- -// --------------------------- - -#[derive(StructOpt, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum WorkerOperation { /// List all workers on Garage node #[structopt(name = "list", version = garage_version())] @@ -788,7 +584,7 @@ pub enum WorkerOperation { }, } -#[derive(StructOpt, Debug, Eq, PartialEq, Clone, Copy)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)] pub struct WorkerListOpt { /// Show only busy workers #[structopt(short = "b", long = "busy")] @@ -798,11 +594,7 @@ pub struct WorkerListOpt { pub errors: bool, } -// -------------------------- -// ---- garage block ... ---- -// -------------------------- - -#[derive(StructOpt, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum BlockOperation { /// List all blocks that currently have a resync error #[structopt(name = "list-errors", version = garage_version())] @@ -834,11 +626,7 @@ pub enum BlockOperation { }, } -// ------------------------- -// ---- garage meta ... 
---- -// ------------------------- - -#[derive(StructOpt, Debug, Eq, PartialEq, Clone, Copy)] +#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)] pub enum MetaOperation { /// Save a snapshot of the metadata db file #[structopt(name = "snapshot", version = garage_version())] diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs new file mode 100644 index 00000000..21c14f42 --- /dev/null +++ b/src/garage/cli/util.rs @@ -0,0 +1,457 @@ +use std::collections::HashMap; +use std::time::Duration; + +use format_table::format_table; +use garage_util::background::*; +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::error::*; +use garage_util::time::*; + +use garage_block::manager::BlockResyncErrorInfo; + +use garage_model::bucket_table::*; +use garage_model::key_table::*; +use garage_model::s3::mpu_table::{self, MultipartUpload}; +use garage_model::s3::object_table; +use garage_model::s3::version_table::*; + +use crate::cli::structs::WorkerListOpt; + +pub fn print_bucket_list(bl: Vec) { + println!("List of buckets:"); + + let mut table = vec![]; + for bucket in bl { + let aliases = bucket + .aliases() + .iter() + .filter(|(_, _, active)| *active) + .map(|(name, _, _)| name.to_string()) + .collect::>(); + let local_aliases_n = match &bucket + .local_aliases() + .iter() + .filter(|(_, _, active)| *active) + .collect::>()[..] 
+ { + [] => "".into(), + [((k, n), _, _)] => format!("{}:{}", k, n), + s => format!("[{} local aliases]", s.len()), + }; + + table.push(format!( + "\t{}\t{}\t{}", + aliases.join(","), + local_aliases_n, + hex::encode(bucket.id), + )); + } + format_table(table); +} + +pub fn print_key_list(kl: Vec<(String, String)>) { + println!("List of keys:"); + let mut table = vec![]; + for key in kl { + table.push(format!("\t{}\t{}", key.0, key.1)); + } + format_table(table); +} + +pub fn print_key_info(key: &Key, relevant_buckets: &HashMap) { + let bucket_global_aliases = |b: &Uuid| { + if let Some(bucket) = relevant_buckets.get(b) { + if let Some(p) = bucket.state.as_option() { + return p + .aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .map(|(a, _, _)| a.clone()) + .collect::>() + .join(", "); + } + } + + "".to_string() + }; + + match &key.state { + Deletable::Present(p) => { + println!("Key name: {}", p.name.get()); + println!("Key ID: {}", key.key_id); + println!("Secret key: {}", p.secret_key); + println!("Can create buckets: {}", p.allow_create_bucket.get()); + println!("\nKey-specific bucket aliases:"); + let mut table = vec![]; + for (alias_name, _, alias) in p.local_aliases.items().iter() { + if let Some(bucket_id) = alias { + table.push(format!( + "\t{}\t{}\t{}", + alias_name, + bucket_global_aliases(bucket_id), + hex::encode(bucket_id) + )); + } + } + format_table(table); + + println!("\nAuthorized buckets:"); + let mut table = vec![]; + for (bucket_id, perm) in p.authorized_buckets.items().iter() { + if !perm.is_any() { + continue; + } + let rflag = if perm.allow_read { "R" } else { " " }; + let wflag = if perm.allow_write { "W" } else { " " }; + let oflag = if perm.allow_owner { "O" } else { " " }; + let local_aliases = p + .local_aliases + .items() + .iter() + .filter(|(_, _, a)| *a == Some(*bucket_id)) + .map(|(a, _, _)| a.clone()) + .collect::>() + .join(", "); + table.push(format!( + "\t{}{}{}\t{}\t{}\t{:?}", + rflag, + wflag, + oflag, + 
bucket_global_aliases(bucket_id), + local_aliases, + bucket_id + )); + } + format_table(table); + } + Deletable::Deleted => { + println!("Key {} is deleted.", key.key_id); + } + } +} + +pub fn print_bucket_info( + bucket: &Bucket, + relevant_keys: &HashMap, + counters: &HashMap, + mpu_counters: &HashMap, +) { + let key_name = |k| { + relevant_keys + .get(k) + .map(|k| k.params().unwrap().name.get().as_str()) + .unwrap_or("") + }; + + println!("Bucket: {}", hex::encode(bucket.id)); + match &bucket.state { + Deletable::Deleted => println!("Bucket is deleted."), + Deletable::Present(p) => { + let size = + bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64); + println!( + "\nSize: {} ({})", + size.to_string_as(true), + size.to_string_as(false) + ); + println!( + "Objects: {}", + *counters.get(object_table::OBJECTS).unwrap_or(&0) + ); + println!( + "Unfinished uploads (multipart and non-multipart): {}", + *counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0) + ); + println!( + "Unfinished multipart uploads: {}", + *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0) + ); + let mpu_size = + bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64); + println!( + "Size of unfinished multipart uploads: {} ({})", + mpu_size.to_string_as(true), + mpu_size.to_string_as(false), + ); + + println!("\nWebsite access: {}", p.website_config.get().is_some()); + + let quotas = p.quotas.get(); + if quotas.max_size.is_some() || quotas.max_objects.is_some() { + println!("\nQuotas:"); + if let Some(ms) = quotas.max_size { + let ms = bytesize::ByteSize::b(ms); + println!( + " maximum size: {} ({})", + ms.to_string_as(true), + ms.to_string_as(false) + ); + } + if let Some(mo) = quotas.max_objects { + println!(" maximum number of objects: {}", mo); + } + } + + println!("\nGlobal aliases:"); + for (alias, _, active) in p.aliases.items().iter() { + if *active { + println!(" {}", alias); + } + } + + println!("\nKey-specific aliases:"); + 
let mut table = vec![]; + for ((key_id, alias), _, active) in p.local_aliases.items().iter() { + if *active { + table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias)); + } + } + format_table(table); + + println!("\nAuthorized keys:"); + let mut table = vec![]; + for (k, perm) in p.authorized_keys.items().iter() { + if !perm.is_any() { + continue; + } + let rflag = if perm.allow_read { "R" } else { " " }; + let wflag = if perm.allow_write { "W" } else { " " }; + let oflag = if perm.allow_owner { "O" } else { " " }; + table.push(format!( + "\t{}{}{}\t{}\t{}", + rflag, + wflag, + oflag, + k, + key_name(k) + )); + } + format_table(table); + } + }; +} + +pub fn find_matching_node( + cand: impl std::iter::Iterator, + pattern: &str, +) -> Result { + let mut candidates = vec![]; + for c in cand { + if hex::encode(c).starts_with(pattern) && !candidates.contains(&c) { + candidates.push(c); + } + } + if candidates.len() != 1 { + Err(Error::Message(format!( + "{} nodes match '{}'", + candidates.len(), + pattern, + ))) + } else { + Ok(candidates[0]) + } +} + +pub fn print_worker_list(wi: HashMap, wlo: WorkerListOpt) { + let mut wi = wi.into_iter().collect::>(); + wi.sort_by_key(|(tid, info)| { + ( + match info.state { + WorkerState::Busy | WorkerState::Throttled(_) => 0, + WorkerState::Idle => 1, + WorkerState::Done => 2, + }, + *tid, + ) + }); + + let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()]; + for (tid, info) in wi.iter() { + if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) { + continue; + } + if wlo.errors && info.errors == 0 { + continue; + } + + let tf = timeago::Formatter::new(); + let err_ago = info + .last_error + .as_ref() + .map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t))) + .unwrap_or_default(); + let (total_err, consec_err) = if info.errors > 0 { + (info.errors.to_string(), info.consecutive_errors.to_string()) + } else { + ("-".into(), "-".into()) + }; 
+ + table.push(format!( + "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}", + tid, + info.state, + info.name, + info.status + .tranquility + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "-".into()), + info.status.progress.as_deref().unwrap_or("-"), + info.status + .queue_length + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "-".into()), + total_err, + consec_err, + err_ago, + )); + } + format_table(table); +} + +pub fn print_worker_info(tid: usize, info: WorkerInfo) { + let mut table = vec![]; + table.push(format!("Task id:\t{}", tid)); + table.push(format!("Worker name:\t{}", info.name)); + match info.state { + WorkerState::Throttled(t) => { + table.push(format!( + "Worker state:\tBusy (throttled, paused for {:.3}s)", + t + )); + } + s => { + table.push(format!("Worker state:\t{}", s)); + } + }; + if let Some(tql) = info.status.tranquility { + table.push(format!("Tranquility:\t{}", tql)); + } + + table.push("".into()); + table.push(format!("Total errors:\t{}", info.errors)); + table.push(format!("Consecutive errs:\t{}", info.consecutive_errors)); + if let Some((s, t)) = info.last_error { + table.push(format!("Last error:\t{}", s)); + let tf = timeago::Formatter::new(); + table.push(format!( + "Last error time:\t{}", + tf.convert(Duration::from_millis(now_msec() - t)) + )); + } + + table.push("".into()); + if let Some(p) = info.status.progress { + table.push(format!("Progress:\t{}", p)); + } + if let Some(ql) = info.status.queue_length { + table.push(format!("Queue length:\t{}", ql)); + } + if let Some(pe) = info.status.persistent_errors { + table.push(format!("Persistent errors:\t{}", pe)); + } + + for (i, s) in info.status.freeform.iter().enumerate() { + if i == 0 { + if table.last() != Some(&"".into()) { + table.push("".into()); + } + table.push(format!("Message:\t{}", s)); + } else { + table.push(format!("\t{}", s)); + } + } + format_table(table); +} + +pub fn print_worker_vars(wv: Vec<(Uuid, String, String)>) { + let table = wv + .into_iter() + 
.map(|(n, k, v)| format!("{:?}\t{}\t{}", n, k, v)) + .collect::>(); + format_table(table); +} + +pub fn print_block_error_list(el: Vec) { + let now = now_msec(); + let tf = timeago::Formatter::new(); + let mut tf2 = timeago::Formatter::new(); + tf2.ago(""); + + let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()]; + for e in el { + let next_try = if e.next_try > now { + tf2.convert(Duration::from_millis(e.next_try - now)) + } else { + "asap".to_string() + }; + table.push(format!( + "{}\t{}\t{}\t{}\tin {}", + hex::encode(e.hash.as_slice()), + e.refcount, + e.error_count, + tf.convert(Duration::from_millis(now - e.last_try)), + next_try + )); + } + format_table(table); +} + +pub fn print_block_info( + hash: Hash, + refcount: u64, + versions: Vec>, + uploads: Vec, +) { + println!("Block hash: {}", hex::encode(hash.as_slice())); + println!("Refcount: {}", refcount); + println!(); + + let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()]; + let mut nondeleted_count = 0; + for v in versions.iter() { + match v { + Ok(ver) => { + match &ver.backlink { + VersionBacklink::Object { bucket_id, key } => { + table.push(format!( + "{:?}\t{:?}\t{}\t\t{:?}", + ver.uuid, + bucket_id, + key, + ver.deleted.get() + )); + } + VersionBacklink::MultipartUpload { upload_id } => { + let upload = uploads.iter().find(|x| x.upload_id == *upload_id); + table.push(format!( + "{:?}\t{:?}\t{}\t{:?}\t{:?}", + ver.uuid, + upload.map(|u| u.bucket_id).unwrap_or_default(), + upload.map(|u| u.key.as_str()).unwrap_or_default(), + upload_id, + ver.deleted.get() + )); + } + } + if !ver.deleted.get() { + nondeleted_count += 1; + } + } + Err(vh) => { + table.push(format!("{:?}\t\t\t\tyes", vh)); + } + } + } + format_table(table); + + if refcount != nondeleted_count { + println!(); + println!( + "Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`." 
+ ); + } +} diff --git a/src/garage/main.rs b/src/garage/main.rs index 62d10c82..2703bedd 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -4,9 +4,12 @@ #[macro_use] extern crate tracing; +mod admin; mod cli; +mod repair; mod secrets; mod server; +#[cfg(feature = "telemetry-otlp")] mod tracing_setup; #[cfg(not(any(feature = "bundled-libs", feature = "system-libs")))] @@ -22,7 +25,6 @@ use std::net::SocketAddr; use std::path::PathBuf; use structopt::StructOpt; -use utoipa::OpenApi; use garage_net::util::parse_and_resolve_peer_addr; use garage_net::NetworkKey; @@ -32,9 +34,10 @@ use garage_util::error::*; use garage_rpc::system::*; use garage_rpc::*; -use garage_api_admin::api_server::{AdminRpc as ProxyRpc, ADMIN_RPC_PATH as PROXY_RPC_PATH}; +use garage_model::helper::error::Error as HelperError; -use cli::structs::*; +use admin::*; +use cli::*; use secrets::Secrets; #[derive(StructOpt, Debug)] @@ -43,7 +46,7 @@ use secrets::Secrets; about = "S3-compatible object store for self-hosted geo-distributed deployments" )] struct Opt { - /// Host to connect to for admin operations, in the format: `@:` + /// Host to connect to for admin operations, in the format: @: #[structopt(short = "h", long = "rpc-host", env = "GARAGE_RPC_HOST")] pub rpc_host: Option, @@ -63,33 +66,28 @@ struct Opt { cmd: Command, } -fn main() { +#[tokio::main] +async fn main() { // Initialize version and features info let features = &[ - #[cfg(feature = "bundled-libs")] - "bundled-libs", - #[cfg(feature = "consul-discovery")] - "consul-discovery", - #[cfg(feature = "fjall")] - "fjall", - #[cfg(feature = "journald")] - "journald", #[cfg(feature = "k2v")] "k2v", - #[cfg(feature = "kubernetes-discovery")] - "kubernetes-discovery", #[cfg(feature = "lmdb")] "lmdb", - #[cfg(feature = "metrics")] - "metrics", #[cfg(feature = "sqlite")] "sqlite", - #[cfg(feature = "syslog")] - "syslog", - #[cfg(feature = "system-libs")] - "system-libs", + #[cfg(feature = "consul-discovery")] + "consul-discovery", + 
#[cfg(feature = "kubernetes-discovery")] + "kubernetes-discovery", + #[cfg(feature = "metrics")] + "metrics", #[cfg(feature = "telemetry-otlp")] "telemetry-otlp", + #[cfg(feature = "bundled-libs")] + "bundled-libs", + #[cfg(feature = "system-libs")] + "system-libs", ][..]; if let Some(git_version) = option_env!("GIT_VERSION") { garage_util::version::init_version(git_version); @@ -144,11 +142,19 @@ fn main() { sodiumoxide::init().expect("Unable to init sodiumoxide"); - let res = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .expect("build tokio multi_thread runtime failed") - .block_on(run(opt)); + let res = match opt.cmd { + Command::Server => server::run_server(opt.config_file, opt.secrets).await, + Command::OfflineRepair(repair_opt) => { + repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await + } + Command::ConvertDb(conv_opt) => { + cli::convert_db::do_conversion(conv_opt).map_err(From::from) + } + Command::Node(NodeOperation::NodeId(node_id_opt)) => { + node_id_command(opt.config_file, node_id_opt.quiet) + } + _ => cli_command(opt).await, + }; if let Err(e) = res { eprintln!("Error: {}", e); @@ -156,47 +162,13 @@ fn main() { } } -async fn run(opt: Opt) -> Result<(), Error> { - match opt.cmd { - Command::Server(sopt) => server::run_server(opt.config_file, opt.secrets, sopt).await, - Command::OfflineRepair(repair_opt) => { - cli::local::repair::offline_repair(opt.config_file, opt.secrets, repair_opt).await - } - Command::ConvertDb(conv_opt) => { - cli::local::convert_db::do_conversion(conv_opt).map_err(From::from) - } - Command::Node(NodeOperation::NodeId(node_id_opt)) => { - cli::local::init::node_id_command(opt.config_file, node_id_opt.quiet) - } - Command::AdminApiSchema => { - println!( - "{}", - garage_api_admin::openapi::ApiDoc::openapi() - .to_pretty_json() - .unwrap() - ); - Ok(()) - } - Command::Completions { shell } => { - cli::local::completions::generate_completions(shell); - Ok(()) - } - _ => 
cli_command(opt).await, - } -} - -/// # Safety -/// -/// should be called before tokio runtime initialization -/// to limit multithread problem with `std::env::set_var` which is unsafe fn init_logging(opt: &Opt) { if std::env::var("RUST_LOG").is_err() { let default_log = match &opt.cmd { - Command::Server(_) => "netapp=info,garage=info", + Command::Server => "netapp=info,garage=info", _ => "netapp=warn,garage=warn", }; - - unsafe { std::env::set_var("RUST_LOG", default_log) }; + std::env::set_var("RUST_LOG", default_log) } let env_filter = tracing_subscriber::filter::EnvFilter::from_default_env(); @@ -317,7 +289,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { (id, addrs[0], false) } else { let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir) - .err_context(cli::local::init::READ_KEY_ERROR)?; + .err_context(READ_KEY_ERROR)?; if let Some(a) = config.as_ref().and_then(|c| c.rpc_public_addr.as_ref()) { use std::net::ToSocketAddrs; let a = a @@ -346,12 +318,13 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { Err(e).err_context("Unable to connect to destination RPC host. 
Check that you are using the same value of rpc_secret as them, and that you have their correct full-length node ID (public key).")?; } - let proxy_rpc_endpoint = netapp.endpoint::(PROXY_RPC_PATH.into()); + let system_rpc_endpoint = netapp.endpoint::(SYSTEM_RPC_PATH.into()); + let admin_rpc_endpoint = netapp.endpoint::(ADMIN_RPC_PATH.into()); - let cli = cli::remote::Cli { - proxy_rpc_endpoint, - rpc_host: id, - }; - - cli.handle(opt.cmd).await + match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await { + Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))), + Err(HelperError::BadRequest(b)) => Err(Error::Message(b)), + Err(e) => Err(Error::Message(format!("{}", e))), + Ok(x) => Ok(x), + } } diff --git a/src/garage/repair/mod.rs b/src/garage/repair/mod.rs new file mode 100644 index 00000000..4699ace5 --- /dev/null +++ b/src/garage/repair/mod.rs @@ -0,0 +1,2 @@ +pub mod offline; +pub mod online; diff --git a/src/garage/cli/local/repair.rs b/src/garage/repair/offline.rs similarity index 100% rename from src/garage/cli/local/repair.rs rename to src/garage/repair/offline.rs diff --git a/src/api/admin/repair.rs b/src/garage/repair/online.rs similarity index 65% rename from src/api/admin/repair.rs rename to src/garage/repair/online.rs index 1d5665d1..6a7dafcf 100644 --- a/src/api/admin/repair.rs +++ b/src/garage/repair/online.rs @@ -5,14 +5,6 @@ use std::time::Duration; use async_trait::async_trait; use tokio::sync::watch; -use garage_util::background::*; -use garage_util::data::*; -use garage_util::error::{Error as GarageError, OkOrMessage}; -use garage_util::migrate::Migrate; - -use garage_table::replication::*; -use garage_table::*; - use garage_block::manager::BlockManager; use garage_block::repair::ScrubWorkerCommand; @@ -22,89 +14,91 @@ use garage_model::s3::mpu_table::*; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; -use crate::api::*; -use crate::error::Error; -use 
crate::{Admin, RequestHandler}; +use garage_table::replication::*; +use garage_table::*; + +use garage_util::background::*; +use garage_util::data::*; +use garage_util::error::Error; +use garage_util::migrate::Migrate; + +use crate::*; const RC_REPAIR_ITER_COUNT: usize = 64; -impl RequestHandler for LocalLaunchRepairOperationRequest { - type Response = LocalLaunchRepairOperationResponse; - - async fn handle( - self, - garage: &Arc, - admin: &Admin, - ) -> Result { - let bg = &admin.background; - match self.repair_type { - RepairType::Tables => { - info!("Launching a full sync of tables"); - garage.bucket_table.syncer.add_full_sync()?; - garage.object_table.syncer.add_full_sync()?; - garage.version_table.syncer.add_full_sync()?; - garage.block_ref_table.syncer.add_full_sync()?; - garage.key_table.syncer.add_full_sync()?; - } - RepairType::Versions => { - info!("Repairing the versions table"); - bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions)); - } - RepairType::MultipartUploads => { - info!("Repairing the multipart uploads table"); - bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu)); - } - RepairType::BlockRefs => { - info!("Repairing the block refs table"); - bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs)); - } - RepairType::BlockRc => { - info!("Repairing the block reference counters"); - bg.spawn_worker(BlockRcRepair::new( - garage.block_manager.clone(), - garage.block_ref_table.clone(), - )); - } - RepairType::Blocks => { - info!("Repairing the stored blocks"); - bg.spawn_worker(garage_block::repair::RepairWorker::new( - garage.block_manager.clone(), - )); - } - RepairType::Scrub(cmd) => { - let cmd = match cmd { - ScrubCommand::Start => ScrubWorkerCommand::Start, - ScrubCommand::Pause => { - ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24)) - } - ScrubCommand::Resume => ScrubWorkerCommand::Resume, - ScrubCommand::Cancel => ScrubWorkerCommand::Cancel, - }; - info!("Sending command to scrub 
worker: {:?}", cmd); - garage.block_manager.send_scrub_command(cmd).await?; - } - RepairType::Rebalance => { - info!("Rebalancing the stored blocks among storage locations"); - bg.spawn_worker(garage_block::repair::RebalanceWorker::new( - garage.block_manager.clone(), - )); - } - RepairType::Aliases => { - info!("Repairing bucket aliases (foreground)"); - garage.locked_helper().await.repair_aliases().await?; - } - RepairType::ClearResyncQueue => { - info!("Clearing resync queue (foreground)"); - let garage = garage.clone(); - tokio::task::spawn_blocking(move || { - garage.block_manager.resync.clear_resync_queue() - }) - .await - .map_err(garage_util::error::Error::from)??; - } +pub async fn launch_online_repair( + garage: &Arc, + bg: &BackgroundRunner, + opt: RepairOpt, +) -> Result<(), Error> { + match opt.what { + RepairWhat::Tables => { + info!("Launching a full sync of tables"); + garage.bucket_table.syncer.add_full_sync()?; + garage.object_table.syncer.add_full_sync()?; + garage.version_table.syncer.add_full_sync()?; + garage.block_ref_table.syncer.add_full_sync()?; + garage.key_table.syncer.add_full_sync()?; + } + RepairWhat::Versions => { + info!("Repairing the versions table"); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions)); + } + RepairWhat::MultipartUploads => { + info!("Repairing the multipart uploads table"); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu)); + } + RepairWhat::BlockRefs => { + info!("Repairing the block refs table"); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs)); + } + RepairWhat::BlockRc => { + info!("Repairing the block reference counters"); + bg.spawn_worker(BlockRcRepair::new( + garage.block_manager.clone(), + garage.block_ref_table.clone(), + )); + } + RepairWhat::Blocks => { + info!("Repairing the stored blocks"); + bg.spawn_worker(garage_block::repair::RepairWorker::new( + garage.block_manager.clone(), + )); + } + RepairWhat::Scrub { cmd } => { + let cmd = 
match cmd { + ScrubCmd::Start => ScrubWorkerCommand::Start, + ScrubCmd::Pause => ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24)), + ScrubCmd::Resume => ScrubWorkerCommand::Resume, + ScrubCmd::Cancel => ScrubWorkerCommand::Cancel, + ScrubCmd::SetTranquility { tranquility } => { + garage + .block_manager + .scrub_persister + .set_with(|x| x.tranquility = tranquility)?; + return Ok(()); + } + }; + info!("Sending command to scrub worker: {:?}", cmd); + garage.block_manager.send_scrub_command(cmd).await?; + } + RepairWhat::Rebalance => { + info!("Rebalancing the stored blocks among storage locations"); + bg.spawn_worker(garage_block::repair::RebalanceWorker::new( + garage.block_manager.clone(), + )); + } + RepairWhat::Aliases => { + info!("Repairing bucket aliases (foreground)"); + garage.locked_helper().await.repair_aliases().await?; + } + RepairWhat::ClearResyncQueue => { + let garage = garage.clone(); + tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue()) + .await?? } - Ok(LocalLaunchRepairOperationResponse) } + Ok(()) } // ---- @@ -118,7 +112,7 @@ trait TableRepair: Send + Sync + 'static { &mut self, garage: &Garage, entry: <::T as TableSchema>::E, - ) -> impl Future> + Send; + ) -> impl Future> + Send; } struct TableRepairWorker { @@ -154,10 +148,7 @@ impl Worker for TableRepairWorker { } } - async fn work( - &mut self, - _must_exit: &mut watch::Receiver, - ) -> Result { + async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? 
{ Some((k, v)) => (v, k), None => { @@ -199,7 +190,7 @@ impl TableRepair for RepairVersions { &garage.version_table } - async fn process(&mut self, garage: &Garage, version: Version) -> Result { + async fn process(&mut self, garage: &Garage, version: Version) -> Result { if !version.deleted.get() { let ref_exists = match &version.backlink { VersionBacklink::Object { bucket_id, key } => garage @@ -245,11 +236,7 @@ impl TableRepair for RepairBlockRefs { &garage.block_ref_table } - async fn process( - &mut self, - garage: &Garage, - mut block_ref: BlockRef, - ) -> Result { + async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result { if !block_ref.deleted.get() { let ref_exists = garage .version_table @@ -284,11 +271,7 @@ impl TableRepair for RepairMpu { &garage.mpu_table } - async fn process( - &mut self, - garage: &Garage, - mut mpu: MultipartUpload, - ) -> Result { + async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result { if !mpu.deleted.get() { let ref_exists = garage .object_table @@ -345,7 +328,7 @@ impl BlockRcRepair { #[async_trait] impl Worker for BlockRcRepair { fn name(&self) -> String { - "Block refcount repair worker".into() + format!("Block refcount repair worker") } fn status(&self) -> WorkerStatus { @@ -355,10 +338,7 @@ impl Worker for BlockRcRepair { } } - async fn work( - &mut self, - _must_exit: &mut watch::Receiver, - ) -> Result { + async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { for _i in 0..RC_REPAIR_ITER_COUNT { let next1 = self .block_manager diff --git a/src/garage/secrets.rs b/src/garage/secrets.rs index e89a3b45..17781efe 100644 --- a/src/garage/secrets.rs +++ b/src/garage/secrets.rs @@ -17,32 +17,32 @@ pub struct Secrets { )] pub allow_world_readable_secrets: Option, - /// RPC secret network key, used to replace `rpc_secret` in config.toml when running the + /// RPC secret network key, used to replace rpc_secret in config.toml when running the /// daemon or doing admin 
operations #[structopt(short = "s", long = "rpc-secret", env = "GARAGE_RPC_SECRET")] pub rpc_secret: Option, - /// RPC secret network key, used to replace `rpc_secret` in config.toml and rpc-secret + /// RPC secret network key, used to replace rpc_secret in config.toml and rpc-secret /// when running the daemon or doing admin operations #[structopt(long = "rpc-secret-file", env = "GARAGE_RPC_SECRET_FILE")] pub rpc_secret_file: Option, - /// Admin API authentication token, replaces `admin.admin_token` in config.toml when + /// Admin API authentication token, replaces admin.admin_token in config.toml when /// running the Garage daemon #[structopt(long = "admin-token", env = "GARAGE_ADMIN_TOKEN")] pub admin_token: Option, - /// Admin API authentication token file path, replaces `admin.admin_token` in config.toml + /// Admin API authentication token file path, replaces admin.admin_token in config.toml /// and admin-token when running the Garage daemon #[structopt(long = "admin-token-file", env = "GARAGE_ADMIN_TOKEN_FILE")] pub admin_token_file: Option, - /// Metrics API authentication token, replaces `admin.metrics_token` in config.toml when + /// Metrics API authentication token, replaces admin.metrics_token in config.toml when /// running the Garage daemon #[structopt(long = "metrics-token", env = "GARAGE_METRICS_TOKEN")] pub metrics_token: Option, - /// Metrics API authentication token file path, replaces `admin.metrics_token` in config.toml + /// Metrics API authentication token file path, replaces admin.metrics_token in config.toml /// and metrics-token when running the Garage daemon #[structopt(long = "metrics-token-file", env = "GARAGE_METRICS_TOKEN_FILE")] pub metrics_token_file: Option, diff --git a/src/garage/server.rs b/src/garage/server.rs index d7aae075..b81ae334 100644 --- a/src/garage/server.rs +++ b/src/garage/server.rs @@ -1,5 +1,4 @@ use std::path::PathBuf; -use std::sync::Arc; use tokio::sync::watch; @@ -15,9 +14,10 @@ use garage_web::WebServer; 
#[cfg(feature = "k2v")] use garage_api_k2v::api_server::K2VApiServer; +use crate::admin::*; use crate::secrets::{fill_secrets, Secrets}; -use crate::tracing_setup::init_tracing; -use crate::ServerOpt; +#[cfg(feature = "telemetry-otlp")] +use crate::tracing_setup::*; async fn wait_from(mut chan: watch::Receiver) { while !*chan.borrow() { @@ -27,12 +27,8 @@ async fn wait_from(mut chan: watch::Receiver) { } } -pub async fn run_server( - config_file: PathBuf, - secrets: Secrets, - opt: ServerOpt, -) -> Result<(), Error> { - info!("Loading configuration from {}...", config_file.display()); +pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Error> { + info!("Loading configuration..."); let config = fill_secrets(read_config(config_file)?, secrets)?; // ---- Initialize Garage internals ---- @@ -50,9 +46,6 @@ pub async fn run_server( info!("Initializing Garage main data store..."); let garage = Garage::new(config.clone())?; - // Handle --single-node, --default-bucket and --default-access-key - initial_config(&garage, opt).await?; - info!("Initializing background runner..."); let watch_cancel = watch_shutdown_signal(); let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone()); @@ -60,15 +53,19 @@ pub async fn run_server( info!("Spawning Garage workers..."); garage.spawn_workers(&background)?; - if let Some(admin_trace_sink) = &config.admin.trace_sink { + if config.admin.trace_sink.is_some() { info!("Initialize tracing..."); - init_tracing(admin_trace_sink, garage.system.id)?; + + #[cfg(feature = "telemetry-otlp")] + init_tracing(config.admin.trace_sink.as_ref().unwrap(), garage.system.id)?; + + #[cfg(not(feature = "telemetry-otlp"))] + error!("Garage was built without OTLP exporter, admin.trace_sink is ignored."); } info!("Initialize Admin API server and metrics collector..."); let admin_server = AdminApiServer::new( garage.clone(), - background.clone(), #[cfg(feature = "metrics")] metrics_exporter, ); @@ -76,6 +73,9 @@ 
pub async fn run_server( info!("Launching internal Garage cluster communications..."); let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone())); + info!("Create admin RPC handler..."); + AdminRpcHandler::new(garage.clone(), background.clone()); + // ---- Launch public-facing API servers ---- let mut servers = vec![]; @@ -93,7 +93,7 @@ pub async fn run_server( )); } - if let Some(k2v_api) = &config.k2v_api { + if config.k2v_api.is_some() { #[cfg(feature = "k2v")] { info!("Initializing K2V API server..."); @@ -101,7 +101,7 @@ pub async fn run_server( "K2V API", tokio::spawn(K2VApiServer::run( garage.clone(), - k2v_api.api_bind_addr.clone(), + config.k2v_api.as_ref().unwrap().api_bind_addr.clone(), config.s3_api.s3_region.clone(), watch_cancel.clone(), )), @@ -113,7 +113,7 @@ pub async fn run_server( if let Some(web_config) = &config.s3_web { info!("Initializing web server..."); - let web_server = WebServer::new(garage.clone(), web_config); + let web_server = WebServer::new(garage.clone(), &web_config); servers.push(( "Web", tokio::spawn(web_server.run(web_config.bind_addr.clone(), watch_cancel.clone())), @@ -173,176 +173,6 @@ pub async fn run_server( Ok(()) } -async fn initial_config(garage: &Arc, opt: ServerOpt) -> Result<(), Error> { - use garage_model::bucket_alias_table::is_valid_bucket_name; - use garage_model::bucket_table::Bucket; - use garage_model::key_table::*; - use garage_model::permission::BucketKeyPerm; - use garage_rpc::layout::*; - use garage_rpc::replication_mode::ReplicationFactor; - use garage_table::*; - use garage_util::time::now_msec; - - if opt.single_node { - if garage.replication_factor != ReplicationFactor::new(1).unwrap() { - return Err(Error::Message( - "Single-node mode requires replication_factor = 1 in the configuration file." 
- .into(), - )); - } - - let layout_version = garage.system.cluster_layout().inner().current().version; - - if layout_version > 1 { - return Err(Error::Message("Refusing to run in single-node mode: layout version is already superior to 1. Remove the --single-node flag to run the server in full mode.".into())); - } - - if layout_version == 0 { - // Setup initial layout - let mut layout = garage.system.cluster_layout().inner().clone(); - let our_id = garage.system.id; - - // Check no other nodes are present in the system - let nodes = garage.system.get_known_nodes(); - if nodes.iter().any(|x| x.id != our_id) { - return Err(Error::Message("Refusing to run in single-node mode: more nodes are already present in the cluster.".into())); - } - - // Automatically determine this node's capacity - let capacity = garage - .system - .local_status() - .data_disk_avail - .map(|(_avail, total)| total) - .unwrap_or(1024 * 1024 * 1024); // Default to 1GB - - assert!(layout.current().roles.is_empty()); - - layout.staging.get_mut().roles.clear(); - layout.staging.get_mut().roles.update_in_place( - our_id, - NodeRoleV(Some(NodeRole { - zone: "dc1".to_string(), - capacity: Some(capacity), - tags: vec!["default".to_string()], - })), - ); - - let (layout, msg) = layout.apply_staged_changes(1)?; - info!( - "Created initial layout for single-node configuration:\n{}", - msg.join("\n") - ); - - garage - .system - .layout_manager - .update_cluster_layout(&layout) - .await?; - } - } - - if (opt.default_bucket || opt.default_access_key) && !opt.single_node { - return Err(Error::Message( - "Flags --default-access-key and --default-bucket can only be used in single-node mode." 
- .into(), - )); - } - - if opt.default_access_key || opt.default_bucket { - let rdenv = |name: &str| { - std::env::var(name) - .map_err(|_| Error::Message(format!("Environment variable `{}` is not set", name))) - }; - - // Create default access key if it does not exist - let key_id = rdenv("GARAGE_DEFAULT_ACCESS_KEY")?; - let secret_key = rdenv("GARAGE_DEFAULT_SECRET_KEY")?; - - let existing_key = garage.key_table.get(&EmptyKey, &key_id).await?; - - let key = match existing_key { - Some(key) => { - match key.state.as_option() { - None => return Err(Error::Message(format!("Access key {} was deleted in the cluster, cannot add it back", key_id))), - Some(st) if st.secret_key != secret_key => return Err(Error::Message(format!("Access key {} is associated with a secret key different than the one given in GARAGE_DEFAULT_SECRET_KEY", key_id))), - _ => (), - } - - key - } - None => { - info!("Creating default access key `{}`", key_id); - - let mut key = Key::import(&key_id, &secret_key, "default access key") - .map_err(|e| Error::Message(format!("Invalid default access key: {}", e)))?; - key.state - .as_option_mut() - .unwrap() - .allow_create_bucket - .update(true); - garage.key_table.insert(&key).await?; - - key - } - }; - - if opt.default_bucket { - // Create default bucket if it does not exist - let bucket_name = rdenv("GARAGE_DEFAULT_BUCKET")?; - - if !is_valid_bucket_name(&bucket_name, garage.config.allow_punycode) { - return Err(Error::Message( - "Invalid default bucket name, see S3 specification for allowed bucket names." - .into(), - )); - } - - let helper = garage.locked_helper().await; - - let bucket = match helper.bucket().resolve_global_bucket_fast(&bucket_name)? 
{ - Some(bucket) => bucket, - None => { - info!("Creating default bucket `{}`", bucket_name); - - let bucket = Bucket::new(); - garage.bucket_table.insert(&bucket).await?; - - helper - .set_global_bucket_alias(bucket.id, &bucket_name) - .await - .map_err(|e| { - Error::Message(format!("Cannot create default bucket: {}", e)) - })?; - - bucket - } - }; - - helper - .set_bucket_key_permissions( - bucket.id, - &key.key_id, - BucketKeyPerm { - timestamp: now_msec(), - allow_read: true, - allow_write: true, - allow_owner: true, - }, - ) - .await - .map_err(|e| { - Error::Message(format!( - "Cannot configure permissions on default bucket: {}", - e - )) - })?; - } - } - - Ok(()) -} - #[cfg(unix)] fn watch_shutdown_signal() -> watch::Receiver { use tokio::signal::unix::*; diff --git a/src/garage/tests/common/client.rs b/src/garage/tests/common/client.rs index 164888a4..7a6612cb 100644 --- a/src/garage/tests/common/client.rs +++ b/src/garage/tests/common/client.rs @@ -12,7 +12,7 @@ pub fn build_client(key: &Key) -> Client { .endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT)) .region(super::REGION) .credentials_provider(credentials) - .behavior_version(BehaviorVersion::latest()) + .behavior_version(BehaviorVersion::v2024_03_28()) .build(); Client::from_conf(config) diff --git a/src/garage/tests/common/custom_requester.rs b/src/garage/tests/common/custom_requester.rs index ee78ad2d..6a8eed38 100644 --- a/src/garage/tests/common/custom_requester.rs +++ b/src/garage/tests/common/custom_requester.rs @@ -244,7 +244,7 @@ impl<'a> RequestBuilder<'a> { ); all_headers.insert( HeaderName::from_static("x-amz-trailer"), - HeaderValue::from_str(trailer_algorithm).unwrap(), + HeaderValue::from_str(&trailer_algorithm).unwrap(), ); all_headers.insert( @@ -252,8 +252,8 @@ impl<'a> RequestBuilder<'a> { to_streaming_unsigned_trailer_body( &self.body, *chunk_size, - trailer_algorithm, - trailer_value, + &trailer_algorithm, + &trailer_value, ) .len() .to_string() @@ -330,8 +330,8 @@ 
impl<'a> RequestBuilder<'a> { } => to_streaming_unsigned_trailer_body( &self.body, *chunk_size, - trailer_algorithm, - trailer_value, + &trailer_algorithm, + &trailer_value, ), _ => self.body.clone(), }; diff --git a/src/garage/tests/common/garage.rs b/src/garage/tests/common/garage.rs index 45c62d40..2b0a381c 100644 --- a/src/garage/tests/common/garage.rs +++ b/src/garage/tests/common/garage.rs @@ -1,8 +1,7 @@ +use std::mem::MaybeUninit; use std::path::{Path, PathBuf}; use std::process; -use std::sync::{Mutex, OnceLock}; - -use serde_json::json; +use std::sync::Once; use super::ext::*; @@ -19,7 +18,7 @@ pub struct Key { } pub struct Instance { - process: Mutex, + process: process::Child, pub path: PathBuf, pub default_key: Key, pub s3_port: u16, @@ -110,7 +109,7 @@ api_bind_addr = "127.0.0.1:{admin_port}" .expect("Could not start garage"); Instance { - process: Mutex::new(child), + process: child, path, default_key: Key::default(), s3_port: port, @@ -161,11 +160,9 @@ api_bind_addr = "127.0.0.1:{admin_port}" .expect_success_status("Could not apply garage node layout"); } - fn terminate(&self) { + fn terminate(&mut self) { // TODO: Terminate "gracefully" the process with SIGTERM instead of directly SIGKILL it. 
self.process - .lock() - .expect("could not lock access to garage child process mutex") .kill() .expect("Could not terminate garage process"); } @@ -198,37 +195,57 @@ api_bind_addr = "127.0.0.1:{admin_port}" let mut key = Key::default(); let mut cmd = self.command(); - let base = cmd.args(["json-api", "CreateKey"]); + let base = cmd.args(["key", "create"]); let with_name = match maybe_name { - Some(name) => base.args([serde_json::to_string(&json!({"name": name})).unwrap()]), - None => base.args(["{}"]), + Some(name) => base.args([name]), + None => base, }; let output = with_name.expect_success_output("Could not create key"); - let stdout: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + let stdout = String::from_utf8(output.stdout).unwrap(); - key.id = stdout["accessKeyId"].as_str().unwrap().to_string(); - key.secret = stdout["secretAccessKey"].as_str().unwrap().to_string(); + for line in stdout.lines() { + if let Some(key_id) = line.strip_prefix("Key ID: ") { + key.id = key_id.to_owned(); + continue; + } + if let Some(key_secret) = line.strip_prefix("Secret key: ") { + key.secret = key_secret.to_owned(); + continue; + } + } + assert!(!key.id.is_empty(), "Invalid key: Key ID is empty"); + assert!(!key.secret.is_empty(), "Invalid key: Key secret is empty"); key } } -static INSTANCE: OnceLock = OnceLock::new(); +static mut INSTANCE: MaybeUninit = MaybeUninit::uninit(); +static INSTANCE_INIT: Once = Once::new(); #[static_init::destructor] extern "C" fn terminate_instance() { - if let Some(instance) = INSTANCE.get() { - instance.terminate(); + if INSTANCE_INIT.is_completed() { + // This block is sound as it depends on `INSTANCE_INIT` being completed, meaning `INSTANCE` + // is actually initialized. 
+ unsafe { + INSTANCE.assume_init_mut().terminate(); + } } } pub fn instance() -> &'static Instance { - INSTANCE.get_or_init(|| { + INSTANCE_INIT.call_once(|| unsafe { let mut instance = Instance::new(); instance.setup(); - instance - }) + + INSTANCE.write(instance); + }); + + // This block is sound as it depends on `INSTANCE_INIT` being completed by calling `call_once` (blocking), + // meaning `INSTANCE` is actually initialized. + unsafe { INSTANCE.assume_init_ref() } } pub fn command(config_path: &Path) -> process::Command { diff --git a/src/garage/tests/common/mod.rs b/src/garage/tests/common/mod.rs index a7e08de1..1273bad1 100644 --- a/src/garage/tests/common/mod.rs +++ b/src/garage/tests/common/mod.rs @@ -75,7 +75,7 @@ impl Context { bucket_name } - /// Build a `K2vClient` for a given bucket + /// Build a K2vClient for a given bucket #[cfg(feature = "k2v")] pub fn k2v_client(&self, bucket: &str) -> K2vClient { let config = k2v_client::K2vClientConfig { diff --git a/src/garage/tests/s3/list.rs b/src/garage/tests/s3/list.rs index 8b423a55..1b0c006d 100644 --- a/src/garage/tests/s3/list.rs +++ b/src/garage/tests/s3/list.rs @@ -123,7 +123,7 @@ async fn test_listobjectsv2() { (Some(k), None) if k.len() == 1 => cnt_key += 1, (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, _ => unreachable!("logic error"), - } + }; if next.is_none() { break; } @@ -331,7 +331,7 @@ async fn test_listobjectsv1() { (Some(k), None) if k.len() == 1 => cnt_key += 1, (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, _ => unreachable!("logic error"), - } + }; if next.is_none() { break; } diff --git a/src/garage/tests/s3/mod.rs b/src/garage/tests/s3/mod.rs index fa081389..e75b1397 100644 --- a/src/garage/tests/s3/mod.rs +++ b/src/garage/tests/s3/mod.rs @@ -2,7 +2,6 @@ mod list; mod multipart; mod objects; mod presigned; -mod signature_encoding; mod simple; mod ssec; mod streaming_signature; diff --git a/src/garage/tests/s3/multipart.rs b/src/garage/tests/s3/multipart.rs index 
fa129dfd..cc424f59 100644 --- a/src/garage/tests/s3/multipart.rs +++ b/src/garage/tests/s3/multipart.rs @@ -314,7 +314,7 @@ async fn test_multipart_with_checksum() { .build(); let expected_checksum = calculate_sha1( - &[ + &vec![ BASE64_STANDARD.decode(&ck1).unwrap(), BASE64_STANDARD.decode(&ck2).unwrap(), BASE64_STANDARD.decode(&ck3).unwrap(), @@ -466,7 +466,7 @@ async fn test_uploadlistpart() { .await .unwrap(); - assert_eq!(r.part_number_marker.as_deref(), None); + assert!(r.part_number_marker.is_none()); assert_eq!(r.next_part_number_marker.as_deref(), Some("1")); assert_eq!(r.max_parts.unwrap(), 1_i32); assert!(r.is_truncated.unwrap()); diff --git a/src/garage/tests/s3/signature_encoding.rs b/src/garage/tests/s3/signature_encoding.rs deleted file mode 100644 index 6fbb4078..00000000 --- a/src/garage/tests/s3/signature_encoding.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::common; - -use aws_sdk_s3::presigning::PresigningConfig; -use bytes::Bytes; -use http::{Request, StatusCode}; -use http_body_util::Full; -use std::time::Duration; - -#[tokio::test] -async fn test_signature_encoding() { - let ctx = common::context(); - let bucket = ctx.create_bucket("signature-encoding"); - - let obj_key = "key@good~.txt"; - let obj_content = "hello world of special characters"; - - let _put_obj_info = ctx - .client - .put_object() - .bucket(&bucket) - .key(obj_key) - .body(obj_content.as_bytes().to_vec().into()) - .send() - .await - .expect("failed to put object"); - - let _get_obj = ctx - .client - .get_object() - .bucket(&bucket) - .key(obj_key) - .send() - .await - .expect("failed to get object"); - - let presign_config = PresigningConfig::builder() - .expires_in(Duration::from_secs(10)) - .build() - .expect("failed to build presigning config"); - let presigned_request = ctx - .client - .get_object() - .bucket(&bucket) - .key(obj_key) - .presigned(presign_config) - .await - .expect("failed to construct presigned request"); - - let altered_url = presigned_request - .uri() - 
.replace("%40", "@") - .replace("~", "%7E"); - - let client = ctx.custom_request.client(); - let req_builder = Request::builder() - .method(presigned_request.method()) - .uri(altered_url); - let req = presigned_request - .headers() - .fold(req_builder, |req_builder, (key, value)| { - req_builder.header(key, value) - }) - .body(Full::new(Bytes::new())) - .expect("failed to construct request from presigned_request"); - - let res = client - .request(req) - .await - .expect("failed to execute presigned request"); - - assert_eq!(res.status(), StatusCode::OK); -} diff --git a/src/garage/tests/s3/ssec.rs b/src/garage/tests/s3/ssec.rs index 02c9b14a..d8f11950 100644 --- a/src/garage/tests/s3/ssec.rs +++ b/src/garage/tests/s3/ssec.rs @@ -19,7 +19,7 @@ async fn test_ssec_object() { .map(|x| ((x * 3792) % 256) as u8) .collect::>(); - for data in [bytes1, bytes2] { + for data in vec![bytes1, bytes2] { let stream = ByteStream::new(data.clone().into()); // Write encrypted object @@ -399,7 +399,6 @@ async fn test_multipart_upload() { } } -#[expect(clippy::too_many_arguments)] async fn test_read_encrypted( ctx: &Context, bucket: &str, diff --git a/src/garage/tests/s3/streaming_signature.rs b/src/garage/tests/s3/streaming_signature.rs index abd4f1ec..a86feefc 100644 --- a/src/garage/tests/s3/streaming_signature.rs +++ b/src/garage/tests/s3/streaming_signature.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use base64::prelude::*; -use crc_fast::{checksum as crc_checksum, CrcAlgorithm}; +use crc32fast::Hasher as Crc32; use crate::common; use crate::common::ext::CommandExt; @@ -69,8 +69,9 @@ async fn test_putobject_streaming() { { let etag = "\"46cf18a9b447991b450cad3facf5937e\""; - let crc32 = crc_checksum(CrcAlgorithm::Crc32IsoHdlc, &BODY[..]) as u32; - let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32)[..]); + let mut crc32 = Crc32::new(); + crc32.update(&BODY[..]); + let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]); let mut headers = 
HashMap::new(); headers.insert("x-amz-checksum-crc32".to_owned(), crc32.clone()); @@ -128,8 +129,7 @@ async fn test_putobject_streaming_unsigned_trailer() { let mut headers = HashMap::new(); headers.insert("content-type".to_owned(), content_type.to_owned()); - let empty_crc32 = crc_checksum(CrcAlgorithm::Crc32IsoHdlc, &[]) as u32; - let empty_crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(empty_crc32)[..]); + let empty_crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(Crc32::new().finalize())[..]); let res = ctx .custom_request @@ -180,8 +180,9 @@ async fn test_putobject_streaming_unsigned_trailer() { { let etag = "\"46cf18a9b447991b450cad3facf5937e\""; - let crc32 = crc_checksum(CrcAlgorithm::Crc32IsoHdlc, &BODY[..]) as u32; - let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32)[..]); + let mut crc32 = Crc32::new(); + crc32.update(&BODY[..]); + let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]); // try sending with wrong crc32, check that it fails let err_res = ctx diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs index a2be948b..bbac3de5 100644 --- a/src/garage/tests/s3/website.rs +++ b/src/garage/tests/s3/website.rs @@ -4,12 +4,8 @@ use crate::json_body; use assert_json_diff::assert_json_eq; use aws_sdk_s3::{ - error::ProvideErrorMetadata, primitives::ByteStream, - types::{ - Condition, CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, Protocol, Redirect, - RoutingRule, WebsiteConfiguration, - }, + types::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration}, }; use http::{Request, StatusCode}; use http_body_util::BodyExt; @@ -382,17 +378,15 @@ async fn test_website_s3_api() { .unwrap(); // Check CORS are deleted from the API - assert_eq!( - ctx.client - .get_bucket_cors() - .bucket(&bucket) - .send() - .await - .unwrap_err() - .into_service_error() - .code(), - Some("NoSuchCORSConfiguration") - ); + // @FIXME check what is the expected behavior when GetBucketCors is 
called on a bucket without + // any CORS. + assert!(ctx + .client + .get_bucket_cors() + .bucket(&bucket) + .send() + .await + .is_err()); // Test CORS are not sent anymore on a previously allowed request { @@ -461,18 +455,12 @@ async fn test_website_check_domain() { res_body, json!({ "code": "InvalidRequest", - "message": "Bad request: Missing argument `domain` for endpoint", + "message": "Bad request: No domain query string found", "region": "garage-integ-test", "path": "/check", }) ); - // FIXME: Edge case with empty domain - // Currently, empty domain is interpreted as an absent parameter - // due to logic in router_macros.rs, so this test fails. - // Maybe we want empty parameters to be acceptable? But that might - // break a lot of S3 stuff. - /* let admin_req = || { Request::builder() .method("GET") @@ -496,7 +484,6 @@ async fn test_website_check_domain() { "path": "/check", }) ); - */ let admin_req = || { Request::builder() @@ -547,491 +534,6 @@ async fn test_website_check_domain() { ); } -#[tokio::test] -async fn test_website_redirect_full_bucket() { - const BCKT_NAME: &str = "my-redirect-full"; - let ctx = common::context(); - let bucket = ctx.create_bucket(BCKT_NAME); - - let conf = WebsiteConfiguration::builder() - .routing_rules( - RoutingRule::builder() - .condition(Condition::builder().key_prefix_equals("").build()) - .redirect( - Redirect::builder() - .protocol(Protocol::Https) - .host_name("other.tld") - .replace_key_prefix_with("") - .build(), - ) - .build(), - ) - .build(); - - ctx.client - .put_bucket_website() - .bucket(&bucket) - .website_configuration(conf) - .send() - .await - .unwrap(); - - { - let req = Request::builder() - .method("GET") - .uri(format!("http://127.0.0.1:{}/my-path", ctx.garage.web_port)) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::new(Bytes::new())) - .unwrap(); - - let client = Client::builder(TokioExecutor::new()).build_http(); - let resp = client.request(req).await.unwrap(); - 
assert_eq!(resp.status(), StatusCode::FOUND); - assert_eq!( - resp.headers() - .get(hyper::header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "https://other.tld/my-path" - ); - } - - { - let req = Request::builder() - .method("GET") - .uri(format!("http://127.0.0.1:{}/my-path/", ctx.garage.web_port)) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::new(Bytes::new())) - .unwrap(); - - let client = Client::builder(TokioExecutor::new()).build_http(); - let resp = client.request(req).await.unwrap(); - assert_eq!(resp.status(), StatusCode::FOUND); - assert_eq!( - resp.headers() - .get(hyper::header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "https://other.tld/my-path/" - ); - } - - { - let req = Request::builder() - .method("GET") - .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port)) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::new(Bytes::new())) - .unwrap(); - - let client = Client::builder(TokioExecutor::new()).build_http(); - let resp = client.request(req).await.unwrap(); - assert_eq!(resp.status(), StatusCode::FOUND); - assert_eq!( - resp.headers() - .get(hyper::header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "https://other.tld/" - ); - } -} - -#[tokio::test] -async fn test_website_redirect() { - const BCKT_NAME: &str = "my-redirect"; - let ctx = common::context(); - let bucket = ctx.create_bucket(BCKT_NAME); - - ctx.client - .put_object() - .bucket(&bucket) - .key("index.html") - .body(ByteStream::from_static(b"index")) - .send() - .await - .unwrap(); - ctx.client - .put_object() - .bucket(&bucket) - .key("404.html") - .body(ByteStream::from_static(b"main 404")) - .send() - .await - .unwrap(); - ctx.client - .put_object() - .bucket(&bucket) - .key("static-file") - .body(ByteStream::from_static(b"static file")) - .send() - .await - .unwrap(); - - let mut conf = WebsiteConfiguration::builder() - .index_document( - IndexDocument::builder() - .suffix("home.html") - .build() - .unwrap(), - ) - 
.error_document(ErrorDocument::builder().key("404.html").build().unwrap()); - - for (prefix, condition) in [("unconditional", false), ("conditional", true)] { - let code = condition.then(|| "404".to_string()); - conf = conf - // simple redirect - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/redirect-prefix/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("302") - .replace_key_prefix_with("other-prefix/") - .build(), - ) - .build(), - ) - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/redirect-prefix-307/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("307") - .replace_key_prefix_with("other-prefix/") - .build(), - ) - .build(), - ) - // simple redirect - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/redirect-fixed/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("302") - .replace_key_with("fixed_key") - .build(), - ) - .build(), - ) - // stream other file - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/stream-fixed/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("200") - .replace_key_with("static-file") - .build(), - ) - .build(), - ) - // stream other file as error - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/stream-404/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("404") - .replace_key_with("static-file") - 
.build(), - ) - .build(), - ) - // fail to stream other file - .routing_rules( - RoutingRule::builder() - .condition( - Condition::builder() - .set_http_error_code_returned_equals(code.clone()) - .key_prefix_equals(format!("{prefix}/stream-missing/")) - .build(), - ) - .redirect( - Redirect::builder() - .http_redirect_code("200") - .replace_key_with("missing-file") - .build(), - ) - .build(), - ); - } - let conf = conf.build(); - - ctx.client - .put_bucket_website() - .bucket(&bucket) - .website_configuration(conf.clone()) - .send() - .await - .unwrap(); - - let stored_cfg = ctx - .client - .get_bucket_website() - .bucket(&bucket) - .send() - .await - .unwrap(); - assert_eq!(stored_cfg.index_document, conf.index_document); - assert_eq!(stored_cfg.error_document, conf.error_document); - assert_eq!(stored_cfg.routing_rules, conf.routing_rules); - - let req = |path| { - Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{}/{}/path", - ctx.garage.web_port, path - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::new(Bytes::new())) - .unwrap() - }; - - test_redirect_helper("unconditional", true, &req).await; - test_redirect_helper("conditional", true, &req).await; - for prefix in ["unconditional", "conditional"] { - for rule_path in [ - "redirect-prefix", - "redirect-prefix-307", - "redirect-fixed", - "stream-fixed", - "stream-404", - "stream-missing", - ] { - ctx.client - .put_object() - .bucket(&bucket) - .key(format!("{prefix}/{rule_path}/path")) - .body(ByteStream::from_static(b"i exist")) - .send() - .await - .unwrap(); - } - } - test_redirect_helper("unconditional", true, &req).await; - test_redirect_helper("conditional", false, &req).await; -} - -async fn test_redirect_helper( - prefix: &str, - should_see_redirect: bool, - req: impl Fn(String) -> Request>, -) { - use http::header; - let client = Client::builder(TokioExecutor::new()).build_http(); - let expected_body = b"i exist".as_ref(); - - let resp = client - 
.request(req(format!("{prefix}/redirect-prefix"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::FOUND); - assert_eq!( - resp.headers() - .get(header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "/other-prefix/path" - ); - assert!(resp - .into_body() - .collect() - .await - .unwrap() - .to_bytes() - .is_empty()); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } - - let resp = client - .request(req(format!("{prefix}/redirect-prefix-307"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::TEMPORARY_REDIRECT); - assert_eq!( - resp.headers() - .get(header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "/other-prefix/path" - ); - assert!(resp - .into_body() - .collect() - .await - .unwrap() - .to_bytes() - .is_empty()); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } - - let resp = client - .request(req(format!("{prefix}/redirect-fixed"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::FOUND); - assert_eq!( - resp.headers() - .get(header::LOCATION) - .unwrap() - .to_str() - .unwrap(), - "/fixed_key" - ); - assert!(resp - .into_body() - .collect() - .await - .unwrap() - .to_bytes() - .is_empty()); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } - let resp = client - .request(req(format!("{prefix}/stream-fixed"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::OK); - 
assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - b"static file".as_ref(), - ); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } - let resp = client - .request(req(format!("{prefix}/stream-404"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::NOT_FOUND); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - b"static file".as_ref(), - ); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } - let resp = client - .request(req(format!("{prefix}/stream-404"))) - .await - .unwrap(); - if should_see_redirect { - assert_eq!(resp.status(), StatusCode::NOT_FOUND); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - b"static file".as_ref(), - ); - } else { - assert_eq!(resp.status(), StatusCode::OK); - assert!(resp.headers().get(header::LOCATION).is_none()); - assert_eq!( - resp.into_body().collect().await.unwrap().to_bytes(), - expected_body, - ); - } -} - -#[tokio::test] -async fn test_website_invalid_redirect() { - const BCKT_NAME: &str = "my-invalid-redirect"; - let ctx = common::context(); - let bucket = ctx.create_bucket(BCKT_NAME); - - let conf = WebsiteConfiguration::builder() - .routing_rules( - RoutingRule::builder() - .condition(Condition::builder().key_prefix_equals("").build()) - .redirect( - Redirect::builder() - .protocol(Protocol::Https) - .host_name("other.tld") - .replace_key_prefix_with("") - // we don't allow 200 with hostname - .http_redirect_code("200") - .build(), - 
) - .build(), - ) - .build(); - - ctx.client - .put_bucket_website() - .bucket(&bucket) - .website_configuration(conf) - .send() - .await - .unwrap_err(); -} - #[tokio::test] async fn test_website_puny() { const BCKT_NAME: &str = "xn--pda.eu"; diff --git a/src/garage/tracing_setup.rs b/src/garage/tracing_setup.rs index 8cc591ec..55fc4094 100644 --- a/src/garage/tracing_setup.rs +++ b/src/garage/tracing_setup.rs @@ -1,54 +1,37 @@ -pub use telemetry::init_tracing; +use std::time::Duration; -#[cfg(not(feature = "telemetry-otlp"))] -mod telemetry { - use garage_util::data::Uuid; - use garage_util::error::Error; +use opentelemetry::sdk::{ + trace::{self, IdGenerator, Sampler}, + Resource, +}; +use opentelemetry::KeyValue; +use opentelemetry_otlp::WithExportConfig; - #[expect(clippy::unnecessary_wraps)] - pub fn init_tracing(_: &str, _: Uuid) -> Result<(), Error> { - error!("Garage was built without OTLP exporter, admin.trace_sink is ignored."); - Ok(()) - } -} - -#[cfg(feature = "telemetry-otlp")] -mod telemetry { - use std::time::Duration; - - use opentelemetry::sdk::{ - trace::{self, IdGenerator, Sampler}, - Resource, - }; - use opentelemetry::KeyValue; - use opentelemetry_otlp::WithExportConfig; - - use garage_util::data::*; - use garage_util::error::*; - - pub fn init_tracing(export_to: &str, node_id: Uuid) -> Result<(), Error> { - let node_id = hex::encode(&node_id.as_slice()[..8]); - - opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter( - opentelemetry_otlp::new_exporter() - .tonic() - .with_endpoint(export_to) - .with_timeout(Duration::from_secs(3)), - ) - .with_trace_config( - trace::config() - .with_id_generator(IdGenerator::default()) - .with_sampler(Sampler::AlwaysOn) - .with_resource(Resource::new(vec![ - KeyValue::new("service.name", "garage"), - KeyValue::new("service.instance.id", node_id), - ])), - ) - .install_batch(opentelemetry::runtime::Tokio) - .ok_or_message("Unable to initialize tracing")?; - - Ok(()) - } +use garage_util::data::*; 
+use garage_util::error::*; + +pub fn init_tracing(export_to: &str, node_id: Uuid) -> Result<(), Error> { + let node_id = hex::encode(&node_id.as_slice()[..8]); + + opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter( + opentelemetry_otlp::new_exporter() + .tonic() + .with_endpoint(export_to) + .with_timeout(Duration::from_secs(3)), + ) + .with_trace_config( + trace::config() + .with_id_generator(IdGenerator::default()) + .with_sampler(Sampler::AlwaysOn) + .with_resource(Resource::new(vec![ + KeyValue::new("service.name", "garage"), + KeyValue::new("service.instance.id", node_id), + ])), + ) + .install_batch(opentelemetry::runtime::Tokio) + .ok_or_message("Unable to initialize tracing")?; + + Ok(()) } diff --git a/src/k2v-client/Cargo.toml b/src/k2v-client/Cargo.toml index 3c1076c0..bbd09b19 100644 --- a/src/k2v-client/Cargo.toml +++ b/src/k2v-client/Cargo.toml @@ -1,10 +1,7 @@ [package] name = "k2v-client" version = "0.0.4" -authors = [ - "Trinity Pointard ", - "Alex Auvolat ", -] +authors = ["Trinity Pointard ", "Alex Auvolat "] edition = "2018" license = "AGPL-3.0" description = "Client library for the Garage K2V protocol" @@ -45,6 +42,3 @@ path = "lib.rs" name = "k2v-cli" path = "bin/k2v-cli.rs" required-features = ["cli"] - -[lints] -workspace = true diff --git a/src/k2v-client/bin/k2v-cli.rs b/src/k2v-client/bin/k2v-cli.rs index d361c6dc..b1c2169b 100644 --- a/src/k2v-client/bin/k2v-cli.rs +++ b/src/k2v-client/bin/k2v-cli.rs @@ -388,12 +388,10 @@ impl Filter { } } -/// # Safety -/// -/// initialize `RUST_LOG` env var before start tokio runtime to limit multithread problem with `std::env::set_var` which is unsafe -fn main() -> Result<(), Error> { +#[tokio::main] +async fn main() -> Result<(), Error> { if std::env::var("RUST_LOG").is_err() { - unsafe { std::env::set_var("RUST_LOG", "warn") }; + std::env::set_var("RUST_LOG", "warn") } tracing_subscriber::fmt() @@ -414,14 +412,6 @@ fn main() -> Result<(), Error> { let client = 
K2vClient::new(config)?; - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap() - .block_on(run(args)) -} - -async fn run(args: Args) -> Result<(), Error> { match args.command { Command::Insert { partition_key, @@ -613,5 +603,6 @@ async fn run(args: Args) -> Result<(), Error> { } } } + Ok(()) } diff --git a/src/k2v-client/lib.rs b/src/k2v-client/lib.rs index 42bfa9f6..fe8fd3e0 100644 --- a/src/k2v-client/lib.rs +++ b/src/k2v-client/lib.rs @@ -95,7 +95,7 @@ impl K2vClient { }) } - /// Perform a `ReadItem` request, reading the value(s) stored for a single pk+sk. + /// Perform a ReadItem request, reading the value(s) stored for a single pk+sk. pub async fn read_item( &self, partition_key: &str, @@ -134,7 +134,7 @@ impl K2vClient { } } - /// Perform a `PollItem` request, waiting for the value(s) stored for a single pk+sk to be + /// Perform a PollItem request, waiting for the value(s) stored for a single pk+sk to be /// updated. pub async fn poll_item( &self, @@ -190,7 +190,7 @@ impl K2vClient { } } - /// Perform a `PollRange` request, waiting for any change in a given range of keys + /// Perform a PollRange request, waiting for any change in a given range of keys /// to occur pub async fn poll_range( &self, @@ -239,7 +239,7 @@ impl K2vClient { })) } - /// Perform an `InsertItem` request, inserting a value for a single pk+sk. + /// Perform an InsertItem request, inserting a value for a single pk+sk. pub async fn insert_item( &self, partition_key: &str, @@ -258,7 +258,7 @@ impl K2vClient { Ok(()) } - /// Perform a `DeleteItem` request, deleting the value(s) stored for a single pk+sk. + /// Perform a DeleteItem request, deleting the value(s) stored for a single pk+sk. 
pub async fn delete_item( &self, partition_key: &str, @@ -274,7 +274,7 @@ impl K2vClient { Ok(()) } - /// Perform a `ReadIndex` request, listing partition key which have at least one associated + /// Perform a ReadIndex request, listing partition key which have at least one associated /// sort key, and which matches the filter. pub async fn read_index( &self, @@ -300,7 +300,7 @@ impl K2vClient { }) } - /// Perform an `InsertBatch` request, inserting multiple values at once. Note: this operation is + /// Perform an InsertBatch request, inserting multiple values at once. Note: this operation is /// *not* atomic: it is possible for some sub-operations to fails and others to success. In /// that case, failure is reported. pub async fn insert_batch(&self, operations: &[BatchInsertOp<'_>]) -> Result<(), Error> { @@ -312,7 +312,7 @@ impl K2vClient { Ok(()) } - /// Perform a `ReadBatch` request, reading multiple values or range of values at once. + /// Perform a ReadBatch request, reading multiple values or range of values at once. pub async fn read_batch( &self, operations: &[BatchReadOp<'_>], @@ -346,7 +346,7 @@ impl K2vClient { .collect()) } - /// Perform a `DeleteBatch` request, deleting multiple values or range of values at once, without + /// Perform a DeleteBatch request, deleting multiple values or range of values at once, without /// providing causality information. pub async fn delete_batch(&self, operations: &[BatchDeleteOp<'_>]) -> Result, Error> { let url = self.build_url(None, &[("delete", "")]); @@ -371,7 +371,7 @@ impl K2vClient { use sha2::{Digest, Sha256}; let mut hasher = Sha256::new(); hasher.update(req.body()); - let hash = hex::encode(hasher.finalize()); + let hash = hex::encode(&hasher.finalize()); req.headers_mut() .insert(AMZ_CONTENT_SHA256, hash.try_into().unwrap()); @@ -588,7 +588,7 @@ impl Serialize for K2vValue { } } -/// A set of `K2vValue` and associated causality information. +/// A set of K2vValue and associated causality information. 
#[derive(Debug, Clone, Serialize)] pub struct CausalValue { pub causality: CausalityToken, @@ -621,12 +621,12 @@ pub struct PollRangeFilter<'a> { pub prefix: Option<&'a str>, } -/// Response to a `poll_range` query +/// Response to a poll_range query #[derive(Debug, Default, Clone, Serialize)] pub struct PollRangeResult { - /// List of items that have changed since last `PollRange` call. + /// List of items that have changed since last PollRange call. pub items: BTreeMap, - /// opaque string representing items already seen for future `PollRange` calls. + /// opaque string representing items already seen for future PollRange calls. pub seen_marker: String, } @@ -647,15 +647,15 @@ struct PollRangeResponse { } impl<'a> Filter<'a> { - fn query_params(&self) -> Vec<(&'static str, std::borrow::Cow<'_, str>)> { + fn query_params(&self) -> Vec<(&'static str, std::borrow::Cow)> { let mut res = Vec::<(&'static str, std::borrow::Cow)>::with_capacity(8); - if let Some(start) = self.start { + if let Some(start) = self.start.as_deref() { res.push(("start", start.into())); } - if let Some(end) = self.end { + if let Some(end) = self.end.as_deref() { res.push(("end", end.into())); } - if let Some(prefix) = self.prefix { + if let Some(prefix) = self.prefix.as_deref() { res.push(("prefix", prefix.into())); } if let Some(limit) = &self.limit { @@ -696,7 +696,7 @@ pub struct PartitionInfo { pub bytes: u64, } -/// Single sub-operation of an `InsertBatch`. +/// Single sub-operation of an InsertBatch. #[derive(Debug, Clone, Serialize)] pub struct BatchInsertOp<'a> { #[serde(rename = "pk")] @@ -709,7 +709,7 @@ pub struct BatchInsertOp<'a> { pub value: K2vValue, } -/// Single sub-operation of a `ReadBatch`. +/// Single sub-operation of a ReadBatch. 
#[derive(Debug, Default, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct BatchReadOp<'a> { @@ -743,7 +743,7 @@ struct BatchReadItem { v: Vec, } -/// Single sub-operation of a `DeleteBatch` +/// Single sub-operation of a DeleteBatch #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct BatchDeleteOp<'a> { diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 12683a0e..289c0024 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_model" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -21,7 +21,6 @@ garage_block.workspace = true garage_util.workspace = true garage_net.workspace = true -argon2.workspace = true async-trait.workspace = true blake2.workspace = true chrono.workspace = true @@ -41,11 +40,8 @@ futures.workspace = true tokio.workspace = true [features] -default = ["lmdb", "sqlite"] -k2v = ["garage_util/k2v"] -lmdb = ["garage_db/lmdb"] -sqlite = ["garage_db/sqlite"] -fjall = ["garage_db/fjall"] - -[lints] -workspace = true +default = [ "lmdb", "sqlite" ] +k2v = [ "garage_util/k2v" ] +lmdb = [ "garage_db/lmdb" ] +sqlite = [ "garage_db/sqlite" ] +fjall = [ "garage_db/fjall" ] diff --git a/src/model/admin_token_table.rs b/src/model/admin_token_table.rs deleted file mode 100644 index 25c7bd62..00000000 --- a/src/model/admin_token_table.rs +++ /dev/null @@ -1,190 +0,0 @@ -use base64::prelude::*; - -use garage_util::crdt::{self, Crdt}; -use garage_util::time::now_msec; - -use garage_table::{EmptyKey, Entry, TableSchema}; - -pub use crate::key_table::KeyFilter; - -mod v2 { - use garage_util::crdt; - use serde::{Deserialize, Serialize}; - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct AdminApiToken { - /// An admin API token is a bearer token of the following form: - /// `.` - /// Only the prefix is saved here, it is used as an identifier. 
- /// The entire API token is hashed and saved in `token_hash` in `state`. - pub prefix: String, - - /// If the token is not deleted, its parameters - pub state: crdt::Deletable, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct AdminApiTokenParams { - /// Creation date - pub created: u64, - - /// The entire API token hashed as a password - pub token_hash: String, - - /// User-defined name - pub name: crdt::Lww, - - /// The optional time of expiration of the token - pub expiration: crdt::Lww>, - - /// The scope of the token, i.e. list of authorized admin API calls - pub scope: crdt::Lww, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct AdminApiTokenScope(pub Vec); - - impl garage_util::migrate::InitialFormat for AdminApiToken { - const VERSION_MARKER: &'static [u8] = b"G2admtok"; - } -} - -pub use v2::*; - -impl Crdt for AdminApiTokenParams { - fn merge(&mut self, o: &Self) { - self.name.merge(&o.name); - self.expiration.merge(&o.expiration); - self.scope.merge(&o.scope); - } -} - -impl Crdt for AdminApiToken { - fn merge(&mut self, other: &Self) { - self.state.merge(&other.state); - } -} - -impl Crdt for AdminApiTokenScope { - fn merge(&mut self, other: &Self) { - self.0.retain(|x| other.0.contains(x)); - } -} - -impl AdminApiToken { - /// Create a new admin API token. - /// Returns the `AdminApiToken` object, which contains the hashed bearer token, - /// as well as the plaintext bearer token. 
- pub fn new(name: &str) -> (Self, String) { - use argon2::{ - password_hash::{rand_core::OsRng, PasswordHasher, SaltString}, - Argon2, - }; - - let prefix = hex::encode(&rand::random::<[u8; 12]>()[..]); - let secret = BASE64_URL_SAFE_NO_PAD.encode(&rand::random::<[u8; 32]>()[..]); - let token = format!("{}.{}", prefix, secret); - - let salt = SaltString::generate(&mut OsRng); - let argon2 = Argon2::default(); - let hashed_token = argon2 - .hash_password(token.as_bytes(), &salt) - .expect("could not hash admin API token") - .to_string(); - - let ret = AdminApiToken { - prefix, - state: crdt::Deletable::present(AdminApiTokenParams { - created: now_msec(), - token_hash: hashed_token, - name: crdt::Lww::new(name.to_string()), - expiration: crdt::Lww::new(None), - scope: crdt::Lww::new(AdminApiTokenScope(vec!["*".to_string()])), - }), - }; - - (ret, token) - } - - pub fn delete(prefix: String) -> Self { - Self { - prefix, - state: crdt::Deletable::Deleted, - } - } - - /// Returns true if this represents a deleted admin token - pub fn is_deleted(&self) -> bool { - self.state.is_deleted() - } - - /// Returns an option representing the params (None if in deleted state) - pub fn params(&self) -> Option<&AdminApiTokenParams> { - self.state.as_option() - } - - /// Mutable version of `.state()` - pub fn params_mut(&mut self) -> Option<&mut AdminApiTokenParams> { - self.state.as_option_mut() - } - - /// Scope, if not deleted, or empty slice - pub fn scope(&self) -> &[String] { - self.state - .as_option() - .map(|x| &x.scope.get().0[..]) - .unwrap_or_default() - } -} - -impl AdminApiTokenParams { - pub fn is_expired(&self, ts_now: u64) -> bool { - match *self.expiration.get() { - None => false, - Some(exp) => ts_now >= exp, - } - } - - pub fn has_scope(&self, endpoint: &str) -> bool { - self.scope.get().0.iter().any(|x| x == "*" || x == endpoint) - } -} - -impl Entry for AdminApiToken { - fn partition_key(&self) -> &EmptyKey { - &EmptyKey - } - fn sort_key(&self) -> &String { - 
&self.prefix - } - fn is_tombstone(&self) -> bool { - self.is_deleted() - } -} - -pub struct AdminApiTokenTable; - -impl TableSchema for AdminApiTokenTable { - const TABLE_NAME: &'static str = "admin_token"; - - type P = EmptyKey; - type S = String; - type E = AdminApiToken; - type Filter = KeyFilter; - - fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { - match filter { - KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()), - KeyFilter::MatchesAndNotDeleted(pat) => { - let pat = pat.to_lowercase(); - entry - .params() - .map(|p| { - entry.prefix.to_lowercase().starts_with(&pat) - || p.name.get().to_lowercase() == pat - }) - .unwrap_or(false) - } - } - } -} diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 0b1766b7..f1cc032e 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -35,7 +35,7 @@ mod v08 { /// Map of aliases that are or have been given to this bucket /// in the global namespace /// (not authoritative: this is just used as an indication to - /// map back to aliases when doing `ListBuckets`) + /// map back to aliases when doing ListBuckets) pub aliases: crdt::LwwMap, /// Map of aliases that are or have been given to this bucket /// in namespaces local to keys @@ -119,129 +119,14 @@ mod v08 { impl garage_util::migrate::InitialFormat for Bucket {} } -mod v2 { - use crate::permission::BucketKeyPerm; - use garage_util::crdt; - use garage_util::data::Uuid; - use serde::{Deserialize, Serialize}; - - use super::v08; - - pub use v08::{BucketQuotas, CorsRule, LifecycleExpiration, LifecycleFilter, LifecycleRule}; - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct Bucket { - /// ID of the bucket - pub id: Uuid, - /// State, and configuration if not deleted, of the bucket - pub state: crdt::Deletable, - } - - /// Configuration for a bucket - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct BucketParams { - /// Bucket's creation date - pub 
creation_date: u64, - /// Map of key with access to the bucket, and what kind of access they give - pub authorized_keys: crdt::Map, - - /// Map of aliases that are or have been given to this bucket - /// in the global namespace - /// (not authoritative: this is just used as an indication to - /// map back to aliases when doing `ListBuckets`) - pub aliases: crdt::LwwMap, - /// Map of aliases that are or have been given to this bucket - /// in namespaces local to keys - /// key = (access key id, alias name) - pub local_aliases: crdt::LwwMap<(String, String), bool>, - - /// Whether this bucket is allowed for website access - /// (under all of its global alias names), - /// and if so, the website configuration XML document - pub website_config: crdt::Lww>, - /// CORS rules - pub cors_config: crdt::Lww>>, - /// Lifecycle configuration - pub lifecycle_config: crdt::Lww>>, - /// Bucket quotas - pub quotas: crdt::Lww, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct WebsiteConfig { - pub index_document: String, - pub error_document: Option, - // this field is currently unused, but present so adding it in the future doesn't - // need a new migration - pub redirect_all: Option, - pub routing_rules: Vec, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct RedirectAll { - pub hostname: String, - pub protocol: String, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct RoutingRule { - pub condition: Option, - pub redirect: Redirect, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct RedirectCondition { - pub http_error_code: Option, - pub prefix: Option, - } - - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct Redirect { - pub hostname: Option, - pub http_redirect_code: u16, - pub protocol: Option, - pub replace_key_prefix: Option, - pub replace_key: Option, - } - - impl garage_util::migrate::Migrate for Bucket { - const 
VERSION_MARKER: &'static [u8] = b"G2bkt"; - - type Previous = v08::Bucket; - - fn migrate(old: v08::Bucket) -> Bucket { - Bucket { - id: old.id, - state: old.state.map(|x| BucketParams { - creation_date: x.creation_date, - authorized_keys: x.authorized_keys, - aliases: x.aliases, - local_aliases: x.local_aliases, - website_config: x.website_config.map(|wc_opt| { - wc_opt.map(|wc| WebsiteConfig { - index_document: wc.index_document, - error_document: wc.error_document, - redirect_all: None, - routing_rules: vec![], - }) - }), - cors_config: x.cors_config, - lifecycle_config: x.lifecycle_config, - quotas: x.quotas, - }), - } - } - } -} - -pub use v2::*; +pub use v08::*; impl AutoCrdt for BucketQuotas { const WARN_IF_DIFFERENT: bool = true; } impl BucketParams { - /// Create an empty `BucketParams` with no authorized keys and no website access + /// Create an empty BucketParams with no authorized keys and no website access fn new() -> Self { BucketParams { creation_date: now_msec(), diff --git a/src/model/garage.rs b/src/model/garage.rs index 4ba11bc5..f4f6f693 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -24,7 +24,6 @@ use crate::s3::mpu_table::*; use crate::s3::object_table::*; use crate::s3::version_table::*; -use crate::admin_token_table::*; use crate::bucket_alias_table::*; use crate::bucket_table::*; use crate::helper; @@ -51,8 +50,6 @@ pub struct Garage { /// The block manager pub block_manager: Arc, - /// Table containing admin API keys - pub admin_token_table: Arc>, /// Table containing buckets pub bucket_table: Arc>, /// Table containing bucket aliases @@ -150,30 +147,29 @@ impl Garage { info!("Initialize membership management system..."); let system = System::new(network_key, replication_factor, consistency_mode, &config)?; + let data_rep_param = TableShardedReplication { + system: system.clone(), + replication_factor: replication_factor.into(), + write_quorum: replication_factor.write_quorum(consistency_mode), + read_quorum: 1, + }; + let 
meta_rep_param = TableShardedReplication { - layout_manager: system.layout_manager.clone(), - consistency_mode, + system: system.clone(), + replication_factor: replication_factor.into(), + write_quorum: replication_factor.write_quorum(consistency_mode), + read_quorum: replication_factor.read_quorum(consistency_mode), }; let control_rep_param = TableFullReplication { system: system.clone(), - consistency_mode, }; info!("Initialize block manager..."); - let block_write_quorum = replication_factor.write_quorum(consistency_mode); - let block_manager = BlockManager::new(&db, &config, block_write_quorum, system.clone())?; + let block_manager = BlockManager::new(&db, &config, data_rep_param, system.clone())?; block_manager.register_bg_vars(&mut bg_vars); // ---- admin tables ---- - info!("Initialize admin_token_table..."); - let admin_token_table = Table::new( - AdminApiTokenTable, - control_rep_param.clone(), - system.clone(), - &db, - ); - info!("Initialize bucket_table..."); let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); @@ -263,7 +259,6 @@ impl Garage { db, system, block_manager, - admin_token_table, bucket_table, bucket_alias_table, key_table, @@ -283,7 +278,6 @@ impl Garage { pub fn spawn_workers(self: &Arc, bg: &BackgroundRunner) -> Result<(), Error> { self.block_manager.spawn_workers(bg); - self.admin_token_table.spawn_workers(bg); self.bucket_table.spawn_workers(bg); self.bucket_alias_table.spawn_workers(bg); self.key_table.spawn_workers(bg); diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index b1caded5..e5506d7e 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -1,7 +1,7 @@ use std::time::Duration; use garage_util::data::*; -use garage_util::error::{Error as GarageError, OkOrMessage}; +use garage_util::error::OkOrMessage; use garage_util::time::*; use garage_table::util::*; @@ -16,172 +16,104 @@ pub struct BucketHelper<'a>(pub(crate) &'a Garage); 
#[allow(clippy::ptr_arg)] impl<'a> BucketHelper<'a> { - // ================ - // Local functions to find buckets FAST. - // This is only for the fast path in API requests. - // They do not provide the read-after-write guarantee - // when used in conjunction with other operations that - // modify buckets and bucket aliases. - // ================ - - /// Return bucket corresponding to global bucket name, if it exists - /// (and is not a tombstone entry). - /// - /// The name can be of two forms: - /// 1. A global bucket alias - /// 2. The full ID of a bucket encoded in hex - /// - /// Note that there is no possible ambiguity between the two forms, - /// as the maximum length of a bucket name is 63 characters, and the full - /// hex id is 64 chars long. - /// - /// This will not do any network interaction to check the alias and - /// bucket tables, it will only check the local copy of the table. - /// As a consequence, it does not provide read-after-write guarantees. - pub fn resolve_global_bucket_fast( + pub async fn resolve_global_bucket_name( &self, bucket_name: &String, - ) -> Result, GarageError> { + ) -> Result, Error> { + // Bucket names in Garage are aliases, true bucket identifiers + // are 32-byte UUIDs. This function resolves bucket names into + // their full identifier by looking up in the bucket_alias_table. + // This function also allows buckets to be identified by their + // full UUID (hex-encoded). Here, if the name to be resolved is a + // hex string of the correct length, it is directly parsed as a bucket + // identifier which is returned. There is no risk of this conflicting + // with an actual bucket name: bucket names are max 63 chars long by + // the AWS spec, and hex-encoded UUIDs are 64 chars long. let hexbucket = hex::decode(bucket_name.as_str()) .ok() .and_then(|by| Uuid::try_from(&by)); - let bucket_id = match hexbucket { - Some(id) => id, - None => { - let alias = self - .0 - .bucket_alias_table - .get_local(&EmptyKey, bucket_name)? 
- .and_then(|x| *x.state.get()); - match alias { - Some(id) => id, - None => return Ok(None), - } - } - }; - Ok(self - .0 - .bucket_table - .get_local(&EmptyKey, &bucket_id)? - .filter(|x| !x.state.is_deleted())) - } - - /// Return bucket corresponding to a bucket name from the perspective of - /// a given access key, if it exists (and is not a tombstone entry). - /// - /// The name can be of three forms: - /// 1. A global bucket alias - /// 2. A local bucket alias - /// 3. The full ID of a bucket encoded in hex - /// - /// This will not do any network interaction, it will only check the local - /// copy of the bucket and global alias table. It will also resolve local - /// aliases directly using the data provided in the `api_key` parameter. - /// As a consequence, it does not provide read-after-write guarantees. - /// - /// In case no such bucket is found, this function returns a `NoSuchBucket` error. - #[allow(clippy::ptr_arg)] - pub fn resolve_bucket_fast( - &self, - bucket_name: &String, - api_key: &Key, - ) -> Result { - let api_key_params = api_key - .state - .as_option() - .ok_or_message("Key should not be deleted at this point")?; - - let bucket_opt = - if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) { - self.0 - .bucket_table - .get_local(&EmptyKey, bucket_id)? - .filter(|x| !x.state.is_deleted()) - } else { - self.resolve_global_bucket_fast(bucket_name)? - }; - bucket_opt.ok_or_else(|| Error::NoSuchBucket(bucket_name.to_string())) - } - - // ================ - // Global functions that do quorum reads/writes, - // for admin operations. - // ================ - - /// This is the same as `resolve_global_bucket_fast`, - /// except that it does quorum reads to ensure consistency. 
- pub async fn resolve_global_bucket( - &self, - bucket_name: &String, - ) -> Result, GarageError> { - let hexbucket = hex::decode(bucket_name.as_str()) - .ok() - .and_then(|by| Uuid::try_from(&by)); - let bucket_id = match hexbucket { - Some(id) => id, - None => { - let alias = self - .0 - .bucket_alias_table - .get(&EmptyKey, bucket_name) - .await? - .and_then(|x| *x.state.get()); - match alias { - Some(id) => id, - None => return Ok(None), - } - } - }; - Ok(self - .0 - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? - .filter(|x| !x.state.is_deleted())) - } - - /// Return bucket corresponding to a bucket name from the perspective of - /// a given access key, if it exists (and is not a tombstone entry). - /// - /// This is the same as `resolve_bucket_fast`, with the following differences: - /// - /// - this function does quorum reads to ensure consistency. - /// - this function fetches the Key entry from the key table to ensure up-to-date data - /// - this function returns None if the bucket is not found, instead of `HelperError::NoSuchBucket` - #[allow(clippy::ptr_arg)] - pub async fn resolve_bucket( - &self, - bucket_name: &String, - key_id: &String, - ) -> Result, GarageError> { - let local_alias = self - .0 - .key_table - .get(&EmptyKey, key_id) - .await? - .and_then(|k| k.state.into_option()) - .ok_or_else(|| GarageError::Message(format!("access key {} has been deleted", key_id)))? - .local_aliases - .get(bucket_name) - .copied() - .flatten(); - - if let Some(bucket_id) = local_alias { + if let Some(bucket_id) = hexbucket { Ok(self .0 .bucket_table .get(&EmptyKey, &bucket_id) .await? - .filter(|x| !x.state.is_deleted())) + .filter(|x| !x.state.is_deleted()) + .map(|_| bucket_id)) } else { - Ok(self.resolve_global_bucket(bucket_name).await?) + Ok(self + .0 + .bucket_alias_table + .get(&EmptyKey, bucket_name) + .await? 
+ .and_then(|x| *x.state.get())) } } + #[allow(clippy::ptr_arg)] + pub async fn resolve_bucket(&self, bucket_name: &String, api_key: &Key) -> Result { + let api_key_params = api_key + .state + .as_option() + .ok_or_message("Key should not be deleted at this point")?; + + if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) { + Ok(*bucket_id) + } else { + Ok(self + .resolve_global_bucket_name(bucket_name) + .await? + .ok_or_else(|| Error::NoSuchBucket(bucket_name.to_string()))?) + } + } + + /// Find a bucket by its global alias or a prefix of its uuid + pub async fn admin_get_existing_matching_bucket( + &self, + pattern: &String, + ) -> Result { + if let Some(uuid) = self.resolve_global_bucket_name(pattern).await? { + return Ok(uuid); + } else if pattern.len() >= 2 { + let hexdec = pattern + .get(..pattern.len() & !1) + .and_then(|x| hex::decode(x).ok()); + if let Some(hex) = hexdec { + let mut start = [0u8; 32]; + start + .as_mut_slice() + .get_mut(..hex.len()) + .ok_or_bad_request("invalid length")? + .copy_from_slice(&hex); + let mut candidates = self + .0 + .bucket_table + .get_range( + &EmptyKey, + Some(start.into()), + Some(DeletedFilter::NotDeleted), + 10, + EnumerationOrder::Forward, + ) + .await? + .into_iter() + .collect::>(); + candidates.retain(|x| hex::encode(x.id).starts_with(pattern)); + if candidates.len() == 1 { + return Ok(candidates.into_iter().next().unwrap().id); + } + } + } + Err(Error::BadRequest(format!( + "Bucket not found / several matching buckets: {}", + pattern + ))) + } + /// Returns a Bucket if it is present in bucket table, /// even if it is in deleted state. Querying a non-existing /// bucket ID returns an internal error. - pub(crate) async fn get_internal_bucket(&self, bucket_id: Uuid) -> Result { + pub async fn get_internal_bucket(&self, bucket_id: Uuid) -> Result { Ok(self .0 .bucket_table @@ -227,7 +159,7 @@ impl<'a> BucketHelper<'a> { .0 .system .cluster_layout() - .all_nongateway_nodes()? 
+ .all_nongateway_nodes() .to_vec(); let k2vindexes = self .0 diff --git a/src/model/helper/key.rs b/src/model/helper/key.rs index 00d8d5c6..b8a99d55 100644 --- a/src/model/helper/key.rs +++ b/src/model/helper/key.rs @@ -3,7 +3,7 @@ use garage_util::error::OkOrMessage; use crate::garage::Garage; use crate::helper::error::*; -use crate::key_table::Key; +use crate::key_table::{Key, KeyFilter}; pub struct KeyHelper<'a>(pub(crate) &'a Garage); @@ -33,4 +33,33 @@ impl<'a> KeyHelper<'a> { .filter(|b| !b.state.is_deleted()) .ok_or_else(|| Error::NoSuchAccessKey(key_id.to_string())) } + + /// Returns a Key if it is present in key table, + /// looking it up by key ID or by a match on its name, + /// only if it is in non-deleted state. + /// Querying a non-existing key ID or a deleted key + /// returns a bad request error. + pub async fn get_existing_matching_key(&self, pattern: &str) -> Result { + let candidates = self + .0 + .key_table + .get_range( + &EmptyKey, + None, + Some(KeyFilter::MatchesAndNotDeleted(pattern.to_string())), + 10, + EnumerationOrder::Forward, + ) + .await? + .into_iter() + .collect::>(); + if candidates.len() != 1 { + Err(Error::BadRequest(format!( + "{} matching keys", + candidates.len() + ))) + } else { + Ok(candidates.into_iter().next().unwrap()) + } + } } diff --git a/src/model/helper/locked.rs b/src/model/helper/locked.rs index a1ad5b2b..98344b63 100644 --- a/src/model/helper/locked.rs +++ b/src/model/helper/locked.rs @@ -17,13 +17,13 @@ use crate::helper::key::KeyHelper; use crate::key_table::*; use crate::permission::BucketKeyPerm; -/// A `LockedHelper` is the mandatory struct to hold when doing operations +/// A LockedHelper is the mandatory struct to hold when doing operations /// that modify access keys or bucket aliases. This structure takes /// a lock to a unit value that is in the globally-shared Garage struct. 
/// /// This avoid several concurrent requests to modify the list of buckets /// and aliases at the same time, ending up in inconsistent states. -/// This DOES NOT FIX THE FUNDAMENTAL ISSUE as `CreateBucket` requests handled +/// This DOES NOT FIX THE FUNDAMENTAL ISSUE as CreateBucket requests handled /// by different API nodes can still break the cluster, but it is a first /// fix that allows consistency to be maintained if all such requests are /// directed to a single node, which is doable for many deployments. @@ -37,7 +37,7 @@ pub struct LockedHelper<'a>( impl<'a> Drop for LockedHelper<'a> { fn drop(&mut self) { // make it explicit that the mutexguard lives until here - drop(self.1.take()); + drop(self.1.take()) } } @@ -167,7 +167,7 @@ impl<'a> LockedHelper<'a> { } /// Ensures a bucket does not have a certain global alias. - /// Contrarily to `unset_global_bucket_alias`, this does not + /// Contrarily to unset_global_bucket_alias, this does not /// fail on any condition other than: /// - bucket cannot be found (its fine if it is in deleted state) /// - alias cannot be found (its fine if it points to nothing or @@ -335,7 +335,7 @@ impl<'a> LockedHelper<'a> { } /// Ensures a bucket does not have a certain local alias. 
- /// Contrarily to `unset_local_bucket_alias`, this does not + /// Contrarily to unset_local_bucket_alias, this does not /// fail on any condition other than: /// - bucket cannot be found (its fine if it is in deleted state) /// - key cannot be found (its fine if alias in key points to nothing diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index 50abdec3..aa13ee7b 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -84,16 +84,17 @@ impl Entry for CounterEntry { impl CounterEntry { pub fn filtered_values(&self, layout: &LayoutHelper) -> HashMap { - self.filtered_values_internal(layout.all_nongateway_nodes().ok()) + let nodes = layout.all_nongateway_nodes(); + self.filtered_values_with_nodes(&nodes) } - fn filtered_values_internal(&self, nodes_opt: Option<&[Uuid]>) -> HashMap { + pub fn filtered_values_with_nodes(&self, nodes: &[Uuid]) -> HashMap { let mut ret = HashMap::new(); for (name, vals) in self.values.iter() { let new_vals = vals .node_values .iter() - .filter(|(n, _)| nodes_opt.map(|nodes| nodes.contains(n)).unwrap_or(true)) + .filter(|(n, _)| nodes.contains(n)) .map(|(_, (_, v))| *v) .collect::>(); if !new_vals.is_empty() { @@ -152,7 +153,7 @@ impl TableSchema for CounterTable { } let is_tombstone = entry - .filtered_values_internal(Some(&filter.1[..])) + .filtered_values_with_nodes(&filter.1[..]) .iter() .all(|(_, v)| *v == 0); filter.0.apply(is_tombstone) diff --git a/src/model/k2v/causality.rs b/src/model/k2v/causality.rs index 654932bd..7d311ede 100644 --- a/src/model/k2v/causality.rs +++ b/src/model/k2v/causality.rs @@ -1,9 +1,9 @@ -//! Implements a `CausalContext`, which is a set of timestamps for each +//! Implements a CausalContext, which is a set of timestamps for each //! node -- a vector clock --, indicating that the versions with //! timestamps <= these numbers have been seen and can be //! overwritten by a subsequent write. //! -//! 
The textual representation of a `CausalContext`, which we call a +//! The textual representation of a CausalContext, which we call a //! "causality token", is used in the API and must be sent along with //! each write or delete operation to indicate the previously seen //! versions that we want to overwrite or delete. diff --git a/src/model/k2v/item_table.rs b/src/model/k2v/item_table.rs index 9378a1d1..9e3ba5a5 100644 --- a/src/model/k2v/item_table.rs +++ b/src/model/k2v/item_table.rs @@ -56,7 +56,7 @@ mod v08 { pub use v08::*; impl K2VItem { - /// Creates a new `K2VItem` when no previous entry existed in the db + /// Creates a new K2VItem when no previous entry existed in the db pub fn new(bucket_id: Uuid, partition_key: String, sort_key: String) -> Self { Self { partition: K2VItemPartition { @@ -67,7 +67,7 @@ impl K2VItem { items: BTreeMap::new(), } } - /// Updates a `K2VItem` with a new value or a deletion event + /// Updates a K2VItem with a new value or a deletion event pub fn update( &mut self, this_node: Uuid, diff --git a/src/model/k2v/rpc.rs b/src/model/k2v/rpc.rs index 8fcf8309..821f4549 100644 --- a/src/model/k2v/rpc.rs +++ b/src/model/k2v/rpc.rs @@ -126,7 +126,7 @@ impl K2VRpcHandler { .item_table .data .replication - .storage_nodes(&partition.hash())?; + .storage_nodes(&partition.hash()); who.sort(); self.system @@ -165,7 +165,7 @@ impl K2VRpcHandler { .item_table .data .replication - .storage_nodes(&partition.hash())?; + .storage_nodes(&partition.hash()); who.sort(); call_list.entry(who).or_default().push(InsertedItem { @@ -222,7 +222,7 @@ impl K2VRpcHandler { .item_table .data .replication - .storage_nodes(&poll_key.partition.hash())?; + .storage_nodes(&poll_key.partition.hash()); let rpc = self.system.rpc_helper().try_call_many( &self.endpoint, @@ -233,7 +233,7 @@ impl K2VRpcHandler { timeout_msec, }, RequestStrategy::with_priority(PRIO_NORMAL) - .with_quorum(self.item_table.data.replication.read_quorum()?) 
+ .with_quorum(self.item_table.data.replication.read_quorum()) .send_all_at_once(true) .without_timeout(), ); @@ -283,8 +283,8 @@ impl K2VRpcHandler { .item_table .data .replication - .storage_nodes(&range.partition.hash())?; - let quorum = self.item_table.data.replication.read_quorum()?; + .storage_nodes(&range.partition.hash()); + let quorum = self.item_table.data.replication.read_quorum(); let msg = K2VRpc::PollRange { range, seen_str, @@ -451,7 +451,10 @@ impl K2VRpcHandler { let mut value = self .item_table - .get_local(&key.partition, &key.sort_key)? + .data + .read_entry(&key.partition, &key.sort_key)? + .map(|bytes| self.item_table.data.decode_entry(&bytes[..])) + .transpose()? .unwrap_or_else(|| { K2VItem::new( key.partition.bucket_id, diff --git a/src/model/k2v/seen.rs b/src/model/k2v/seen.rs index 9616fcb4..59d4ca5b 100644 --- a/src/model/k2v/seen.rs +++ b/src/model/k2v/seen.rs @@ -1,4 +1,4 @@ -//! Implements a `RangeSeenMarker`, a data type used in the `PollRange` API +//! Implements a RangeSeenMarker, a data type used in the PollRange API //! to indicate which items in the range have already been seen //! and which have not been seen yet. //! 
@@ -71,7 +71,7 @@ impl RangeSeenMarker { pub fn canonicalize(&mut self) { let self_vc = &self.vector_clock; - self.items.retain(|_sk, vc| vclock_gt(vc, self_vc)); + self.items.retain(|_sk, vc| vclock_gt(vc, self_vc)) } pub fn encode(&mut self) -> Result { diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 72ca0182..efb95f08 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -2,7 +2,6 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::{self, Crdt}; use garage_util::data::*; -use garage_util::time::now_msec; use garage_table::{DeletedFilter, EmptyKey, Entry, TableSchema}; @@ -27,7 +26,7 @@ mod v08 { /// Configuration for a key #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub struct KeyParams { - /// The `secret_key` associated (immutable) + /// The secret_key associated (immutable) pub secret_key: String, /// Name for the key @@ -49,82 +48,13 @@ mod v08 { impl garage_util::migrate::InitialFormat for Key {} } -mod v2 { - use crate::permission::BucketKeyPerm; - use garage_util::crdt; - use garage_util::data::Uuid; - use serde::{Deserialize, Serialize}; - - use super::v08; - - /// An api key - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct Key { - /// The id of the key (immutable), used as partition key - pub key_id: String, - - /// Internal state of the key - pub state: crdt::Deletable, - } - - /// Configuration for a key - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct KeyParams { - /// Key's creation date, if known (older versions of Garage didn't keep track - /// of this information) - pub created: Option, - /// The `secret_key` associated (immutable) - pub secret_key: String, - - /// Name for the key - pub name: crdt::Lww, - /// The optional time of expiration of the key - pub expiration: crdt::Lww>, - - /// Flag to allow users having this key to create buckets - pub allow_create_bucket: crdt::Lww, - - /// If the key is present: it gives some 
permissions, - /// a map of bucket IDs (uuids) to permissions. - /// Otherwise no permissions are granted to key - pub authorized_buckets: crdt::Map, - - /// A key can have a local view of buckets names it is - /// the only one to see, this is the namespace for these aliases - pub local_aliases: crdt::LwwMap>, - } - - impl garage_util::migrate::Migrate for Key { - const VERSION_MARKER: &'static [u8] = b"G2key"; - - type Previous = v08::Key; - - fn migrate(old: v08::Key) -> Key { - Key { - key_id: old.key_id, - state: old.state.map(|x| KeyParams { - created: None, - secret_key: x.secret_key, - name: x.name, - expiration: crdt::Lww::raw(0, None), - allow_create_bucket: x.allow_create_bucket, - authorized_buckets: x.authorized_buckets, - local_aliases: x.local_aliases, - }), - } - } - } -} - -pub use v2::*; +pub use v08::*; impl KeyParams { fn new(secret_key: &str, name: &str) -> Self { KeyParams { - created: Some(now_msec()), secret_key: secret_key.to_string(), name: crdt::Lww::new(name.to_string()), - expiration: crdt::Lww::new(None), allow_create_bucket: crdt::Lww::new(false), authorized_buckets: crdt::Map::new(), local_aliases: crdt::LwwMap::new(), @@ -135,7 +65,6 @@ impl KeyParams { impl Crdt for KeyParams { fn merge(&mut self, o: &Self) { self.name.merge(&o.name); - self.expiration.merge(&o.expiration); self.allow_create_bucket.merge(&o.allow_create_bucket); self.authorized_buckets.merge(&o.authorized_buckets); self.local_aliases.merge(&o.local_aliases); @@ -155,23 +84,12 @@ impl Key { /// Import a key from it's parts pub fn import(key_id: &str, secret_key: &str, name: &str) -> Result { - if key_id.len() < 8 { - return Err("Key identifiers should be at least 8 characters long"); + if key_id.len() != 26 || &key_id[..2] != "GK" || hex::decode(&key_id[2..]).is_err() { + return Err("The specified key ID is not a valid Garage key ID (starts with `GK`, followed by 12 hex-encoded bytes)"); } - if !key_id - .chars() - .all(|c| c.is_ascii_alphanumeric() || 
"-_.".contains(c)) - { - return Err("Key identifiers should be composed only of ASCII alphanumeric characters and characters '-', '_' and '.'"); - } - - if secret_key.len() < 16 { - return Err("Secret keys should be at least 16 characters long"); - } - - if !secret_key.chars().all(|c| c.is_ascii_graphic()) { - return Err("Secret keys should be composed only of graphic ASCII characters (U+0021 to U+007E)"); + if secret_key.len() != 64 || hex::decode(&secret_key).is_err() { + return Err("The specified secret key is not a valid Garage secret key (composed of 32 hex-encoded bytes)"); } Ok(Self { @@ -227,15 +145,6 @@ impl Key { } } -impl KeyParams { - pub fn is_expired(&self, ts_now: u64) -> bool { - match *self.expiration.get() { - None => false, - Some(exp) => ts_now >= exp, - } - } -} - impl Entry for Key { fn partition_key(&self) -> &EmptyKey { &EmptyKey diff --git a/src/model/lib.rs b/src/model/lib.rs index b4dc1e81..1939a7a9 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -5,7 +5,6 @@ pub mod permission; pub mod index_counter; -pub mod admin_token_table; pub mod bucket_alias_table; pub mod bucket_table; pub mod key_table; diff --git a/src/model/s3/block_ref_table.rs b/src/model/s3/block_ref_table.rs index 67db109e..57eb7b16 100644 --- a/src/model/s3/block_ref_table.rs +++ b/src/model/s3/block_ref_table.rs @@ -98,7 +98,7 @@ pub fn block_ref_recount_fn( .upgrade() .ok_or_message("cannot upgrade weak ptr to block_ref_table") .map_err(db::TxError::Abort)?; - calculate_refcount(&table, tx, block) + Ok(calculate_refcount(&table, tx, block)?) 
}) } diff --git a/src/model/s3/lifecycle_worker.rs b/src/model/s3/lifecycle_worker.rs index f014337f..af00437e 100644 --- a/src/model/s3/lifecycle_worker.rs +++ b/src/model/s3/lifecycle_worker.rs @@ -41,7 +41,6 @@ pub struct LifecycleWorker { persister: PersisterShared, } -#[expect(clippy::large_enum_variant)] enum State { Completed(NaiveDate), Running { @@ -65,11 +64,7 @@ pub fn register_bg_vars( vars: &mut vars::BgVars, ) { vars.register_ro(persister, "lifecycle-last-completed", |p| { - p.get_with(|x| { - x.last_completed - .clone() - .unwrap_or_else(|| "never".to_string()) - }) + p.get_with(|x| x.last_completed.clone().unwrap_or("never".to_string())) }); } @@ -373,7 +368,6 @@ async fn process_object( Ok(Skip::NextObject) } -#[expect(clippy::nonminimal_bool)] fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter) -> bool { let size = match version_data { ObjectVersionData::Inline(meta, _) | ObjectVersionData::FirstBlock(meta, _) => meta.size, diff --git a/src/model/s3/mpu_table.rs b/src/model/s3/mpu_table.rs index 52734d27..c9f79caf 100644 --- a/src/model/s3/mpu_table.rs +++ b/src/model/s3/mpu_table.rs @@ -31,12 +31,12 @@ mod v09 { /// The timestamp at which the multipart upload was created pub timestamp: u64, /// Is this multipart upload deleted - /// The `MultipartUpload` is marked as deleted as soon as the + /// The MultipartUpload is marked as deleted as soon as the /// multipart upload is either completed or aborted pub deleted: crdt::Bool, /// List of uploaded parts, key = (part number, timestamp) /// In case of retries, all versions for each part are kept - /// Everything is cleaned up only once the `MultipartUpload` is marked deleted + /// Everything is cleaned up only once the MultipartUpload is marked deleted pub parts: crdt::Map, // Back link to bucket+key so that we can find the object this mpu @@ -58,9 +58,9 @@ mod v09 { /// The version of an uploaded part #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub 
struct MpuPart { - /// Links to a Version in `VersionTable` + /// Links to a Version in VersionTable pub version: Uuid, - /// `ETag` of the content of this part (known only once done uploading) + /// ETag of the content of this part (known only once done uploading) pub etag: Option, /// Checksum requested by x-amz-checksum-algorithm #[serde(default)] @@ -160,8 +160,8 @@ impl Crdt for MpuPart { (x, _) => x, }; self.checksum = match (self.checksum.take(), &other.checksum) { - (None, Some(_)) => other.checksum, - (Some(x), Some(y)) if x < *y => other.checksum, + (None, Some(_)) => other.checksum.clone(), + (Some(x), Some(y)) if x < *y => other.checksum.clone(), (x, _) => x, }; } diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs index aff4bbfd..6c33b79b 100644 --- a/src/model/s3/object_table.rs +++ b/src/model/s3/object_table.rs @@ -249,7 +249,7 @@ mod v010 { #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] pub enum ObjectVersionEncryption { SseC { - /// Encrypted serialized `ObjectVersionInner` struct. + /// Encrypted serialized ObjectVersionInner struct. /// This is never compressed, just encrypted using AES256-GCM. #[serde(with = "serde_bytes")] inner: Vec, @@ -257,13 +257,6 @@ mod v010 { /// (compression happens before encryption, whereas for non-encrypted /// objects, compression is handled at the level of the block manager) compressed: bool, - /// Whether the encryption uses an Object Encryption Key derived - /// from the master SSE-C key, instead of the master SSE-C key itself. - /// This is the case of objects created in Garage v2+. - /// This field is kept for compatibility with Garage v2.0.0-beta1, - /// which did not yet implement the v2 module below. 
- #[serde(default)] - use_oek: bool, }, Plaintext { /// Plain-text headers @@ -284,7 +277,6 @@ mod v010 { pub enum ChecksumAlgorithm { Crc32, Crc32c, - Crc64Nvme, Sha1, Sha256, } @@ -294,7 +286,6 @@ mod v010 { pub enum ChecksumValue { Crc32(#[serde(with = "serde_bytes")] [u8; 4]), Crc32c(#[serde(with = "serde_bytes")] [u8; 4]), - Crc64Nvme(#[serde(with = "serde_bytes")] [u8; 8]), Sha1(#[serde(with = "serde_bytes")] [u8; 20]), Sha256(#[serde(with = "serde_bytes")] [u8; 32]), } @@ -380,222 +371,7 @@ mod v010 { } } -mod v2 { - use garage_util::data::{Hash, Uuid}; - use garage_util::migrate::Migrate; - use serde::{Deserialize, Serialize}; - - use super::v010; - pub use v010::{ChecksumAlgorithm, ChecksumValue}; - - /// An object - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct Object { - /// The bucket in which the object is stored, used as partition key - pub bucket_id: Uuid, - - /// The key at which the object is stored in its bucket, used as sorting key - pub key: String, - - /// The list of currently stored versions of the object - pub(super) versions: Vec, - } - - /// Information about a version of an object - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub struct ObjectVersion { - /// Id of the version - pub uuid: Uuid, - /// Timestamp of when the object was created - pub timestamp: u64, - /// State of the version - pub state: ObjectVersionState, - } - - /// State of an object version - #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] - pub enum ObjectVersionState { - /// The version is being received - Uploading { - /// Indicates whether this is a multipart upload - multipart: bool, - /// Checksum algorithm and algorithm type to use - checksum_algorithm: Option<(ChecksumAlgorithm, ChecksumType)>, - /// Encryption params + headers to be included in the final object - encryption: ObjectVersionEncryption, - }, - /// The version is fully received - Complete(ObjectVersionData), - /// The version uploaded 
containded errors or the upload was explicitly aborted - Aborted, - } - - /// Data stored in object version - #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] - pub enum ObjectVersionData { - /// The object was deleted, this Version is a tombstone to mark it as such - DeleteMarker, - /// The object is short, it's stored inlined. - /// It is never compressed. For encrypted objects, it is encrypted using - /// AES256-GCM, like the encrypted headers. - Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec), - /// The object is not short, Hash of first block is stored here, next segments hashes are - /// stored in the version table - FirstBlock(ObjectVersionMeta, Hash), - } - - /// Metadata about the object version - #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] - pub struct ObjectVersionMeta { - /// Size of the object. If object is encrypted/compressed, - /// this is always the size of the unencrypted/uncompressed data - pub size: u64, - /// etag of the object - pub etag: String, - /// Encryption params + headers (encrypted or plaintext) - pub encryption: ObjectVersionEncryption, - } - - /// Encryption information + metadata - #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] - pub enum ObjectVersionEncryption { - SseC { - /// Encrypted serialized `ObjectVersionInner` struct. - /// This is never compressed, just encrypted using AES256-GCM. - #[serde(with = "serde_bytes")] - inner: Vec, - /// Whether data blocks are compressed in addition to being encrypted - /// (compression happens before encryption, whereas for non-encrypted - /// objects, compression is handled at the level of the block manager) - compressed: bool, - /// Whether the encryption uses an Object Encryption Key derived - /// from the master SSE-C key, instead of the master SSE-C key itself. 
- /// This is the case of objects created in Garage v2+ - use_oek: bool, - }, - Plaintext { - /// Plain-text headers - inner: ObjectVersionMetaInner, - }, - } - - /// Vector of headers, as tuples of the format (header name, header value) - /// Note: checksum can be Some(_) with `checksum_type` = None for objects that - /// have been migrated from Garage version before v2.0, as the distinction between - /// full-object and composite checksums was not implemented yet. - #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] - pub struct ObjectVersionMetaInner { - pub headers: HeaderList, - pub checksum: Option, - // checksum_type has to be stored separately, because when migrating - // from older versions of Garage, we can't know the correct value in - // ObjectVersionMetaInner::migrate (because it cannot take an argument - // that says whether the object was multipart or not) - pub checksum_type: Option, - } - - pub type HeaderList = Vec<(String, String)>; - - #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Serialize, Deserialize)] - pub enum ChecksumType { - FullObject, - Composite, - } - - impl garage_util::migrate::Migrate for Object { - const VERSION_MARKER: &'static [u8] = b"G2s3ob"; - - type Previous = v010::Object; - - fn migrate(old: v010::Object) -> Object { - Object { - bucket_id: old.bucket_id, - key: old.key, - versions: old.versions.into_iter().map(migrate_version).collect(), - } - } - } - - fn migrate_version(old: v010::ObjectVersion) -> ObjectVersion { - ObjectVersion { - uuid: old.uuid, - timestamp: old.timestamp, - state: match old.state { - v010::ObjectVersionState::Uploading { - multipart, - checksum_algorithm, - encryption, - } => ObjectVersionState::Uploading { - multipart, - checksum_algorithm: checksum_algorithm.map(|algo| match multipart { - false => (algo, ChecksumType::FullObject), - true => (algo, ChecksumType::Composite), - }), - encryption: migrate_encryption(encryption), - }, - 
v010::ObjectVersionState::Complete(d) => { - ObjectVersionState::Complete(migrate_data(d)) - } - v010::ObjectVersionState::Aborted => ObjectVersionState::Aborted, - }, - } - } - - fn migrate_data(old: v010::ObjectVersionData) -> ObjectVersionData { - match old { - v010::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker, - v010::ObjectVersionData::Inline(meta, data) => { - ObjectVersionData::Inline(migrate_meta(meta), data) - } - v010::ObjectVersionData::FirstBlock(meta, fb) => { - ObjectVersionData::FirstBlock(migrate_meta(meta), fb) - } - } - } - - fn migrate_meta(old: v010::ObjectVersionMeta) -> ObjectVersionMeta { - ObjectVersionMeta { - size: old.size, - etag: old.etag, - encryption: migrate_encryption(old.encryption), - } - } - - fn migrate_encryption(old: v010::ObjectVersionEncryption) -> ObjectVersionEncryption { - match old { - v010::ObjectVersionEncryption::SseC { - inner, - compressed, - use_oek, - } => ObjectVersionEncryption::SseC { - inner, - compressed, - use_oek, - }, - v010::ObjectVersionEncryption::Plaintext { inner } => { - ObjectVersionEncryption::Plaintext { - inner: ObjectVersionMetaInner::migrate(inner), - } - } - } - } - - impl Migrate for ObjectVersionMetaInner { - const VERSION_MARKER: &'static [u8] = b"G2s3om"; - - type Previous = v010::ObjectVersionMetaInner; - - fn migrate(old: v010::ObjectVersionMetaInner) -> ObjectVersionMetaInner { - ObjectVersionMetaInner { - headers: old.headers, - checksum: old.checksum, - checksum_type: None, - } - } - } -} - -pub use v2::*; +pub use v010::*; impl Object { /// Initialize an Object struct from parts @@ -665,9 +441,9 @@ impl ObjectVersion { /// Is the object version currently being uploaded /// - /// matches only multipart uploads if `check_multipart` is Some(true) - /// matches only non-multipart uploads if `check_multipart` is Some(false) - /// matches both if `check_multipart` is None + /// matches only multipart uploads if check_multipart is Some(true) + /// matches only 
non-multipart uploads if check_multipart is Some(false) + /// matches both if check_multipart is None pub fn is_uploading(&self, check_multipart: Option) -> bool { match &self.state { ObjectVersionState::Uploading { multipart, .. } => { @@ -711,7 +487,6 @@ impl ChecksumValue { match self { ChecksumValue::Crc32(_) => ChecksumAlgorithm::Crc32, ChecksumValue::Crc32c(_) => ChecksumAlgorithm::Crc32c, - ChecksumValue::Crc64Nvme(_) => ChecksumAlgorithm::Crc64Nvme, ChecksumValue::Sha1(_) => ChecksumAlgorithm::Sha1, ChecksumValue::Sha256(_) => ChecksumAlgorithm::Sha256, } @@ -763,9 +538,9 @@ pub enum ObjectFilter { IsData, /// Is the object version currently being uploaded /// - /// matches only multipart uploads if `check_multipart` is Some(true) - /// matches only non-multipart uploads if `check_multipart` is Some(false) - /// matches both if `check_multipart` is None + /// matches only multipart uploads if check_multipart is Some(true) + /// matches only non-multipart uploads if check_multipart is Some(false) + /// matches both if check_multipart is None IsUploading { check_multipart: Option }, } diff --git a/src/model/snapshot.rs b/src/model/snapshot.rs index d09e8e4a..8e8995f9 100644 --- a/src/model/snapshot.rs +++ b/src/model/snapshot.rs @@ -1,5 +1,5 @@ use std::fs; -use std::path::Path; +use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; @@ -20,7 +20,7 @@ static SNAPSHOT_MUTEX: Mutex<()> = Mutex::new(()); // ================ snapshotting logic ===================== -/// Run `snapshot_metadata` in a blocking thread and async await on it +/// Run snapshot_metadata in a blocking thread and async await on it pub async fn async_snapshot_metadata(garage: &Arc) -> Result<(), Error> { let garage = garage.clone(); let worker = tokio::task::spawn_blocking(move || snapshot_metadata(&garage)); @@ -67,9 +67,9 @@ pub fn snapshot_metadata(garage: &Garage) -> Result<(), Error> { Ok(()) } -fn cleanup_snapshots(snapshots_dir: &Path) 
-> Result<(), Error> { +fn cleanup_snapshots(snapshots_dir: &PathBuf) -> Result<(), Error> { let mut snapshots = - fs::read_dir(snapshots_dir)?.collect::, std::io::Error>>()?; + fs::read_dir(&snapshots_dir)?.collect::, std::io::Error>>()?; snapshots.retain(|x| x.file_name().len() > 8); snapshots.sort_by_key(|x| x.file_name()); @@ -130,7 +130,7 @@ impl Worker for AutoSnapshotWorker { async_snapshot_metadata(&self.garage).await?; - let rand_factor = 1f32 + rand::rng().random::() / 5f32; + let rand_factor = 1f32 + thread_rng().gen::() / 5f32; self.next_snapshot = Instant::now() + self.snapshot_interval.mul_f32(rand_factor); Ok(WorkerState::Idle) diff --git a/src/net/Cargo.toml b/src/net/Cargo.toml index c2182a5d..71f42c68 100644 --- a/src/net/Cargo.toml +++ b/src/net/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_net" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -27,7 +27,6 @@ rmp-serde.workspace = true hex.workspace = true rand.workspace = true -socket2.workspace = true log.workspace = true arc-swap.workspace = true @@ -43,6 +42,3 @@ opentelemetry-contrib = { workspace = true, optional = true } [dev-dependencies] pretty_env_logger.workspace = true - -[lints] -workspace = true diff --git a/src/net/bytes_buf.rs b/src/net/bytes_buf.rs index a3a5ca34..1d928ffb 100644 --- a/src/net/bytes_buf.rs +++ b/src/net/bytes_buf.rs @@ -17,7 +17,7 @@ pub struct BytesBuf { } impl BytesBuf { - /// Creates a new empty `BytesBuf` + /// Creates a new empty BytesBuf pub fn new() -> Self { Self { buf: VecDeque::new(), @@ -25,13 +25,13 @@ impl BytesBuf { } } - /// Returns the number of bytes stored in the `BytesBuf` + /// Returns the number of bytes stored in the BytesBuf #[inline] pub fn len(&self) -> usize { self.buf_len } - /// Returns true iff the `BytesBuf` contains zero bytes + /// Returns true iff the BytesBuf contains zero bytes #[inline] pub fn is_empty(&self) -> bool { self.buf_len == 0 @@ -63,7 +63,7 @@ impl 
BytesBuf { } } - /// Takes at most `max_len` bytes from the left of the buffer + /// Takes at most max_len bytes from the left of the buffer pub fn take_max(&mut self, max_len: usize) -> Bytes { if self.buf_len <= max_len { self.take_all() @@ -73,7 +73,7 @@ impl BytesBuf { } /// Take exactly len bytes from the left of the buffer, returns None if - /// the `BytesBuf` doesn't contain enough data + /// the BytesBuf doesn't contain enough data pub fn take_exact(&mut self, len: usize) -> Option { if self.buf_len < len { None @@ -130,7 +130,7 @@ impl BytesBuf { /// Return the content as a stream of individual chunks pub fn into_stream(self) -> ByteStream { use futures::stream::StreamExt; - Box::pin(futures::stream::iter(self.buf).map(Ok)) + Box::pin(futures::stream::iter(self.buf).map(|x| Ok(x))) } } @@ -161,6 +161,7 @@ mod test { #[test] fn test_bytes_buf() { let mut buf = BytesBuf::new(); + assert!(buf.len() == 0); assert!(buf.is_empty()); buf.extend(Bytes::from(b"Hello, world!".to_vec())); @@ -175,6 +176,7 @@ mod test { buf.take_all(), Bytes::from(b"Hello, world!1234567890".to_vec()) ); + assert!(buf.len() == 0); assert!(buf.is_empty()); buf.extend(Bytes::from(b"1234567890".to_vec())); @@ -191,6 +193,7 @@ mod test { buf.take_exact(11), Some(Bytes::from(b"llo, world!".to_vec())) ); + assert!(buf.len() == 0); assert!(buf.is_empty()); } } diff --git a/src/net/client.rs b/src/net/client.rs index 5834b4ab..20e1dacd 100644 --- a/src/net/client.rs +++ b/src/net/client.rs @@ -180,7 +180,8 @@ impl ClientConn { "Too many inflight requests! RequestID collision. Interrupting previous request." 
); let _ = old_ch.send(Box::pin(futures::stream::once(async move { - Err(std::io::Error::other( + Err(std::io::Error::new( + std::io::ErrorKind::Other, "RequestID collision, too many inflight requests", )) }))); @@ -277,7 +278,7 @@ impl Stream for CancelOnDropStream { let res = this.stream.poll_next(cx); if matches!(res, Poll::Ready(None)) { if let Some(c) = this.cancel.take() { - std::mem::forget(c); + std::mem::forget(c) } } res diff --git a/src/net/endpoint.rs b/src/net/endpoint.rs index af484231..3ab1048a 100644 --- a/src/net/endpoint.rs +++ b/src/net/endpoint.rs @@ -87,7 +87,7 @@ where { pub(crate) fn new(netapp: Arc, path: String) -> Self { Self { - _phantom: PhantomData, + _phantom: PhantomData::default(), netapp, path, handler: ArcSwapOption::from(None), diff --git a/src/net/error.rs b/src/net/error.rs index 7010d51e..f67794ed 100644 --- a/src/net/error.rs +++ b/src/net/error.rs @@ -72,7 +72,7 @@ where fn log_err(self, msg: &'static str) { if let Err(e) = self { error!("Error: {}: {}", msg, Into::::into(e)); - } + }; } } diff --git a/src/net/lib.rs b/src/net/lib.rs index 47cac5d6..8e30e40f 100644 --- a/src/net/lib.rs +++ b/src/net/lib.rs @@ -10,7 +10,7 @@ //! //! Of particular interest, read the documentation for the `netapp::NetApp` type, //! the `message::Message` trait, and `proto::RequestPriority` to learn more -//! about message prioritization. +//! about message priorization. //! Also check out the examples to learn how to use this crate. pub mod bytes_buf; diff --git a/src/net/message.rs b/src/net/message.rs index 6774520f..59afb058 100644 --- a/src/net/message.rs +++ b/src/net/message.rs @@ -14,7 +14,7 @@ use crate::util::*; /// Priority of a request (click to read more about priorities). /// -/// This priority value is used to prioritize messages +/// This priority value is used to priorize messages /// in the send queue of the client, and their responses in the send queue of the /// server. Lower values mean higher priority. 
/// @@ -78,7 +78,7 @@ impl OrderTag { /// let tag_2 = stream.order(2); /// ``` pub fn stream() -> OrderTagStream { - OrderTagStream(rand::rng().random()) + OrderTagStream(thread_rng().gen()) } } impl OrderTagStream { @@ -100,10 +100,10 @@ pub trait Message: Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static // ---- -/// The `Req` is a helper object used to create requests and attach them -/// a stream of data. If the stream is a fixed Bytes and not a `ByteStream`, -/// `Req` is cheaply cloneable to allow the request to be sent to different -/// peers (Clone will panic if the stream is a `ByteStream`). +/// The Req is a helper object used to create requests and attach them +/// a stream of data. If the stream is a fixed Bytes and not a ByteStream, +/// Req is cheaply cloneable to allow the request to be sent to different +/// peers (Clone will panic if the stream is a ByteStream). pub struct Req { pub(crate) msg: Arc, pub(crate) msg_ser: Option, @@ -260,7 +260,7 @@ where // ---- -/// The `Resp` represents a full response from a RPC that may have +/// The Resp represents a full response from a RPC that may have /// an attached stream. pub struct Resp { pub(crate) _phantom: PhantomData, @@ -382,7 +382,7 @@ impl AttachedStream { // ---- ---- -/// Encoding for requests into a `ByteStream`: +/// Encoding for requests into a ByteStream: /// - priority: u8 /// - path length: u8 /// - path: [u8; path length] @@ -457,18 +457,16 @@ impl ReqEnc { } } -/// Encoding for responses into a `ByteStream`: -/// +/// Encoding for responses into a ByteStream: /// IF SUCCESS: /// - 0: u8 /// - msg len: u32 /// - msg [u8; ..] 
/// - the attached stream as the rest of the encoded stream -/// /// IF ERROR: /// - message length + 1: u8 /// - error code: u8 -/// - message: [u8; `message_length`] +/// - message: [u8; message_length] pub(crate) struct RespEnc { msg: Bytes, stream: Option, @@ -495,7 +493,10 @@ impl RespEnc { (res_stream, order_tag) } Err(err) => { - let err = std::io::Error::other(format!("netapp error: {}", err)); + let err = std::io::Error::new( + std::io::ErrorKind::Other, + format!("netapp error: {}", err), + ); ( Box::pin(futures::stream::once(async move { Err(err) })), None, diff --git a/src/net/netapp.rs b/src/net/netapp.rs index 4a53f1d1..36c6fc88 100644 --- a/src/net/netapp.rs +++ b/src/net/netapp.rs @@ -1,7 +1,6 @@ use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::sync::{Arc, RwLock}; -use std::time::Duration; use log::{debug, error, info, trace, warn}; @@ -35,30 +34,12 @@ pub type NetworkKey = sodiumoxide::crypto::auth::Key; /// composed of 8 bytes for Netapp version and 8 bytes for client version pub(crate) type VersionTag = [u8; 16]; -/// Value of `garage_net` version used in the version tag -/// We are no longer using prefix `netapp` as `garage_net` is forked from the netapp crate. -/// Since Garage v1.0, we have replaced the prefix by `grgnet` (shorthand for `garage_net`). +/// Value of garage_net version used in the version tag +/// We are no longer using prefix `netapp` as garage_net is forked from the netapp crate. +/// Since Garage v1.0, we have replaced the prefix by `grgnet` (shorthand for garage_net). pub(crate) const NETAPP_VERSION_TAG: u64 = 0x6772676e65740010; // grgnet 0x0010 (1.0) -/// Time a connection must be idle before the first keepalive probe is sent. -const TCP_KEEPALIVE_TIME: Duration = Duration::from_secs(30); -/// Interval between keepalive probes after the first. -const TCP_KEEPALIVE_INTERVAL: Duration = Duration::from_secs(10); - -/// Timeout for outgoing TCP connection attempts. 
-/// Caps per-address connection time instead of relying on the kernel's -/// TCP SYN timeout (75-130s on Linux, ~20s on macOS). -const CONNECT_TIMEOUT: Duration = Duration::from_secs(10); - -fn set_keepalive(stream: &TcpStream) -> Result<(), std::io::Error> { - let sock_ref = socket2::SockRef::from(stream); - let keepalive = socket2::TcpKeepalive::new() - .with_time(TCP_KEEPALIVE_TIME) - .with_interval(TCP_KEEPALIVE_INTERVAL); - sock_ref.set_tcp_keepalive(&keepalive) -} - -/// `HelloMessage` is sent by the client on a Netapp connection to indicate +/// HelloMessage is sent by the client on a Netapp connection to indicate /// that they are also a server and ready to receive incoming connections /// at the specified address and port. If the client doesn't know their /// public address, they don't need to specify it and we look at the @@ -76,9 +57,9 @@ impl Message for HelloMessage { type OnConnectHandler = Box; type OnDisconnectHandler = Box; -/// `NetApp` is the main class that handles incoming and outgoing connections. +/// NetApp is the main class that handles incoming and outgoing connections. /// -/// `NetApp` can be used in a stand-alone fashion or together with a peering strategy. +/// NetApp can be used in a stand-alone fashion or together with a peering strategy. /// If using it alone, you will want to set `on_connect` and `on_disconnect` events /// in order to manage information about the current peer list. pub struct NetApp { @@ -110,7 +91,7 @@ struct ListenParams { } impl NetApp { - /// Creates a new instance of `NetApp`, which can serve either as a full p2p node, + /// Creates a new instance of NetApp, which can serve either as a full p2p node, /// or just as a passive client. To upgrade to a full p2p node, spawn a listener /// using `.listen()` /// @@ -199,13 +180,13 @@ impl NetApp { .is_some() { panic!("Redefining endpoint: {}", path); - } + }; endpoint } /// Main listening process for our app. 
This future runs during the whole /// run time of our application. - /// If this is not called, the `NetApp` instance remains a passive client. + /// If this is not called, the NetApp instance remains a passive client. pub async fn listen( self: Arc, listen_addr: SocketAddr, @@ -271,14 +252,7 @@ impl NetApp { _ = must_exit.changed() => continue, }; - if let Err(e) = set_keepalive(&socket) { - warn!( - "Failed to set keepalive on connection from {}: {}", - peer_addr, e - ); - } - - debug!( + info!( "Incoming connection from {}, negotiating handshake...", peer_addr ); @@ -336,17 +310,10 @@ impl NetApp { TcpSocket::new_v6()? }; socket.bind(SocketAddr::new(addr, 0))?; - tokio::time::timeout(CONNECT_TIMEOUT, socket.connect(ip)) - .await - .map_err(|_| Error::Message(format!("connect to {} timed out", ip)))?? + socket.connect(ip).await? } - None => tokio::time::timeout(CONNECT_TIMEOUT, TcpStream::connect(ip)) - .await - .map_err(|_| Error::Message(format!("connect to {} timed out", ip)))??, + None => TcpStream::connect(ip).await?, }; - if let Err(e) = set_keepalive(&stream) { - warn!("Failed to set keepalive on connection to {}: {}", ip, e); - } info!("Connected to {}, negotiating handshake...", ip); ClientConn::init(self, stream, id).await?; Ok(()) diff --git a/src/net/peering.rs b/src/net/peering.rs index b96b6f52..08378a08 100644 --- a/src/net/peering.rs +++ b/src/net/peering.rs @@ -6,7 +6,6 @@ use std::time::{Duration, Instant}; use arc_swap::ArcSwap; use log::{debug, info, trace, warn}; -use rand::seq::SliceRandom; use serde::{Deserialize, Serialize}; use tokio::select; @@ -28,16 +27,6 @@ const LOOP_DELAY: Duration = Duration::from_secs(1); const FAILED_PING_THRESHOLD: usize = 4; const DEFAULT_PING_TIMEOUT_MILLIS: u64 = 10_000; -const ADDR_MAX_CONSECUTIVE_FAILURES: usize = 3; -const KEEP_MAX_ADDRS: usize = 5; - -/// A known address for a peer, with connection status tracking. 
-#[derive(Debug, Clone)] -struct KnownAddr { - addr: SocketAddr, - last_success: Option, - consecutive_failures: usize, -} // -- Protocol messages -- @@ -64,8 +53,8 @@ impl Message for PeerListMessage { #[derive(Debug)] struct PeerInfoInternal { - /// Per-address connection tracking: success history and failure counts - known_addrs: Vec, + // known_addrs contains all of the addresses everyone gave us + known_addrs: Vec, state: PeerConnState, last_send_ping: Option, @@ -77,15 +66,7 @@ struct PeerInfoInternal { impl PeerInfoInternal { fn new(state: PeerConnState, known_addr: Option) -> Self { Self { - known_addrs: known_addr - .map(|addr| { - vec![KnownAddr { - addr, - last_success: None, - consecutive_failures: 0, - }] - }) - .unwrap_or_default(), + known_addrs: known_addr.map(|x| vec![x]).unwrap_or_default(), state, last_send_ping: None, last_seen: None, @@ -94,27 +75,20 @@ impl PeerInfoInternal { } } fn add_addr(&mut self, addr: SocketAddr) -> bool { - if let Some(ka) = self.known_addrs.iter_mut().find(|ka| ka.addr == addr) { - // Reset failure count when an address is re-advertised (via gossip - // or incoming connection), since it may have become reachable again. - ka.consecutive_failures = 0; - false - } else { - self.known_addrs.push(KnownAddr { - addr, - last_success: None, - consecutive_failures: 0, - }); + if !self.known_addrs.contains(&addr) { + self.known_addrs.push(addr); // If we are learning a new address for this node, // we want to retry connecting self.state = match self.state { PeerConnState::Trying(_) => PeerConnState::Trying(0), - PeerConnState::Waiting(_, _) | PeerConnState::Abandoned => { + PeerConnState::Waiting(_, _) | PeerConnState::Abandonned => { PeerConnState::Waiting(0, Instant::now()) } x @ (PeerConnState::Ourself | PeerConnState::Connected { .. 
}) => x, }; true + } else { + false } } } @@ -145,7 +119,7 @@ impl PeerInfo { } } -/// `PeerConnState`: possible states for our tentative connections to given peer +/// PeerConnState: possible states for our tentative connections to given peer /// This structure is only interested in recording connection info for outgoing /// TCP connections #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -164,7 +138,7 @@ pub enum PeerConnState { Trying(usize), /// We abandoned trying to connect to this peer (too many failed attempts) - Abandoned, + Abandonned, } impl PeerConnState { @@ -262,7 +236,7 @@ impl PeeringManager { ); known_hosts.update_hash(); - let strategy = Arc::new(Self { + let strat = Arc::new(Self { netapp: netapp.clone(), known_hosts: RwLock::new(known_hosts), public_peer_list: ArcSwap::new(Arc::new(Vec::new())), @@ -272,22 +246,22 @@ impl PeeringManager { ping_timeout_millis: DEFAULT_PING_TIMEOUT_MILLIS.into(), }); - strategy.update_public_peer_list(&strategy.known_hosts.read().unwrap()); + strat.update_public_peer_list(&strat.known_hosts.read().unwrap()); - strategy.ping_endpoint.set_handler(strategy.clone()); - strategy.peer_list_endpoint.set_handler(strategy.clone()); + strat.ping_endpoint.set_handler(strat.clone()); + strat.peer_list_endpoint.set_handler(strat.clone()); - let strategy2 = strategy.clone(); + let strat2 = strat.clone(); netapp.on_connected(move |id: NodeID, addr: SocketAddr, is_incoming: bool| { - strategy2.on_connected(id, addr, is_incoming); + strat2.on_connected(id, addr, is_incoming); }); - let strategy2 = strategy.clone(); + let strat2 = strat.clone(); netapp.on_disconnected(move |id: NodeID, is_incoming: bool| { - strategy2.on_disconnected(id, is_incoming); + strat2.on_disconnected(id, is_incoming); }); - strategy + strat } /// Run the full mesh peering strategy. 
@@ -349,7 +323,7 @@ impl PeeringManager { hex::encode(&id[..8]), h.known_addrs .iter() - .map(|ka| ka.addr.to_string()) + .map(|x| format!("{}", x)) .collect::>() .join(", "), i + 1 @@ -515,43 +489,23 @@ impl PeeringManager { } } - async fn try_connect(self: Arc, id: NodeID, mut addresses: Vec) { - // Sort addresses: most recently successful first, then shuffle addresses that - // were never successful so that they all get a fair chance. - addresses.sort_by(|a, b| match (a.last_success, b.last_success) { - (Some(ta), Some(tb)) => tb.cmp(&ta), - (Some(_), None) => std::cmp::Ordering::Less, - (None, Some(_)) => std::cmp::Ordering::Greater, - (None, None) => std::cmp::Ordering::Equal, - }); - let first_none = addresses - .iter() - .position(|ka| ka.last_success.is_none()) - .unwrap_or(addresses.len()); - addresses[first_none..].shuffle(&mut rand::rng()); - - let mut failed_addrs = Vec::new(); + async fn try_connect(self: Arc, id: NodeID, addresses: Vec) { let conn_addr = { let mut ret = None; - for ka in addresses.iter() { - debug!( - "Trying address {} for peer {}", - ka.addr, - hex::encode(&id[..8]) - ); - match self.netapp.clone().try_connect(ka.addr, id).await { + for addr in addresses.iter() { + debug!("Trying address {} for peer {}", addr, hex::encode(&id[..8])); + match self.netapp.clone().try_connect(*addr, id).await { Ok(()) => { - ret = Some(ka.addr); + ret = Some(*addr); break; } Err(e) => { debug!( "Error connecting to {} at {}: {}", hex::encode(&id[..8]), - ka.addr, + addr, e ); - failed_addrs.push(ka.addr); } } } @@ -566,58 +520,20 @@ impl PeeringManager { hex::encode(&id[..8]), addresses.len() ); - } - - // Update failure/success tracking and prune stale addresses - let mut known_hosts = self.known_hosts.write().unwrap(); - if let Some(host) = known_hosts.list.get_mut(&id) { - if conn_addr.is_none() { + let mut known_hosts = self.known_hosts.write().unwrap(); + if let Some(host) = known_hosts.list.get_mut(&id) { host.state = match host.state { - 
PeerConnState::Trying(i) if i >= CONN_MAX_RETRIES => PeerConnState::Abandoned, PeerConnState::Trying(i) => { - PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL) + if i >= CONN_MAX_RETRIES { + PeerConnState::Abandonned + } else { + PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL) + } } _ => PeerConnState::Waiting(0, Instant::now() + CONN_RETRY_INTERVAL), }; + self.update_public_peer_list(&known_hosts); } - - // Register successes and failures in known address list - for ka in host.known_addrs.iter_mut() { - if conn_addr == Some(ka.addr) { - ka.last_success = Some(Instant::now()); - ka.consecutive_failures = 0; - } else if failed_addrs.contains(&ka.addr) { - ka.consecutive_failures += 1; - } - } - - // If the address list is too big, prune some addresses to keep only a limited number of them. - let before = host.known_addrs.len(); - - while host.known_addrs.len() > KEEP_MAX_ADDRS { - // Prioritize pruning addresses that have too many failures. - // Then, prioritize pruning addresses that were used the longest time ago. 
- let i_prune = host - .known_addrs - .iter() - .enumerate() - .min_by_key(|(_, ka)| pruning_sort_key(ka)) - .unwrap() - .0; - host.known_addrs.remove(i_prune); - } - - let pruned = before - host.known_addrs.len(); - if pruned > 0 { - info!( - "Pruned {} stale address(es) for peer {} ({} remaining)", - pruned, - hex::encode(&id[..8]), - host.known_addrs.len() - ); - } - - self.update_public_peer_list(&known_hosts); } } @@ -697,62 +613,3 @@ impl EndpointHandler for PeeringManager { PeerListMessage { list: peer_list } } } - -fn pruning_sort_key(ka: &KnownAddr) -> (bool, Option) { - ( - ka.consecutive_failures < ADDR_MAX_CONSECUTIVE_FAILURES, - ka.last_success, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::time::{Duration, Instant}; - - #[test] - fn test_pruning_sort_key() { - let now = Instant::now(); - - let addrs = [ - // fourth pruned - KnownAddr { - addr: "0.0.0.0:1234".parse().unwrap(), - last_success: Some(now), - consecutive_failures: 0, - }, - // second pruned - KnownAddr { - addr: "0.0.0.0:1234".parse().unwrap(), - last_success: None, - consecutive_failures: 1, - }, - // third pruned - KnownAddr { - addr: "0.0.0.0:1234".parse().unwrap(), - last_success: Some(now - Duration::from_secs(60)), - consecutive_failures: 2, - }, - // first pruned - KnownAddr { - addr: "0.0.0.0:1234".parse().unwrap(), - last_success: None, - consecutive_failures: 3, - }, - ]; - - let prune = |a: &[KnownAddr]| { - a.iter() - .enumerate() - .min_by_key(|(_, ka)| pruning_sort_key(ka)) - .unwrap() - .0 - }; - - assert_eq!(prune(&addrs[..]), 3); - assert_eq!(prune(&addrs[..3]), 1); - assert_eq!(prune(&addrs[..2]), 1); - assert_eq!(prune(&addrs[1..3]), 0); - assert_eq!(prune(&addrs[1..]), 2); - } -} diff --git a/src/net/recv.rs b/src/net/recv.rs index 1a00ecf6..35a6d71a 100644 --- a/src/net/recv.rs +++ b/src/net/recv.rs @@ -42,8 +42,8 @@ impl Drop for Sender { } } -/// The `RecvLoop` trait, which is implemented both by the client and the server -/// connection objects 
(`ServerConn` and `ClientConn`) adds a method `.recv_loop()` +/// The RecvLoop trait, which is implemented both by the client and the server +/// connection objects (ServerConn and ClientConn) adds a method `.recv_loop()` /// and a prototype of a handler for received messages `.recv_handler()` that /// must be filled by implementors. `.recv_loop()` receives messages in a loop /// according to the protocol defined above: chunks of message in progress of being @@ -62,7 +62,7 @@ pub(crate) trait RecvLoop: Sync + 'static { trace!( "recv_loop({}): in_progress = {:?}", debug_name, - streams.keys().collect::>() + streams.iter().map(|(id, _)| id).collect::>() ); let mut header_id = [0u8; RequestID::BITS as usize / 8]; @@ -70,7 +70,7 @@ pub(crate) trait RecvLoop: Sync + 'static { Ok(_) => (), Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => break, Err(e) => return Err(e.into()), - } + }; let id = RequestID::from_be_bytes(header_id); let mut header_size = [0u8; ChunkLength::BITS as usize / 8]; @@ -79,7 +79,10 @@ pub(crate) trait RecvLoop: Sync + 'static { if size == CANCEL_REQUEST { if let Some(mut stream) = streams.remove(&id) { - stream.send(Err(std::io::Error::other("netapp: cancel requested"))); + let _ = stream.send(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "netapp: cancel requested", + ))); stream.end(); } self.cancel_handler(id); @@ -89,7 +92,7 @@ pub(crate) trait RecvLoop: Sync + 'static { let has_cont = (size & CHUNK_FLAG_HAS_CONTINUATION) != 0; let is_error = (size & CHUNK_FLAG_ERROR) != 0; let size = (size & CHUNK_LENGTH_MASK) as usize; - let mut next_slice = vec![0; size]; + let mut next_slice = vec![0; size as usize]; read.read_exact(&mut next_slice[..]).await?; let packet = if is_error { @@ -132,7 +135,7 @@ pub(crate) trait RecvLoop: Sync + 'static { // If we cannot put packet in channel, it means that the // receiving end of the channel is disconnected. 
// We still need to reach eos before dropping this sender - sender.send(packet); + let _ = sender.send(packet); } if has_cont { diff --git a/src/net/send.rs b/src/net/send.rs index cf040888..6f1ac02c 100644 --- a/src/net/send.rs +++ b/src/net/send.rs @@ -264,8 +264,8 @@ impl DataFrame { } } -/// The `SendLoop` trait, which is implemented both by the client and the server -/// connection objects (`ServerConna` and `ClientConn`) adds a method `.send_loop()` +/// The SendLoop trait, which is implemented both by the client and the server +/// connection objects (ServerConna and ClientConn) adds a method `.send_loop()` /// that takes a channel of messages to send and an asynchronous writer, /// and sends messages from the channel to the async writer, putting them in a queue /// before being sent and doing the round-robin sending strategy. @@ -319,7 +319,7 @@ pub(crate) trait SendLoop: Sync { order_tag, data: ByteStreamReader::new(data), sent: 0, - }); + }) } Some(SendItem::Cancel(id)) => { trace!("send_loop({}): cancelling {}", debug_name, id); @@ -332,7 +332,7 @@ pub(crate) trait SendLoop: Sync { None => { msg_recv = None; } - } + }; } (id, data) = send_fut => { trace!( diff --git a/src/net/stream.rs b/src/net/stream.rs index 93971ba7..c973f9a7 100644 --- a/src/net/stream.rs +++ b/src/net/stream.rs @@ -14,18 +14,18 @@ use crate::bytes_buf::BytesBuf; /// When sent through Netapp, the Vec may be split in smaller chunk in such a way /// consecutive Vec may get merged, but Vec and error code may not be reordered /// -/// Items sent in the `ByteStream` may be errors of type `std::io::Error`. -/// An error indicates the end of the `ByteStream`: a reader should no longer read +/// Items sent in the ByteStream may be errors of type `std::io::Error`. +/// An error indicates the end of the ByteStream: a reader should no longer read /// after receiving an error, and a writer should stop writing after sending an error. 
pub type ByteStream = Pin + Send + Sync>>; -/// A packet sent in a `ByteStream`, which may contain either +/// A packet sent in a ByteStream, which may contain either /// a Bytes object or an error pub type Packet = Result; // ---- -/// A helper struct to read defined lengths of data from a `BytesStream` +/// A helper struct to read defined lengths of data from a BytesStream pub struct ByteStreamReader { stream: ByteStream, buf: BytesBuf, @@ -201,7 +201,7 @@ pub fn stream_asyncread(stream: ByteStream) -> impl AsyncRead + Send + Sync + 's tokio_util::io::StreamReader::new(stream) } -/// Reads all of the content of a `ByteStream` into a `BytesBuf` +/// Reads all of the content of a `ByteStream` into a BytesBuf /// that contains everything pub async fn read_stream_to_end(mut stream: ByteStream) -> Result { let mut buf = BytesBuf::new(); diff --git a/src/net/test.rs b/src/net/test.rs index bb45741b..3cf446bd 100644 --- a/src/net/test.rs +++ b/src/net/test.rs @@ -15,12 +15,12 @@ use crate::NodeID; #[tokio::test(flavor = "current_thread")] async fn test_with_basic_scheduler() { pretty_env_logger::init(); - run_test(19980).await; + run_test(19980).await } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn test_with_threaded_scheduler() { - run_test(19990).await; + run_test(19990).await } async fn run_test(port_base: u16) { diff --git a/src/net/util.rs b/src/net/util.rs index ab09e68f..35a3be1e 100644 --- a/src/net/util.rs +++ b/src/net/util.rs @@ -7,7 +7,7 @@ use tokio::sync::watch; use crate::netapp::*; -/// Utility function: encodes any serializable value in `MessagePack` binary format +/// Utility function: encodes any serializable value in MessagePack binary format /// using the RMP library. /// /// Field names and variant names are included in the serialization. 
@@ -80,7 +80,7 @@ pub fn parse_and_resolve_peer_addr(peer: &str) -> Option<(NodeID, Vec Option<(NodeID, Vec)> { let delim = peer.find('@')?; let (key, host) = peer.split_at(delim); diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index 7f4882ce..e23f4bca 100644 --- a/src/rpc/Cargo.toml +++ b/src/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_rpc" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -48,9 +48,6 @@ tokio.workspace = true opentelemetry.workspace = true [features] -kubernetes-discovery = ["kube", "k8s-openapi", "schemars"] -consul-discovery = ["reqwest", "thiserror"] -system-libs = ["sodiumoxide/use-pkg-config"] - -[lints] -workspace = true +kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ] +consul-discovery = [ "reqwest", "thiserror" ] +system-libs = [ "sodiumoxide/use-pkg-config" ] diff --git a/src/rpc/consul.rs b/src/rpc/consul.rs index 54d46679..760e9fcb 100644 --- a/src/rpc/consul.rs +++ b/src/rpc/consul.rs @@ -108,17 +108,17 @@ impl ConsulDiscovery { (None, None) => {} _ => return Err(ConsulError::InvalidTLSConfig), }, - ConsulDiscoveryAPI::Agent => {} - } - - if let Some(token) = &config.token { - let mut headers = reqwest::header::HeaderMap::new(); - headers.insert( - "x-consul-token", - reqwest::header::HeaderValue::from_str(token)?, - ); - builder = builder.default_headers(headers); - } + ConsulDiscoveryAPI::Agent => { + if let Some(token) = &config.token { + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert( + "x-consul-token", + reqwest::header::HeaderValue::from_str(&token)?, + ); + builder = builder.default_headers(headers); + } + } + }; let client: reqwest::Client = builder.build()?; @@ -126,61 +126,38 @@ impl ConsulDiscovery { } // ---- READING FROM CONSUL CATALOG ---- - /// Query Consul for Garage nodes registered under the configured service name. 
- /// - /// This method supports querying multiple Consul datacenters for WAN or - /// multi-datacenter deployments. If `config.datacenters` is set and non-empty, - /// each listed datacenter is queried and the results are aggregated. Otherwise, - /// only the local datacenter is queried. `config.datacenters` does not need to be set - /// when all the datacenters are on the same LAN, in this case service discovery works normally - /// - /// # Returns - /// A list of `(NodeID, SocketAddr)` pairs corresponding to all valid discovered - /// nodes across the queried datacenters. + pub async fn get_consul_nodes(&self) -> Result, ConsulError> { + let url = format!( + "{}/v1/catalog/service/{}", + self.config.consul_http_addr, self.config.service_name + ); + + let http = self.client.get(&url).send().await?; + let entries: Vec = http.json().await?; + let mut ret = vec![]; - - let dcs_to_query: Vec> = match &self.config.datacenters { - dcs if !dcs.is_empty() => dcs.iter().map(|dc| Some(dc.as_str())).collect(), - _ => vec![None], - }; - - for dc in dcs_to_query { - let url = match dc { - Some(datacenter) => format!( - "{}/v1/catalog/service/{}?dc={}", - self.config.consul_http_addr, self.config.service_name, datacenter - ), - None => format!( - "{}/v1/catalog/service/{}", - self.config.consul_http_addr, self.config.service_name - ), - }; - - let http = self.client.get(&url).send().await?; - let entries: Vec = http.json().await?; - - for ent in entries { - let ip = ent.address.parse::().ok(); - let pubkey = ent - .meta - .get(&format!("{}-pubkey", META_PREFIX)) - .and_then(|k| hex::decode(k).ok()) - .and_then(|k| NodeID::from_slice(&k[..])); - if let (Some(ip), Some(pubkey)) = (ip, pubkey) { - ret.push((pubkey, SocketAddr::new(ip, ent.service_port))); - } else { - warn!( - "Could not process node spec from Consul: {:?} (invalid IP address or node ID/pubkey)", - ent - ); - } + for ent in entries { + let ip = ent.address.parse::().ok(); + let pubkey = ent + .meta + 
.get(&format!("{}-pubkey", META_PREFIX)) + .and_then(|k| hex::decode(k).ok()) + .and_then(|k| NodeID::from_slice(&k[..])); + if let (Some(ip), Some(pubkey)) = (ip, pubkey) { + ret.push((pubkey, SocketAddr::new(ip, ent.service_port))); + } else { + warn!( + "Could not process node spec from Consul: {:?} (invalid IP address or node ID/pubkey)", + ent + ); } } + debug!("Got nodes from Consul: {:?}", ret); - debug!("Got {} nodes from Consul", ret.len()); Ok(ret) } + // ---- PUBLISHING TO CONSUL CATALOG ---- pub async fn publish_consul_service( diff --git a/src/rpc/kubernetes.rs b/src/rpc/kubernetes.rs index f15afae0..85254bb5 100644 --- a/src/rpc/kubernetes.rs +++ b/src/rpc/kubernetes.rs @@ -66,7 +66,7 @@ pub async fn get_kubernetes_nodes( .and_then(|k| NodeID::from_slice(&k[..])); if let Some(pubkey) = pubkey { - ret.push((*pubkey, SocketAddr::new(node.spec.address, node.spec.port))); + ret.push((*pubkey, SocketAddr::new(node.spec.address, node.spec.port))) } } @@ -108,7 +108,7 @@ pub async fn publish_kubernetes_node( .await?; } else { nodes.create(&PostParams::default(), &node).await?; - } + }; Ok(()) } diff --git a/src/rpc/layout/graph_algo.rs b/src/rpc/layout/graph_algo.rs index c02da729..29d4a043 100644 --- a/src/rpc/layout/graph_algo.rs +++ b/src/rpc/layout/graph_algo.rs @@ -42,7 +42,7 @@ impl Edge for WeightedEdge {} /// Struct for the graph structure. We do encapsulation here to be able to both /// provide user friendly Vertex enum to address vertices, and to use internally usize -/// indices and Vec instead of `HashMap` in the graph algorithm to optimize execution speed. +/// indices and Vec instead of HashMap in the graph algorithm to optimize execution speed. pub struct Graph { vertex_to_id: HashMap, id_to_vertex: Vec, @@ -253,7 +253,7 @@ impl Graph { /// This function takes a flow, and a cost function on the edges, and tries to find an /// equivalent flow with a better cost, by finding improving overflow cycles. 
It uses - /// as subroutine the Bellman Ford algorithm run up to `path_length`. + /// as subroutine the Bellman Ford algorithm run up to path_length. /// We assume that the cost of edge (u,v) is the opposite of the cost of (v,u), and /// only one needs to be present in the cost function. pub fn optimize_flow_with_cost( @@ -290,7 +290,7 @@ impl Graph { Ok(()) } - /// Construct the weighted graph `G_f` from the flow and the cost function + /// Construct the weighted graph G_f from the flow and the cost function fn build_cost_graph(&self, cost: &CostFunction) -> Result, String> { let mut g = Graph::::new(&self.id_to_vertex); let nb_vertices = self.id_to_vertex.len(); @@ -323,11 +323,11 @@ impl Graph { Ok(()) } - /// This function lists the negative cycles it manages to find after `path_length` + /// This function lists the negative cycles it manages to find after path_length /// iterations of the main loop of the Bellman-Ford algorithm. For the classical - /// algorithm, `path_length` needs to be equal to the number of vertices. However, + /// algorithm, path_length needs to be equal to the number of vertices. However, /// for particular graph structures like in our case, the algorithm is still correct - /// when `path_length` is the length of the longest possible simple path. + /// when path_length is the length of the longest possible simple path. /// See the formal description of the algorithm for more details. fn list_negative_cycles(&self, path_length: usize) -> Vec> { let nb_vertices = self.graph.len(); @@ -355,7 +355,7 @@ impl Graph { // Remark that the cycle in prev is in the reverse order compared to the cycle // in the graph. Thus the .rev(). 
- cycles_prev + return cycles_prev .iter() .map(|cycle| { cycle @@ -364,7 +364,7 @@ impl Graph { .map(|id| self.id_to_vertex[*id]) .collect() }) - .collect() + .collect(); } } diff --git a/src/rpc/layout/helper.rs b/src/rpc/layout/helper.rs index 89ffe2ad..c08a5629 100644 --- a/src/rpc/layout/helper.rs +++ b/src/rpc/layout/helper.rs @@ -4,7 +4,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use serde::{Deserialize, Serialize}; use garage_util::data::*; -use garage_util::error::Error; use super::*; use crate::replication_mode::*; @@ -29,6 +28,7 @@ pub struct SyncLayoutDigest { } pub struct LayoutHelper { + replication_factor: ReplicationFactor, consistency_mode: ConsistencyMode, layout: Option, @@ -51,6 +51,7 @@ pub struct LayoutHelper { impl LayoutHelper { pub fn new( + replication_factor: ReplicationFactor, consistency_mode: ConsistencyMode, mut layout: LayoutHistory, mut ack_lock: HashMap, @@ -96,7 +97,8 @@ impl LayoutHelper { // consistency on those). // This value is calculated using quorums to allow progress even // if not all nodes have successfully completed a sync. 
- let sync_map_min = layout.calculate_sync_map_min_with_quorum(&all_nongateway_nodes); + let sync_map_min = + layout.calculate_sync_map_min_with_quorum(replication_factor, &all_nongateway_nodes); let trackers_hash = layout.calculate_trackers_hash(); let staging_hash = layout.calculate_staging_hash(); @@ -109,6 +111,7 @@ impl LayoutHelper { let is_check_ok = layout.check().is_ok(); LayoutHelper { + replication_factor, consistency_mode, layout: Some(layout), ack_map_min, @@ -131,6 +134,7 @@ impl LayoutHelper { let changed = f(self.layout.as_mut().unwrap()); if changed { *self = Self::new( + self.replication_factor, self.consistency_mode, self.layout.take().unwrap(), std::mem::take(&mut self.ack_lock), @@ -145,32 +149,12 @@ impl LayoutHelper { self.layout.as_ref().unwrap() } - /// Returns the current layout version - pub fn current(&self) -> Result<&LayoutVersion, Error> { - if !self.is_check_ok { - return Err(Error::LayoutNotReady); - } - Ok(self.inner().current()) + pub fn current(&self) -> &LayoutVersion { + self.inner().current() } - /// Returns all layout versions currently active in the cluster - pub fn versions(&self) -> Result<&[LayoutVersion], Error> { - if !self.is_check_ok { - return Err(Error::LayoutNotReady); - } - Ok(&self.inner().versions) - } - - /// Returns the latest layout version for which it is safe to read data from, - /// i.e. 
the version whose version number is `sync_map_min` - pub fn read_version(&self) -> Result<&LayoutVersion, Error> { - let sync_min = self.sync_map_min; - let versions = self.versions()?; - Ok(versions - .iter() - .find(|x| x.version == sync_min) - .or_else(|| versions.last()) - .unwrap()) + pub fn versions(&self) -> &[LayoutVersion] { + &self.inner().versions } pub fn is_check_ok(&self) -> bool { @@ -179,20 +163,14 @@ impl LayoutHelper { /// Return all nodes that have a role (gateway or storage) /// in one of the currently active layout versions - pub fn all_nodes(&self) -> Result<&[Uuid], Error> { - if !self.is_check_ok { - return Err(Error::LayoutNotReady); - } - Ok(&self.all_nodes) + pub fn all_nodes(&self) -> &[Uuid] { + &self.all_nodes } /// Return all nodes that are configured to store data /// in one of the currently active layout versions - pub fn all_nongateway_nodes(&self) -> Result<&[Uuid], Error> { - if !self.is_check_ok { - return Err(Error::LayoutNotReady); - } - Ok(&self.all_nongateway_nodes) + pub fn all_nongateway_nodes(&self) -> &[Uuid] { + &self.all_nongateway_nodes } pub fn ack_map_min(&self) -> u64 { @@ -203,20 +181,61 @@ impl LayoutHelper { self.sync_map_min } - // ---- helpers for layout synchronization ---- - pub fn sync_digest(&self) -> SyncLayoutDigest { SyncLayoutDigest { - current: self.inner().current().version, + current: self.current().version, ack_map_min: self.ack_map_min(), min_stored: self.inner().min_stored(), } } - pub(crate) fn digest(&self) -> RpcLayoutDigest { + pub fn read_nodes_of(&self, position: &Hash) -> Vec { + let sync_min = self.sync_map_min; + let version = self + .versions() + .iter() + .find(|x| x.version == sync_min) + .or(self.versions().last()) + .unwrap(); + version + .nodes_of(position, version.replication_factor) + .collect() + } + + pub fn storage_sets_of(&self, position: &Hash) -> Vec> { + self.versions() + .iter() + .map(|x| x.nodes_of(position, x.replication_factor).collect()) + .collect() + } + + pub fn 
storage_nodes_of(&self, position: &Hash) -> Vec { + let mut ret = vec![]; + for version in self.versions().iter() { + ret.extend(version.nodes_of(position, version.replication_factor)); + } + ret.sort(); + ret.dedup(); + ret + } + + pub fn current_storage_nodes_of(&self, position: &Hash) -> Vec { + let ver = self.current(); + ver.nodes_of(position, ver.replication_factor).collect() + } + + pub fn trackers_hash(&self) -> Hash { + self.trackers_hash + } + + pub fn staging_hash(&self) -> Hash { + self.staging_hash + } + + pub fn digest(&self) -> RpcLayoutDigest { RpcLayoutDigest { - current_version: self.inner().current().version, - active_versions: self.inner().versions.len(), + current_version: self.current().version, + active_versions: self.versions().len(), trackers_hash: self.trackers_hash, staging_hash: self.staging_hash, } @@ -260,18 +279,17 @@ impl LayoutHelper { pub(crate) fn update_ack_to_max_free(&mut self, local_node_id: Uuid) -> bool { let max_free = self - .inner() - .versions + .versions() .iter() .map(|x| x.version) - .find(|v| { - !self - .ack_lock + .skip_while(|v| { + self.ack_lock .get(v) .map(|x| x.load(Ordering::Relaxed) == 0) .unwrap_or(true) }) - .unwrap_or_else(|| self.inner().current().version); + .next() + .unwrap_or(self.current().version); let changed = self.update(|layout| { layout .update_trackers diff --git a/src/rpc/layout/history.rs b/src/rpc/layout/history.rs index b4659543..574c50c2 100644 --- a/src/rpc/layout/history.rs +++ b/src/rpc/layout/history.rs @@ -10,7 +10,7 @@ use crate::replication_mode::*; impl LayoutHistory { pub fn new(replication_factor: ReplicationFactor) -> Self { - let version = LayoutVersion::new(replication_factor); + let version = LayoutVersion::new(replication_factor.into()); let staging = LayoutStaging { parameters: Lww::::new(version.parameters), @@ -123,9 +123,13 @@ impl LayoutHistory { } } - /// This function calculates the minimum layout version from which - /// it is safe to read if we want to maintain 
read-after-write consistency. - pub(crate) fn calculate_sync_map_min_with_quorum(&self, all_nongateway_nodes: &[Uuid]) -> u64 { + pub(crate) fn calculate_sync_map_min_with_quorum( + &self, + replication_factor: ReplicationFactor, + all_nongateway_nodes: &[Uuid], + ) -> u64 { + // This function calculates the minimum layout version from which + // it is safe to read if we want to maintain read-after-write consistency. // In the general case the computation can be a bit expensive so // we try to optimize it in several ways. @@ -135,6 +139,8 @@ impl LayoutHistory { return self.current().version; } + let quorum = replication_factor.write_quorum(ConsistencyMode::Consistent); + let min_version = self.min_stored(); let global_min = self .update_trackers @@ -147,16 +153,7 @@ impl LayoutHistory { // This is represented by reading from the layout with version // number global_min, the smallest layout version for which all nodes // have completed a sync. - // - // While we currently do not support changing the replication factor - // between layout versions, this calculation is future-proofing for the - // case where this might be possible. - if self - .versions - .iter() - .filter(|v| v.version >= global_min) - .all(|v| v.write_quorum(ConsistencyMode::Consistent) == v.replication_factor) - { + if quorum == self.current().replication_factor { return global_min; } @@ -183,7 +180,9 @@ impl LayoutHistory { // Determine set of nodes for partition p in layout version v. // Sort the node set to avoid duplicate computations. - let mut set = v.nodes_of(&p_hash).collect::>(); + let mut set = v + .nodes_of(&p_hash, v.replication_factor) + .collect::>(); set.sort(); // If this set was already processed, skip it. 
@@ -198,8 +197,7 @@ impl LayoutHistory { .map(|x| self.update_trackers.sync_map.get(x, min_version)) .collect::>(); sync_values.sort(); - let set_min = - sync_values[sync_values.len() - v.write_quorum(ConsistencyMode::Consistent)]; + let set_min = sync_values[sync_values.len() - quorum]; if set_min < current_min { current_min = set_min; } @@ -269,9 +267,20 @@ impl LayoutHistory { changed } - pub fn apply_staged_changes(mut self, version: u64) -> Result<(Self, Message), Error> { - if version != self.current().version + 1 { - return Err(Error::Message("Invalid new layout version".into())); + pub fn apply_staged_changes(mut self, version: Option) -> Result<(Self, Message), Error> { + match version { + None => { + let error = r#" +Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout. +To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes. + "#; + return Err(Error::Message(error.into())); + } + Some(v) => { + if v != self.current().version + 1 { + return Err(Error::Message("Invalid new layout version".into())); + } + } } // Compute new version and add it to history diff --git a/src/rpc/layout/manager.rs b/src/rpc/layout/manager.rs index 44746caa..bb8000bd 100644 --- a/src/rpc/layout/manager.rs +++ b/src/rpc/layout/manager.rs @@ -46,11 +46,11 @@ impl LayoutManager { let cluster_layout = match persist_cluster_layout.load() { Ok(x) => { - if x.current().replication_factor() != replication_factor { + if x.current().replication_factor != replication_factor.replication_factor() { return Err(Error::Message(format!( "Previous cluster layout has replication factor {}, which is different than the one specified in the config file ({}). 
The previous cluster layout can be purged, if you know what you are doing, simply by deleting the `cluster_layout` file in your metadata directory.", - x.current().replication_factor(), - replication_factor, + x.current().replication_factor, + replication_factor.replication_factor() ))); } x @@ -64,8 +64,12 @@ impl LayoutManager { } }; - let mut cluster_layout = - LayoutHelper::new(consistency_mode, cluster_layout, Default::default()); + let mut cluster_layout = LayoutHelper::new( + replication_factor, + consistency_mode, + cluster_layout, + Default::default(), + ); cluster_layout.update_update_trackers(node_id.into()); let layout = Arc::new(RwLock::new(cluster_layout)); @@ -105,7 +109,7 @@ impl LayoutManager { } pub fn add_table(&self, table_name: &'static str) { - let first_version = self.layout().inner().versions.first().unwrap().version; + let first_version = self.layout().versions().first().unwrap().version; self.table_sync_version .lock() @@ -116,7 +120,7 @@ impl LayoutManager { pub fn sync_table_until(self: &Arc, table_name: &'static str, version: u64) { let mut table_sync_version = self.table_sync_version.lock().unwrap(); *table_sync_version.get_mut(table_name).unwrap() = version; - let sync_until = *table_sync_version.values().min().unwrap(); + let sync_until = table_sync_version.iter().map(|(_, v)| *v).min().unwrap(); drop(table_sync_version); let mut layout = self.layout.write().unwrap(); @@ -139,20 +143,16 @@ impl LayoutManager { // ---- ACK LOCKING ---- - pub fn write_lock_with(self: &Arc, f: F) -> Result, Error> - where - F: FnOnce(&[LayoutVersion]) -> T, - { + pub fn write_sets_of(self: &Arc, position: &Hash) -> WriteLock>> { let layout = self.layout(); - let current_version = layout.current()?.version; - let versions = layout.versions()?; - let value = f(versions); + let version = layout.current().version; + let nodes = layout.storage_sets_of(position); layout .ack_lock - .get(¤t_version) + .get(&version) .unwrap() .fetch_add(1, Ordering::Relaxed); 
- Ok(WriteLock::new(current_version, self, value)) + WriteLock::new(version, self, nodes) } // ---- INTERNALS --- @@ -163,8 +163,7 @@ impl LayoutManager { let prev_layout_check = layout.is_check_ok(); if !prev_layout_check || adv.check().is_ok() { - let changed = layout.update(|l| l.merge(adv)); - if changed { + if layout.update(|l| l.merge(adv)) { layout.update_update_trackers(self.node_id); if prev_layout_check && !layout.is_check_ok() { panic!("Merged two correct layouts and got an incorrect layout."); @@ -182,8 +181,7 @@ impl LayoutManager { let prev_digest = layout.digest(); if layout.inner().update_trackers != *adv { - let changed = layout.update(|l| l.update_trackers.merge(adv)); - if changed { + if layout.update(|l| l.update_trackers.merge(adv)) { layout.update_update_trackers(self.node_id); assert!(layout.digest() != prev_digest); return Some(layout.inner().update_trackers.clone()); @@ -298,11 +296,11 @@ impl LayoutManager { adv.update_trackers ); - if adv.current().replication_factor() != self.replication_factor { + if adv.current().replication_factor != self.replication_factor.replication_factor() { let msg = format!( "Received a cluster layout from another node with replication factor {}, which is different from what we have in our configuration ({}). 
Discarding the cluster layout we received.", - adv.current().replication_factor(), - self.replication_factor, + adv.current().replication_factor, + self.replication_factor.replication_factor() ); error!("{}", msg); return Err(Error::Message(msg)); @@ -370,7 +368,7 @@ impl Drop for WriteLock { let layout = self.layout_manager.layout(); // acquire read lock if let Some(counter) = layout.ack_lock.get(&self.layout_version) { let prev_lock = counter.fetch_sub(1, Ordering::Relaxed); - if prev_lock == 1 && layout.current().unwrap().version > self.layout_version { + if prev_lock == 1 && layout.current().version > self.layout_version { drop(layout); // release read lock, write lock will be acquired self.layout_manager.ack_new_version(); } diff --git a/src/rpc/layout/mod.rs b/src/rpc/layout/mod.rs index e565d13e..ce21a524 100644 --- a/src/rpc/layout/mod.rs +++ b/src/rpc/layout/mod.rs @@ -1,3 +1,5 @@ +use std::fmt; + use bytesize::ByteSize; use garage_util::crdt::{AutoCrdt, Crdt}; @@ -23,7 +25,7 @@ pub use version::*; /// A partition id, which is stored on 16 bits /// i.e. we have up to 2**16 partitions. -/// (in practice we have exactly 2**`PARTITION_BITS` partitions) +/// (in practice we have exactly 2**PARTITION_BITS partitions) pub type Partition = u16; // TODO: make this constant parametrizable in the config file @@ -114,7 +116,7 @@ mod v09 { /// to know to what extent does it change with the layout update. 
pub partition_size: u64, /// Parameters used to compute the assignment currently given by - /// `ring_assignment_data` + /// ring_assignment_data pub parameters: LayoutParameters, pub roles: LwwMap, @@ -229,7 +231,7 @@ mod v010 { use std::collections::BTreeMap; pub use v09::{LayoutParameters, NodeRole, NodeRoleV, ZoneRedundancy}; - /// Number of old (non-live) versions to keep, see `LayoutHistory::old_versions` + /// Number of old (non-live) versions to keep, see LayoutHistory::old_versions pub const OLD_VERSION_COUNT: usize = 5; /// The history of cluster layouts, with trackers to keep a record @@ -238,8 +240,8 @@ mod v010 { pub struct LayoutHistory { /// The versions currently in use in the cluster pub versions: Vec, - /// At most 5 of the previous versions, not used by the `garage_table` - /// module, but useful for the `garage_block` module to find data blocks + /// At most 5 of the previous versions, not used by the garage_table + /// module, but useful for the garage_block module to find data blocks /// that have not yet been moved pub old_versions: Vec, @@ -260,7 +262,7 @@ mod v010 { /// Roles assigned to nodes in this version pub roles: LwwMap, /// Parameters used to compute the assignment currently given by - /// `ring_assignment_data` + /// ring_assignment_data pub parameters: LayoutParameters, /// The number of replicas for each data partition @@ -269,17 +271,17 @@ mod v010 { /// to know to what extent does it change with the layout update. pub partition_size: u64, - /// `node_id_vec`: a vector of node IDs with a role assigned + /// node_id_vec: a vector of node IDs with a role assigned /// in the system (this includes gateway nodes). /// The order here is different than the vec stored by `roles`, because: /// 1. non-gateway nodes are first so that they have lower numbers /// 2. 
nodes that don't have a role are excluded (but they need to /// stay in the CRDT as tombstones) pub node_id_vec: Vec, - /// number of non-gateway nodes, which are the first ids in `node_id_vec` + /// number of non-gateway nodes, which are the first ids in node_id_vec pub nongateway_node_count: usize, /// The assignation of data partitions to nodes, the values - /// are indices in `node_id_vec` + /// are indices in node_id_vec #[serde(with = "serde_bytes")] pub ring_assignment_data: Vec, } @@ -293,7 +295,7 @@ mod v010 { pub roles: LwwMap, } - /// The tracker of acknowledgments and data syncs around the cluster + /// The tracker of acknowlegments and data syncs around the cluster #[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq)] pub struct UpdateTrackers { /// The highest layout version number each node has ack'ed @@ -385,7 +387,7 @@ impl Crdt for LayoutStaging { impl NodeRole { pub fn capacity_string(&self) -> String { match self.capacity { - Some(c) => ByteSize::b(c).display().iec().to_string(), + Some(c) => ByteSize::b(c).to_string_as(false), None => "gateway".to_string(), } } @@ -395,6 +397,30 @@ impl NodeRole { } } +impl fmt::Display for ZoneRedundancy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ZoneRedundancy::Maximum => write!(f, "maximum"), + ZoneRedundancy::AtLeast(x) => write!(f, "{}", x), + } + } +} + +impl core::str::FromStr for ZoneRedundancy { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + "none" | "max" | "maximum" => Ok(ZoneRedundancy::Maximum), + x => { + let v = x + .parse::() + .map_err(|_| "zone redundancy must be 'none'/'max' or an integer")?; + Ok(ZoneRedundancy::AtLeast(v)) + } + } + } +} + impl UpdateTracker { fn merge(&mut self, other: &UpdateTracker) -> bool { let mut changed = false; @@ -429,7 +455,7 @@ impl UpdateTracker { } } - fn min_among(&self, storage_nodes: &[Uuid], min_version: u64) -> u64 { + pub fn min_among(&self, storage_nodes: &[Uuid], 
min_version: u64) -> u64 { storage_nodes .iter() .map(|x| self.get(x, min_version)) diff --git a/src/rpc/layout/test.rs b/src/rpc/layout/test.rs index aa146dfd..5462160b 100644 --- a/src/rpc/layout/test.rs +++ b/src/rpc/layout/test.rs @@ -2,6 +2,7 @@ use std::cmp::min; use std::collections::HashMap; use garage_util::crdt::Crdt; +use garage_util::error::*; use crate::layout::*; use crate::replication_mode::ReplicationFactor; @@ -20,22 +21,22 @@ use crate::replication_mode::ReplicationFactor; // number of tokens by zone : (A, 4), (B,1), (C,4), (D, 4), (E, 2) // With these parameters, the naive algo fails, whereas there is a solution: // (A,A,C,D,E) , (A,B,C,D,D) (A,C,C,D,E) -fn check_against_naive(cl: &LayoutVersion) -> bool { +fn check_against_naive(cl: &LayoutVersion) -> Result { let over_size = cl.partition_size + 1; let mut zone_token = HashMap::::new(); - let (zones, zone_to_id) = cl.generate_nongateway_zone_ids(); + let (zones, zone_to_id) = cl.generate_nongateway_zone_ids()?; if zones.is_empty() { - return false; + return Ok(false); } for z in zones.iter() { zone_token.insert(z.clone(), 0); } for uuid in cl.nongateway_nodes() { - let z = cl.expect_get_node_zone(uuid); - let c = cl.expect_get_node_capacity(uuid); + let z = cl.expect_get_node_zone(&uuid); + let c = cl.expect_get_node_capacity(&uuid); zone_token.insert( z.to_string(), zone_token[z] + min(NB_PARTITIONS, (c / over_size) as usize), @@ -65,7 +66,7 @@ fn check_against_naive(cl: &LayoutVersion) -> bool { { curr_zone += 1; if curr_zone >= zones.len() { - return true; + return Ok(true); } } id_zone_token[curr_zone] -= 1; @@ -76,7 +77,7 @@ fn check_against_naive(cl: &LayoutVersion) -> bool { } } - false + return Ok(false); } fn show_msg(msg: &Message) { @@ -123,35 +124,35 @@ fn test_assignment() { let mut cl = LayoutHistory::new(ReplicationFactor::new(3).unwrap()); update_layout(&mut cl, &node_capacity_vec, &node_zone_vec, 3); let v = cl.current().version; - let (mut cl, msg) = cl.apply_staged_changes(v + 
1).unwrap(); + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); show_msg(&msg); assert_eq!(cl.check(), Ok(())); - assert!(check_against_naive(cl.current())); + assert!(check_against_naive(cl.current()).unwrap()); node_capacity_vec = vec![4000, 1000, 1000, 3000, 1000, 1000, 2000, 10000, 2000]; node_zone_vec = vec!["A", "B", "C", "C", "C", "B", "G", "H", "I"]; update_layout(&mut cl, &node_capacity_vec, &node_zone_vec, 2); let v = cl.current().version; - let (mut cl, msg) = cl.apply_staged_changes(v + 1).unwrap(); + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); show_msg(&msg); assert_eq!(cl.check(), Ok(())); - assert!(check_against_naive(cl.current())); + assert!(check_against_naive(cl.current()).unwrap()); node_capacity_vec = vec![4000, 1000, 2000, 7000, 1000, 1000, 2000, 10000, 2000]; update_layout(&mut cl, &node_capacity_vec, &node_zone_vec, 3); let v = cl.current().version; - let (mut cl, msg) = cl.apply_staged_changes(v + 1).unwrap(); + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); show_msg(&msg); assert_eq!(cl.check(), Ok(())); - assert!(check_against_naive(cl.current())); + assert!(check_against_naive(cl.current()).unwrap()); node_capacity_vec = vec![ 4000000, 4000000, 2000000, 7000000, 1000000, 9000000, 2000000, 10000, 2000000, ]; update_layout(&mut cl, &node_capacity_vec, &node_zone_vec, 1); let v = cl.current().version; - let (cl, msg) = cl.apply_staged_changes(v + 1).unwrap(); + let (cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); show_msg(&msg); assert_eq!(cl.check(), Ok(())); - assert!(check_against_naive(cl.current())); + assert!(check_against_naive(cl.current()).unwrap()); } diff --git a/src/rpc/layout/version.rs b/src/rpc/layout/version.rs index 18728e63..a02fce89 100644 --- a/src/rpc/layout/version.rs +++ b/src/rpc/layout/version.rs @@ -11,13 +11,12 @@ use garage_util::error::*; use super::graph_algo::*; use super::*; -use crate::replication_mode::*; // The Message type will be used 
to collect information on the algorithm. pub type Message = Vec; impl LayoutVersion { - pub fn new(replication_factor: ReplicationFactor) -> Self { + pub fn new(replication_factor: usize) -> Self { // We set the default zone redundancy to be Maximum, meaning that the maximum // possible value will be used depending on the cluster topology let parameters = LayoutParameters { @@ -26,7 +25,7 @@ impl LayoutVersion { LayoutVersion { version: 0, - replication_factor: usize::from(replication_factor), + replication_factor, partition_size: 0, roles: LwwMap::new(), node_id_vec: Vec::new(), @@ -85,7 +84,7 @@ impl LayoutVersion { let mut count = 0; for nod in self.ring_assignment_data.iter() { if i as u8 == *nod { - count += 1; + count += 1 } } return Ok(count); @@ -115,35 +114,26 @@ impl LayoutVersion { } /// Return the n servers in which data for this hash should be replicated - pub fn nodes_of(&self, position: &Hash) -> impl Iterator + '_ { + pub fn nodes_of(&self, position: &Hash, n: usize) -> impl Iterator + '_ { + assert_eq!(n, self.replication_factor); + let data = &self.ring_assignment_data; - if data.len() != self.replication_factor * (1 << PARTITION_BITS) { - panic!(".nodes_of() called on invalid LayoutVersion (this is a bug)"); - } - - let partition_idx = self.partition_of(position) as usize; - let partition_start = partition_idx * self.replication_factor; - let partition_end = (partition_idx + 1) * self.replication_factor; - let partition_nodes = &data[partition_start..partition_end]; + let partition_nodes = if data.len() == self.replication_factor * (1 << PARTITION_BITS) { + let partition_idx = self.partition_of(position) as usize; + let partition_start = partition_idx * self.replication_factor; + let partition_end = (partition_idx + 1) * self.replication_factor; + &data[partition_start..partition_end] + } else { + warn!("Ring not yet ready, read/writes will be lost!"); + &[] + }; partition_nodes .iter() .map(move |i| self.node_id_vec[*i as usize]) } - pub fn 
replication_factor(&self) -> ReplicationFactor { - ReplicationFactor::new(self.replication_factor).unwrap() - } - - pub fn read_quorum(&self, consistency_mode: ConsistencyMode) -> usize { - self.replication_factor().read_quorum(consistency_mode) - } - - pub fn write_quorum(&self, consistency_mode: ConsistencyMode) -> usize { - self.replication_factor().write_quorum(consistency_mode) - } - // ===================== internal information extractors ====================== pub(crate) fn expect_get_node_capacity(&self, uuid: &Uuid) -> u64 { @@ -164,7 +154,7 @@ impl LayoutVersion { total_capacity } - /// Returns the effective value of the `zone_redundancy` parameter + /// Returns the effective value of the zone_redundancy parameter pub(crate) fn effective_zone_redundancy(&self) -> usize { match self.parameters.zone_redundancy { ZoneRedundancy::AtLeast(v) => v, @@ -271,7 +261,7 @@ impl LayoutVersion { // Check that the partition size stored is the one computed by the asignation // algorithm. let cl2 = self.clone(); - let (_, zone_to_id) = cl2.generate_nongateway_zone_ids(); + let (_, zone_to_id) = cl2.generate_nongateway_zone_ids().unwrap(); match cl2.compute_optimal_partition_size(&zone_to_id, zone_redundancy) { Ok(s) if s != self.partition_size => { return Err(format!( @@ -311,7 +301,7 @@ impl LayoutVersion { /// the former assignment (if any) to minimize the amount of /// data to be moved. /// Staged role changes must be merged with nodes roles before calling this function, - /// hence it must only be called from `apply_staged_changes()` and hence is not public. + /// hence it must only be called from apply_staged_changes() and hence is not public. fn calculate_partition_assignment(&mut self) -> Result { // We update the node ids, since the node role list might have changed with the // changes in the layout. 
We retrieve the old_assignment reframed with new ids @@ -330,7 +320,7 @@ impl LayoutVersion { // We generate for once numerical ids for the zones of non gateway nodes, // to use them as indices in the flow graphs. - let (id_to_zone, zone_to_id) = self.generate_nongateway_zone_ids(); + let (id_to_zone, zone_to_id) = self.generate_nongateway_zone_ids()?; if self.nongateway_nodes().len() < self.replication_factor { return Err(Error::Message(format!( @@ -359,13 +349,13 @@ impl LayoutVersion { if old_assignment_opt.is_some() { msg.push(format!( "Optimal partition size: {} ({} in previous layout)", - ByteSize::b(partition_size).display().iec(), - ByteSize::b(self.partition_size).display().iec() + ByteSize::b(partition_size).to_string_as(false), + ByteSize::b(self.partition_size).to_string_as(false) )); } else { msg.push(format!( "Optimal partition size: {}", - ByteSize::b(partition_size).display().iec() + ByteSize::b(partition_size).to_string_as(false) )); } // We write the partition size. @@ -402,11 +392,11 @@ impl LayoutVersion { Ok(msg) } - /// The `LwwMap` of node roles might have changed. This function updates the `node_id_vec` + /// The LwwMap of node roles might have changed. This function updates the node_id_vec /// and returns the assignment given by ring, with the new indices of the nodes, and /// None if the node is not present anymore. - /// We work with the assumption that only this function and `calculate_new_assignment` - /// do modify `assignment_ring` and `node_id_vec`. + /// We work with the assumption that only this function and calculate_new_assignment + /// do modify assignment_ring and node_id_vec. fn update_node_id_vec(&mut self) -> Result>>, Error> { // (1) We compute the new node list // Non gateway nodes should be coded on 8bits, hence they must be first in the list @@ -488,8 +478,10 @@ impl LayoutVersion { } /// This function generates ids for the zone of the nodes appearing in - /// `self.node_id_vec`. 
- pub(crate) fn generate_nongateway_zone_ids(&self) -> (Vec, HashMap) { + /// self.node_id_vec. + pub(crate) fn generate_nongateway_zone_ids( + &self, + ) -> Result<(Vec, HashMap), Error> { let mut id_to_zone = Vec::::new(); let mut zone_to_id = HashMap::::new(); @@ -500,7 +492,7 @@ impl LayoutVersion { id_to_zone.push(r.zone.clone()); } } - (id_to_zone, zone_to_id) + Ok((id_to_zone, zone_to_id)) } /// This function computes by dichotomy the largest realizable partition size, given @@ -525,16 +517,16 @@ impl LayoutVersion { let mut s_up = self.get_total_capacity(); while s_down + 1 < s_up { g = self.generate_flow_graph( - u64::midpoint(s_down, s_up), + (s_down + s_up) / 2, zone_to_id, &empty_set, zone_redundancy, )?; g.compute_maximal_flow()?; if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 { - s_up = u64::midpoint(s_down, s_up); + s_up = (s_down + s_up) / 2; } else { - s_down = u64::midpoint(s_down, s_up); + s_down = (s_down + s_up) / 2; } } @@ -558,7 +550,7 @@ impl LayoutVersion { /// Generates the graph to compute the maximal flow corresponding to the optimal /// partition assignment. - /// `exclude_assoc` is the set of (partition, node) association that we are forbidden + /// exclude_assoc is the set of (partition, node) association that we are forbidden /// to use (hence we do not add the corresponding edge to the graph). This parameter /// is used to compute a first flow that uses only edges appearing in the previous /// assignment. 
This produces a solution that heuristically should be close to the @@ -720,16 +712,14 @@ impl LayoutVersion { let percent_cap = 100.0 * (used_cap as f32) / (total_cap as f32); msg.push(format!( "Usable capacity / total cluster capacity: {} / {} ({:.1} %)", - ByteSize::b(used_cap).display().iec(), - ByteSize::b(total_cap).display().iec(), + ByteSize::b(used_cap).to_string_as(false), + ByteSize::b(total_cap).to_string_as(false), percent_cap )); msg.push(format!( "Effective capacity (replication factor {}): {}", self.replication_factor, - ByteSize::b(used_cap / self.replication_factor as u64) - .display() - .iec() + ByteSize::b(used_cap / self.replication_factor as u64).to_string_as(false) )); if percent_cap < 80. { msg.push("".into()); @@ -833,13 +823,13 @@ impl LayoutVersion { let total_cap_n = self.expect_get_node_capacity(&self.node_id_vec[*n]); let tags_n = (self.node_role(&self.node_id_vec[*n]).ok_or(""))?.tags_string(); table.push(format!( - " {:?}\t[{}]\t{} ({} new)\t{}\t{} ({:.1}%)", + " {:?}\t{}\t{} ({} new)\t{}\t{} ({:.1}%)", self.node_id_vec[*n], tags_n, stored_partitions[*n], new_partitions[*n], - ByteSize::b(total_cap_n).display().iec(), - ByteSize::b(available_cap_n).display().iec(), + ByteSize::b(total_cap_n).to_string_as(false), + ByteSize::b(available_cap_n).to_string_as(false), (available_cap_n as f32) / (total_cap_n as f32) * 100.0, )); } @@ -849,8 +839,8 @@ impl LayoutVersion { replicated_partitions, stored_partitions_zone[z], //new_partitions_zone[z], - ByteSize::b(total_cap_z).display().iec(), - ByteSize::b(available_cap_z).display().iec(), + ByteSize::b(total_cap_z).to_string_as(false), + ByteSize::b(available_cap_z).to_string_as(false), percent_cap_z )); table.push("".into()); diff --git a/src/rpc/metrics.rs b/src/rpc/metrics.rs index 0676dec0..61f8fa79 100644 --- a/src/rpc/metrics.rs +++ b/src/rpc/metrics.rs @@ -1,6 +1,6 @@ use opentelemetry::{global, metrics::*}; -/// `TableMetrics` reference all counter used for metrics +/// TableMetrics 
reference all counter used for metrics pub struct RpcMetrics { pub(crate) rpc_counter: Counter, pub(crate) rpc_timeout_counter: Counter, diff --git a/src/rpc/replication_mode.rs b/src/rpc/replication_mode.rs index 7bb91978..a3a94085 100644 --- a/src/rpc/replication_mode.rs +++ b/src/rpc/replication_mode.rs @@ -38,10 +38,14 @@ impl ReplicationFactor { } } + pub fn replication_factor(&self) -> usize { + self.0 + } + pub fn read_quorum(&self, consistency_mode: ConsistencyMode) -> usize { match consistency_mode { ConsistencyMode::Dangerous | ConsistencyMode::Degraded => 1, - ConsistencyMode::Consistent => usize::from(*self).div_ceil(2), + ConsistencyMode::Consistent => self.replication_factor().div_ceil(2), } } @@ -49,7 +53,7 @@ impl ReplicationFactor { match consistency_mode { ConsistencyMode::Dangerous => 1, ConsistencyMode::Degraded | ConsistencyMode::Consistent => { - (usize::from(*self) + 1) - self.read_quorum(ConsistencyMode::Consistent) + (self.replication_factor() + 1) - self.read_quorum(ConsistencyMode::Consistent) } } } @@ -61,28 +65,30 @@ impl std::convert::From for usize { } } -impl std::fmt::Display for ReplicationFactor { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0.fmt(f) - } -} - pub fn parse_replication_mode( config: &Config, ) -> Result<(ReplicationFactor, ConsistencyMode), Error> { match (&config.replication_mode, config.replication_factor, config.consistency_mode.as_str()) { - (Some(_replication_mode), _, _) => { - Err(Error::Message("The legacy replication_mode is no longer supported. 
Use replication_factor and consistency_mode instead.".into())) - } - (None, Some(replication_factor), consistency_mode) => { - let replication_factor = ReplicationFactor::new(replication_factor) - .ok_or_message("Invalid replication_factor in config file.")?; - let consistency_mode = ConsistencyMode::parse(consistency_mode) - .ok_or_message("Invalid consistency_mode in config file.")?; - Ok((replication_factor, consistency_mode)) - } - (None, None, _) => { - Err(Error::Message("The option replication_factor is required.".into())) - } - } + (Some(replication_mode), None, "consistent") => { + tracing::warn!("Legacy config option replication_mode in use. Please migrate to replication_factor and consistency_mode"); + let parsed_replication_mode = match replication_mode.as_str() { + "1" | "none" => Some((ReplicationFactor(1), ConsistencyMode::Consistent)), + "2" => Some((ReplicationFactor(2), ConsistencyMode::Consistent)), + "2-dangerous" => Some((ReplicationFactor(2), ConsistencyMode::Dangerous)), + "3" => Some((ReplicationFactor(3), ConsistencyMode::Consistent)), + "3-degraded" => Some((ReplicationFactor(3), ConsistencyMode::Degraded)), + "3-dangerous" => Some((ReplicationFactor(3), ConsistencyMode::Dangerous)), + _ => None, + }; + Some(parsed_replication_mode.ok_or_message("Invalid replication_mode in config file.")?) 
+ }, + (None, Some(replication_factor), consistency_mode) => { + let replication_factor = ReplicationFactor::new(replication_factor) + .ok_or_message("Invalid replication_factor in config file.")?; + let consistency_mode = ConsistencyMode::parse(consistency_mode) + .ok_or_message("Invalid consistency_mode in config file.")?; + Some((replication_factor, consistency_mode)) + } + _ => None, + }.ok_or_message("Either the legacy replication_mode or replication_level and consistency_mode can be set, not both.") } diff --git a/src/rpc/rpc_helper.rs b/src/rpc/rpc_helper.rs index 0805c1cb..2505c2ce 100644 --- a/src/rpc/rpc_helper.rs +++ b/src/rpc/rpc_helper.rs @@ -66,7 +66,7 @@ impl Clone for RequestStrategy<()> { } impl RequestStrategy<()> { - /// Create a `RequestStrategy` with default timeout and not interrupting when quorum reached + /// Create a RequestStrategy with default timeout and not interrupting when quorum reached pub fn with_priority(prio: RequestPriority) -> Self { RequestStrategy { rs_quorum: None, @@ -109,7 +109,7 @@ impl RequestStrategy { self.rs_timeout = Timeout::Custom(timeout); self } - /// Extract `drop_on_complete` item + /// Extract drop_on_complete item fn extract_drop_on_complete(self) -> (RequestStrategy<()>, T) { ( RequestStrategy { @@ -162,7 +162,7 @@ impl RpcHelper { endpoint: &Endpoint, to: Uuid, msg: N, - strategy: RequestStrategy<()>, + strat: RequestStrategy<()>, ) -> Result where M: Rpc>, @@ -185,12 +185,12 @@ impl RpcHelper { let node_id = to.into(); let rpc_call = endpoint - .call_streaming(&node_id, msg, strategy.rs_priority) + .call_streaming(&node_id, msg, strat.rs_priority) .with_context(Context::current_with_span(span)) .record_duration(&self.0.metrics.rpc_duration, &metric_tags); let timeout = async { - match strategy.rs_timeout { + match strat.rs_timeout { Timeout::None => futures::future::pending().await, Timeout::Default => tokio::time::sleep(self.0.rpc_timeout).await, Timeout::Custom(t) => tokio::time::sleep(t).await, @@ 
-222,7 +222,7 @@ impl RpcHelper { endpoint: &Endpoint, to: &[Uuid], msg: N, - strategy: RequestStrategy<()>, + strat: RequestStrategy<()>, ) -> Result)>, Error> where M: Rpc>, @@ -237,7 +237,7 @@ impl RpcHelper { let resps = join_all( to.iter() - .map(|to| self.call(endpoint, *to, msg.clone(), strategy.clone())), + .map(|to| self.call(endpoint, *to, msg.clone(), strat.clone())), ) .with_context(Context::current_with_span(span)) .await; @@ -252,7 +252,7 @@ impl RpcHelper { &self, endpoint: &Endpoint, msg: N, - strategy: RequestStrategy<()>, + strat: RequestStrategy<()>, ) -> Result)>, Error> where M: Rpc>, @@ -266,13 +266,13 @@ impl RpcHelper { .iter() .map(|p| p.id.into()) .collect::>(); - self.call_many(endpoint, &to[..], msg, strategy).await + self.call_many(endpoint, &to[..], msg, strat).await } /// Make a RPC call to multiple servers, returning either a Vec of responses, /// or an error if quorum could not be reached due to too many errors /// - /// If `RequestStrategy` has `send_all_at_once` set, then all requests will be + /// If RequestStrategy has send_all_at_once set, then all requests will be /// sent at once, and `try_call_many` will return as soon as a quorum of /// responses is achieved, dropping and cancelling the remaining requests. /// @@ -336,16 +336,16 @@ impl RpcHelper { { // Once quorum is reached, other requests don't matter. // What we do here is only send the required number of requests - // to reach a quorum, prioritizing nodes with the lowest latency. + // to reach a quorum, priorizing nodes with the lowest latency. // When there are errors, we start new requests to compensate. // TODO: this could be made more aggressive, e.g. if after 2x the // average ping of a given request, the response is not yet received, // preemptively send an additional request to any remaining nodes. 
- // Reorder requests to prioritize closeness / low latency + // Reorder requests to priorize closeness / low latency let request_order = - self.request_order(self.0.layout.read().unwrap().current()?, to.iter().copied()); + self.request_order(&self.0.layout.read().unwrap().current(), to.iter().copied()); let send_all_at_once = strategy.rs_send_all_at_once.unwrap_or(false); // Build future for each request @@ -374,7 +374,7 @@ impl RpcHelper { // reach quorum, start some new requests. while send_all_at_once || successes.len() + resp_stream.len() < quorum { if let Some(fut) = requests.next() { - resp_stream.push(fut); + resp_stream.push(fut) } else { break; } @@ -413,7 +413,7 @@ impl RpcHelper { /// Make a RPC call to multiple servers, returning either a Vec of responses, /// or an error if quorum could not be reached due to too many errors /// - /// Contrary to `try_call_many`, this function is especially made for broadcast + /// Contrary to try_call_many, this function is especially made for broadcast /// write operations. In particular: /// /// - The request are sent to all specified nodes as soon as `try_write_many_sets` @@ -540,8 +540,6 @@ impl RpcHelper { // ---- functions not related to MAKING RPCs, but just determining to what nodes // they should be made and in which order ---- - #[expect(clippy::doc_overindented_list_items)] - #[expect(clippy::doc_lazy_continuation)] /// Determine to what nodes, and in what order, requests to read a data block /// should be sent. All nodes in the Vec returned by this function are tried /// one by one until there is one that returns the block (in block/manager.rs). @@ -560,7 +558,7 @@ impl RpcHelper { /// /// 1. ask first all nodes of all currently active layout versions /// -> ask the preferred node in all layout versions (older to newer), - /// then the second preferred onde in all versions, etc. + /// then the second preferred onde in all verions, etc. 
/// -> we start by the oldest active layout version first, because a majority /// of blocks will have been saved before the layout change /// 2. ask all nodes of historical layout versions, for blocks which have not @@ -569,32 +567,28 @@ impl RpcHelper { /// The preference order, for each layout version, is given by `request_order`, /// based on factors such as nodes being in the same datacenter, /// having low ping, etc. - pub fn block_read_nodes_of( - &self, - position: &Hash, - rpc_helper: &RpcHelper, - ) -> Result, Error> { + pub fn block_read_nodes_of(&self, position: &Hash, rpc_helper: &RpcHelper) -> Vec { let layout = self.0.layout.read().unwrap(); - let current_layout = layout.current()?; // Compute, for each layout version, the set of nodes that might store // the block, and put them in their preferred order as of `request_order`. - let mut vernodes = vec![]; - for ver in layout.versions()?.iter() { - let nodes = ver.nodes_of(position); - vernodes.push(rpc_helper.request_order(current_layout, nodes)); - } + let mut vernodes = layout.versions().iter().map(|ver| { + let nodes = ver.nodes_of(position, ver.replication_factor); + rpc_helper.request_order(layout.current(), nodes) + }); - let mut ret = if vernodes.len() == 1 { + let mut ret = if layout.versions().len() == 1 { // If we have only one active layout version, then these are the // only nodes we ask in step 1 - vernodes.into_iter().next().unwrap() + vernodes.next().unwrap() } else { + let vernodes = vernodes.collect::>(); + let mut nodes = Vec::::with_capacity(12); - for i in 0..current_layout.replication_factor { + for i in 0..layout.current().replication_factor { for vn in vernodes.iter() { if let Some(n) = vn.get(i) { - if !nodes.contains(n) { + if !nodes.contains(&n) { if *n == self.0.our_node_id { // it's always fast (almost free) to ask locally, // so always put that as first choice @@ -613,15 +607,15 @@ impl RpcHelper { // Second step: add nodes of older layout versions let old_ver_iter = 
layout.inner().old_versions.iter().rev(); for ver in old_ver_iter { - let nodes = ver.nodes_of(position); - for node in rpc_helper.request_order(current_layout, nodes) { + let nodes = ver.nodes_of(position, ver.replication_factor); + for node in rpc_helper.request_order(layout.current(), nodes) { if !ret.contains(&node) { ret.push(node); } } } - Ok(ret) + ret } fn request_order( @@ -637,8 +631,8 @@ impl RpcHelper { // The tuples are as follows: // (is another node?, is another zone?, latency, node ID, request future) // We store all of these tuples in a vec that we can sort. - // By sorting this vec, we prioritize ourself, then nodes in the same zone, - // and within a same zone we prioritize nodes with the lowest latency. + // By sorting this vec, we priorize ourself, then nodes in the same zone, + // and within a same zone we priorize nodes with the lowest latency. let mut nodes = nodes .map(|to| { let peer_zone = layout.get_node_zone(&to).unwrap_or(""); @@ -656,7 +650,7 @@ impl RpcHelper { }) .collect::>(); - // Sort requests by (prioritize ourself, prioritize same zone, prioritize low latency) + // Sort requests by (priorize ourself, priorize same zone, priorize low latency) nodes.sort_by_key(|(diffnode, diffzone, ping, _to)| (*diffnode, *diffzone, *ping)); nodes diff --git a/src/rpc/system.rs b/src/rpc/system.rs index 0cd301ff..2a52ae5d 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -45,7 +45,7 @@ const STATUS_EXCHANGE_INTERVAL: Duration = Duration::from_secs(10); /// Version tag used for version check upon Netapp connection. /// Cluster nodes with different version tags are deemed /// incompatible and will refuse to connect. 
-pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650020; // garage 0x0020 (2.0) +pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650010; // garage 0x0010 (1.0) /// RPC endpoint used for calls related to membership pub const SYSTEM_RPC_PATH: &str = "garage_rpc/system.rs/SystemRpc"; @@ -55,9 +55,9 @@ pub const SYSTEM_RPC_PATH: &str = "garage_rpc/system.rs/SystemRpc"; pub enum SystemRpc { /// Response to successful advertisements Ok, - /// Request to connect to a specific node (in `@:` format, pubkey = full-length node ID) + /// Request to connect to a specific node (in @: format, pubkey = full-length node ID) Connect(String), - /// Advertise Garage status. Answered with another `AdvertiseStatus`. + /// Advertise Garage status. Answered with another AdvertiseStatus. /// Exchanged with every node on a regular basis. AdvertiseStatus(NodeStatus), /// Get known nodes states @@ -65,9 +65,9 @@ pub enum SystemRpc { /// Return known nodes ReturnKnownNodes(Vec), - /// Ask other node its cluster layout. Answered with `AdvertiseClusterLayout` + /// Ask other node its cluster layout. Answered with AdvertiseClusterLayout PullClusterLayout, - /// Advertisement of cluster layout. Sent spontanously or in response to `PullClusterLayout` + /// Advertisement of cluster layout. Sent spontanously or in response to PullClusterLayout AdvertiseClusterLayout(LayoutHistory), /// Ask other node its cluster layout update trackers. 
PullClusterLayoutTrackers, @@ -124,9 +124,6 @@ pub struct NodeStatus { /// Hostname of the node pub hostname: Option, - /// Garage version of the node - pub garage_version: Option, - /// Replication factor configured on the node pub replication_factor: usize, @@ -372,10 +369,6 @@ impl System { &self.layout_manager.rpc_helper } - pub fn local_status(&self) -> NodeStatus { - self.local_status.read().unwrap().clone() - } - // ---- Administrative operations (directly available and // also available through RPC) ---- @@ -453,46 +446,32 @@ impl System { // Acquire a rwlock read-lock to the current cluster layout let layout = self.cluster_layout(); - let layout_versions = match layout.versions() { - Ok(v) => v, - Err(_) => { - // Layout not yet configured, special case - return ClusterHealth { - status: ClusterHealthStatus::Unavailable, - known_nodes: nodes.len(), - connected_nodes, - storage_nodes: 0, - storage_nodes_ok: 0, - partitions: 0, - partitions_quorum: 0, - partitions_all_ok: 0, - }; - } - }; - let current_layout = layout_versions.last().unwrap(); // Obtain information about nodes that have a role as storage nodes // in one of the active layout versions let mut storage_nodes = HashSet::::with_capacity(16); - for ver in layout_versions.iter() { + for ver in layout.versions().iter() { storage_nodes.extend( ver.roles .items() .iter() .filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity.is_some())) .map(|(n, _, _)| *n), - ); + ) } let storage_nodes_ok = storage_nodes.iter().filter(|x| node_up(x)).count(); // Determine the number of partitions that have: // - a quorum of up nodes for all write sets (i.e. are available) // - for which all nodes in all write sets are up (i.e. 
are fully healthy) - let partitions = current_layout.partitions().collect::>(); + let partitions = layout.current().partitions().collect::>(); let mut partitions_quorum = 0; let mut partitions_all_ok = 0; for (_, hash) in partitions.iter() { - let mut write_sets = layout_versions.iter().map(|x| x.nodes_of(hash)); + let mut write_sets = layout + .versions() + .iter() + .map(|x| x.nodes_of(hash, x.replication_factor)); let has_quorum = write_sets .clone() .all(|set| set.filter(|x| node_up(x)).count() >= quorum); @@ -647,42 +626,26 @@ impl System { async fn discovery_loop(self: &Arc, mut stop_signal: watch::Receiver) { while !*stop_signal.borrow() { - let peers_up = self + let n_connected = self .peering .get_peer_list() .iter() .filter(|p| p.is_up()) - .map(|p| Uuid::from(p.id)) - .collect::>(); + .count(); - let do_bootstrap = match self.cluster_layout().all_nodes() { - Err(_) => { - debug!("doing bootstrap/discovery step (layout not configured)"); - true - } - Ok(all_nodes) => { - // Do bootstrap if we have fewer peers than the replication - // factor, - // or if some peers in the layout are not connected - let do_bootstrap = peers_up.len() < self.replication_factor.into() - || all_nodes.iter().any(|x| !peers_up.contains(x)); - if do_bootstrap { - debug!( - "doing bootstrap/discovery step (peers_up: {}, all_nodes: {})", - peers_up.len(), - all_nodes.len() - ); - } - do_bootstrap - } - }; + let not_configured = !self.cluster_layout().is_check_ok(); + let no_peers = n_connected < self.replication_factor.into(); + let expected_n_nodes = self.cluster_layout().all_nodes().len(); + let bad_peers = n_connected != expected_n_nodes; + + if not_configured || no_peers || bad_peers { + info!("Doing a bootstrap/discovery step (not_configured: {}, no_peers: {}, bad_peers: {})", not_configured, no_peers, bad_peers); - if do_bootstrap { let mut ping_list = resolve_peers(&self.bootstrap_peers).await; // Add peer list from list stored on disk if let Ok(peers) = 
self.persist_peer_list.load_async().await { - ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr))); + ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr))) } // Fetch peer list from Consul @@ -705,9 +668,9 @@ impl System { match create_kubernetes_crd().await { Ok(()) => (), Err(e) => { - error!("Failed to create kubernetes custom resource: {}", e); + error!("Failed to create kubernetes custom resource: {}", e) } - } + }; } match get_kubernetes_nodes(k).await { @@ -720,13 +683,12 @@ impl System { } } - if let Ok(all_nodes) = self.cluster_layout().all_nodes() { - if peers_up.len() >= self.replication_factor.into() { - // If the layout is configured, and we already have some connections - // to other nodes in the cluster, we can skip trying to connect to - // nodes that are not in the cluster layout. - ping_list.retain(|(id, _)| all_nodes.contains(&(*id).into())); - } + if !not_configured && !no_peers { + // If the layout is configured, and we already have some connections + // to other nodes in the cluster, we can skip trying to connect to + // nodes that are not in the cluster layout. 
+ let layout = self.cluster_layout(); + ping_list.retain(|(id, _)| layout.all_nodes().contains(&(*id).into())); } for (node_id, node_addr) in ping_list { @@ -824,7 +786,6 @@ impl NodeStatus { .into_string() .unwrap_or_else(|_| "".to_string()), ), - garage_version: Some(garage_util::version::garage_version().to_string()), replication_factor: replication_factor.into(), layout_digest: layout_manager.layout().digest(), meta_disk_avail: None, @@ -835,7 +796,6 @@ impl NodeStatus { fn unknown() -> Self { NodeStatus { hostname: None, - garage_version: None, replication_factor: 0, layout_digest: Default::default(), meta_disk_avail: None, @@ -856,7 +816,6 @@ impl NodeStatus { }; let mount_avail = |path: &Path| match statvfs(path) { - #[allow(clippy::unnecessary_cast)] Ok(x) => { let avail = x.blocks_available() as u64 * x.fragment_size() as u64; let total = x.blocks() as u64 * x.fragment_size() as u64; @@ -908,7 +867,7 @@ impl NodeStatus { } /// Obtain the list of currently available IP addresses on all non-loopback -/// interfaces, optionally filtering them to be inside a given `IpNet`. +/// interfaces, optionally filtering them to be inside a given IpNet. 
fn get_default_ip(filter_ipnet: Option) -> Option { pnet_datalink::interfaces() .into_iter() @@ -952,13 +911,13 @@ fn get_rpc_public_addr(config: &Config) -> Option { let filter_subnet: Option = config .rpc_public_addr_subnet .as_ref() - .map(|filter_subnet_str| match filter_subnet_str.parse::() { + .and_then(|filter_subnet_str| match filter_subnet_str.parse::() { Ok(filter_subnet) => { let filter_subnet_trunc = filter_subnet.trunc(); if filter_subnet_trunc != filter_subnet { warn!("`rpc_public_addr_subnet` changed after applying netmask, continuing with {}", filter_subnet.trunc()); } - filter_subnet_trunc + Some(filter_subnet_trunc) } Err(e) => { panic!( diff --git a/src/rpc/system_metrics.rs b/src/rpc/system_metrics.rs index 3d6c6e8b..a64daec8 100644 --- a/src/rpc/system_metrics.rs +++ b/src/rpc/system_metrics.rs @@ -5,7 +5,7 @@ use opentelemetry::{global, metrics::*, KeyValue}; use crate::system::{ClusterHealthStatus, System}; -/// `TableMetrics` reference all counter used for metrics +/// TableMetrics reference all counter used for metrics pub struct SystemMetrics { // Static values pub(crate) _garage_build_info: ValueObserver, @@ -60,7 +60,7 @@ impl SystemMetrics { KeyValue::new("rustversion", garage_util::version::rust_version()), KeyValue::new("version", garage_util::version::garage_version()), ], - ); + ) }) .with_description("Garage build info") .init(), @@ -68,7 +68,7 @@ impl SystemMetrics { let replication_factor = system.replication_factor; meter .u64_value_observer("garage_replication_factor", move |observer| { - observer.observe(usize::from(replication_factor) as u64, &[]); + observer.observe(replication_factor.replication_factor() as u64, &[]) }) .with_description("Garage replication factor setting") .init() @@ -216,13 +216,10 @@ impl SystemMetrics { .u64_value_observer("cluster_layout_node_connected", move |observer| { let layout = system.cluster_layout(); let nodes = system.get_known_nodes(); - for id in 
layout.all_nodes().unwrap_or_default().iter() { + for id in layout.all_nodes().iter() { let mut kv = vec![KeyValue::new("id", format!("{:?}", id))]; - if let Some(role) = layout - .current() - .ok() - .and_then(|l| l.roles.get(id)) - .and_then(|r| r.0.as_ref()) + if let Some(role) = + layout.current().roles.get(id).and_then(|r| r.0.as_ref()) { kv.push(KeyValue::new("role_zone", role.zone.clone())); match role.capacity { @@ -263,13 +260,10 @@ impl SystemMetrics { .u64_value_observer("cluster_layout_node_disconnected_time", move |observer| { let layout = system.cluster_layout(); let nodes = system.get_known_nodes(); - for id in layout.all_nodes().unwrap_or_default().iter() { + for id in layout.all_nodes().iter() { let mut kv = vec![KeyValue::new("id", format!("{:?}", id))]; - if let Some(role) = layout - .current() - .ok() - .and_then(|l| l.roles.get(id)) - .and_then(|r| r.0.as_ref()) + if let Some(role) = + layout.current().roles.get(id).and_then(|r| r.0.as_ref()) { kv.push(KeyValue::new("role_zone", role.zone.clone())); match role.capacity { diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index e4c5f750..478dbd18 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_table" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -33,6 +33,3 @@ serde_bytes.workspace = true futures.workspace = true futures-util.workspace = true tokio.workspace = true - -[lints] -workspace = true diff --git a/src/table/data.rs b/src/table/data.rs index 86bf7b93..1d0308ce 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -66,7 +66,6 @@ impl TableData { store.clone(), merkle_tree.clone(), merkle_todo.clone(), - insert_queue.clone(), gc_todo.clone(), ); @@ -254,7 +253,7 @@ impl TableData { // any node of the partition is unavailable. 
let pk_hash = Hash::try_from(&tree_key[..32]).unwrap(); // TODO: this probably breaks when the layout changes - let nodes = self.replication.storage_nodes(&pk_hash)?; + let nodes = self.replication.storage_nodes(&pk_hash); if nodes.first() == Some(&self.system.id) { GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?; } @@ -368,10 +367,6 @@ impl TableData { } } - pub fn insert_queue_approximate_len(&self) -> Result { - Ok(self.insert_queue.approximate_len()?) - } - pub fn gc_todo_approximate_len(&self) -> Result { Ok(self.gc_todo.approximate_len()?) } diff --git a/src/table/gc.rs b/src/table/gc.rs index af84b9df..1f30bd76 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -153,7 +153,7 @@ impl TableGc { let mut partitions = HashMap::new(); for entry in entries { let pkh = Hash::try_from(&entry.key[..32]).unwrap(); - let mut nodes = self.data.replication.storage_nodes(&pkh)?; + let mut nodes = self.data.replication.storage_nodes(&pkh); nodes.retain(|x| *x != self.system.id); nodes.sort(); @@ -334,16 +334,17 @@ impl Worker for GcWorker { } } -/// An entry stored in the `gc_todo` db tree associated with the table +/// An entry stored in the gc_todo db tree associated with the table /// Contains helper function for parsing, saving, and removing /// such entry in the db /// /// Format of an entry: -/// - key = 8 bytes: timestamp of tombstone (used to implement GC delay) -/// n bytes: key in the main data table +/// - key = 8 bytes: timestamp of tombstone +/// (used to implement GC delay) +/// n bytes: key in the main data table /// - value = hash of the table entry to delete (the tombstone) -/// for verification purpose, because we don't want to delete -/// things that aren't tombstones +/// for verification purpose, because we don't want to delete +/// things that aren't tombstones pub(crate) struct GcTodoEntry { tombstone_timestamp: u64, key: Vec, @@ -352,7 +353,7 @@ pub(crate) struct GcTodoEntry { } impl GcTodoEntry { - /// Creates a new `GcTodoEntry` 
(not saved in the db) from its components: + /// Creates a new GcTodoEntry (not saved in the db) from its components: /// the key of an entry in the table, and the hash of the associated /// serialized value pub(crate) fn new(key: Vec, value_hash: Hash) -> Self { @@ -364,7 +365,7 @@ impl GcTodoEntry { } } - /// Parses a `GcTodoEntry` from a (k, v) pair stored in the `gc_todo` tree + /// Parses a GcTodoEntry from a (k, v) pair stored in the gc_todo tree pub(crate) fn parse(db_k: &[u8], db_v: &[u8]) -> Self { Self { tombstone_timestamp: u64::from_be_bytes(db_k[0..8].try_into().unwrap()), @@ -374,13 +375,13 @@ impl GcTodoEntry { } } - /// Saves the `GcTodoEntry` in the `gc_todo` tree + /// Saves the GcTodoEntry in the gc_todo tree pub(crate) fn save(&self, gc_todo_tree: &db::Tree) -> Result<(), Error> { gc_todo_tree.insert(self.todo_table_key(), self.value_hash.as_slice())?; Ok(()) } - /// Removes the `GcTodoEntry` from the `gc_todo` tree if the + /// Removes the GcTodoEntry from the gc_todo tree if the /// hash of the serialized value is the same here as in the tree. /// This is useful to remove a todo entry only under the condition /// that it has not changed since the time it was read, i.e. 
diff --git a/src/table/merkle.rs b/src/table/merkle.rs index a162b225..7ba1f007 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -102,7 +102,7 @@ impl MerkleUpdater { partition: self .data .replication - .partition_of(&Hash::try_from(&k[0..32]).unwrap())?, + .partition_of(&Hash::try_from(&k[0..32]).unwrap()), prefix: vec![], }; self.data diff --git a/src/table/metrics.rs b/src/table/metrics.rs index b1a787f2..78593202 100644 --- a/src/table/metrics.rs +++ b/src/table/metrics.rs @@ -2,12 +2,11 @@ use opentelemetry::{global, metrics::*, KeyValue}; use garage_db as db; -/// `TableMetrics` reference all counter used for metrics +/// TableMetrics reference all counter used for metrics pub struct TableMetrics { pub(crate) _table_size: ValueObserver, pub(crate) _merkle_tree_size: ValueObserver, pub(crate) _merkle_todo_len: ValueObserver, - pub(crate) _insert_queue_len: ValueObserver, pub(crate) _gc_todo_len: ValueObserver, pub(crate) get_request_counter: BoundCounter, @@ -27,7 +26,6 @@ impl TableMetrics { store: db::Tree, merkle_tree: db::Tree, merkle_todo: db::Tree, - insert_queue: db::Tree, gc_todo: db::Tree, ) -> Self { let meter = global::meter(table_name); @@ -74,20 +72,6 @@ impl TableMetrics { ) .with_description("Merkle tree updater TODO queue length") .init(), - _insert_queue_len: meter - .u64_value_observer( - "table.insert_queue_length", - move |observer| { - if let Ok(v) = insert_queue.approximate_len() { - observer.observe( - v as u64, - &[KeyValue::new("table_name", table_name)], - ); - } - }, - ) - .with_description("Table insert queue length") - .init(), _gc_todo_len: meter .u64_value_observer( "table.gc_todo_queue_length", diff --git a/src/table/replication/fullcopy.rs b/src/table/replication/fullcopy.rs index f3ee51fe..1e52bb47 100644 --- a/src/table/replication/fullcopy.rs +++ b/src/table/replication/fullcopy.rs @@ -1,10 +1,8 @@ use std::sync::Arc; -use std::time::Duration; use garage_rpc::layout::*; -use 
garage_rpc::{replication_mode::ConsistencyMode, system::System}; +use garage_rpc::system::System; use garage_util::data::*; -use garage_util::error::Error; use crate::replication::*; @@ -23,96 +21,53 @@ use crate::replication::*; pub struct TableFullReplication { /// The membership manager of this node pub system: Arc, - pub consistency_mode: ConsistencyMode, } impl TableReplication for TableFullReplication { - type WriteSets = WriteLock>>; + type WriteSets = Vec>; - // Do anti-entropy every 10 seconds. - // Compared to sharded tables, anti-entropy is much less costly as there is - // a single partition hash to exchange. - // Also, it's generally a much bigger problem for fullcopy tables to be out of sync. - const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10); - - fn storage_nodes(&self, _hash: &Hash) -> Result, Error> { - Ok(self.system.cluster_layout().all_nodes()?.to_vec()) - } - - fn read_nodes(&self, _hash: &Hash) -> Result, Error> { - Ok(self - .system - .cluster_layout() - .read_version()? - .all_nodes() - .to_vec()) - } - fn read_quorum(&self) -> Result { - match self.consistency_mode { - ConsistencyMode::Dangerous | ConsistencyMode::Degraded => Ok(1), - ConsistencyMode::Consistent => { - let layout = self.system.cluster_layout(); - let nodes = layout.read_version()?.all_nodes(); - Ok(nodes.len().div_ceil(2)) - } - } - } - - fn write_sets(&self, _hash: &Hash) -> Result { - self.system.layout_manager.write_lock_with(write_sets) - } - fn write_quorum(&self) -> Result { - match self.consistency_mode { - ConsistencyMode::Dangerous => Ok(1), - ConsistencyMode::Degraded | ConsistencyMode::Consistent => { - let layout = self.system.cluster_layout(); - let min_len = layout - .versions()? - .iter() - .map(|x| x.all_nodes().len()) - .min() - .unwrap(); - let max_quorum = layout - .versions()? 
- .iter() - .map(|x| x.all_nodes().len().div_euclid(2) + 1) - .max() - .unwrap(); - if min_len < max_quorum { - warn!("Write quorum will not be respected for TableFullReplication operations due to multiple active layout versions with vastly different number of nodes"); - Ok(std::cmp::max(1, min_len)) - } else { - Ok(max_quorum) - } - } - } - } - - fn partition_of(&self, _hash: &Hash) -> Result { - Ok(0u16) - } - - fn sync_partitions(&self) -> Result { + fn storage_nodes(&self, _hash: &Hash) -> Vec { let layout = self.system.cluster_layout(); - let layout_version = layout.ack_map_min(); + layout.current().all_nodes().to_vec() + } - let partitions = vec![SyncPartition { - partition: 0u16, - first_hash: [0u8; 32].into(), - last_hash: [0xff; 32].into(), - storage_sets: write_sets(layout.versions()?), - }]; + fn read_nodes(&self, _hash: &Hash) -> Vec { + vec![self.system.id] + } + fn read_quorum(&self) -> usize { + 1 + } - Ok(SyncPartitions { + fn write_sets(&self, hash: &Hash) -> Self::WriteSets { + vec![self.storage_nodes(hash)] + } + fn write_quorum(&self) -> usize { + let nmembers = self.system.cluster_layout().current().all_nodes().len(); + + let max_faults = if nmembers > 1 { 1 } else { 0 }; + + if nmembers > max_faults { + nmembers - max_faults + } else { + 1 + } + } + + fn partition_of(&self, _hash: &Hash) -> Partition { + 0u16 + } + + fn sync_partitions(&self) -> SyncPartitions { + let layout = self.system.cluster_layout(); + let layout_version = layout.current().version; + SyncPartitions { layout_version, - partitions, - }) + partitions: vec![SyncPartition { + partition: 0u16, + first_hash: [0u8; 32].into(), + last_hash: [0xff; 32].into(), + storage_sets: vec![layout.current().all_nodes().to_vec()], + }], + } } } - -fn write_sets(layout_versions: &[LayoutVersion]) -> Vec> { - layout_versions - .iter() - .map(|x| x.all_nodes().to_vec()) - .collect() -} diff --git a/src/table/replication/parameters.rs b/src/table/replication/parameters.rs index 
0bdfebd0..3649fad3 100644 --- a/src/table/replication/parameters.rs +++ b/src/table/replication/parameters.rs @@ -1,36 +1,31 @@ -use std::time::Duration; - use garage_rpc::layout::*; use garage_util::data::*; -use garage_util::error::Error; /// Trait to describe how a table shall be replicated pub trait TableReplication: Send + Sync + 'static { type WriteSets: AsRef>> + AsMut>> + Send + Sync + 'static; - const ANTI_ENTROPY_INTERVAL: Duration; - // See examples in table_sharded.rs and table_fullcopy.rs // To understand various replication methods /// The entire list of all nodes that store a partition - fn storage_nodes(&self, hash: &Hash) -> Result, Error>; + fn storage_nodes(&self, hash: &Hash) -> Vec; /// Which nodes to send read requests to - fn read_nodes(&self, hash: &Hash) -> Result, Error>; + fn read_nodes(&self, hash: &Hash) -> Vec; /// Responses needed to consider a read successful - fn read_quorum(&self) -> Result; + fn read_quorum(&self) -> usize; /// Which nodes to send writes to - fn write_sets(&self, hash: &Hash) -> Result; + fn write_sets(&self, hash: &Hash) -> Self::WriteSets; /// Responses needed to consider a write successful in each set - fn write_quorum(&self) -> Result; + fn write_quorum(&self) -> usize; // Accessing partitions, for Merkle tree & sync /// Get partition for data with given hash - fn partition_of(&self, hash: &Hash) -> Result; + fn partition_of(&self, hash: &Hash) -> Partition; /// List of partitions and nodes to sync with in current layout - fn sync_partitions(&self) -> Result; + fn sync_partitions(&self) -> SyncPartitions; } #[derive(Debug)] diff --git a/src/table/replication/sharded.rs b/src/table/replication/sharded.rs index b8983a4a..e0245949 100644 --- a/src/table/replication/sharded.rs +++ b/src/table/replication/sharded.rs @@ -1,12 +1,9 @@ use std::sync::Arc; -use std::time::Duration; use garage_rpc::layout::*; -use garage_rpc::replication_mode::ConsistencyMode; +use garage_rpc::system::System; use garage_util::data::*; 
-use garage_util::error::Error; -use crate::replication::sharded::manager::LayoutManager; use crate::replication::*; /// Sharded replication schema: @@ -18,74 +15,54 @@ use crate::replication::*; #[derive(Clone)] pub struct TableShardedReplication { /// The membership manager of this node - pub layout_manager: Arc, - pub consistency_mode: ConsistencyMode, + pub system: Arc, + /// How many time each data should be replicated + pub replication_factor: usize, + /// How many nodes to contact for a read, should be at most `replication_factor` + pub read_quorum: usize, + /// How many nodes to contact for a write, should be at most `replication_factor` + pub write_quorum: usize, } impl TableReplication for TableShardedReplication { - // Do anti-entropy every 10 minutes - const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60); - type WriteSets = WriteLock>>; - fn storage_nodes(&self, hash: &Hash) -> Result, Error> { - let mut ret = vec![]; - for version in self.layout_manager.layout().versions()?.iter() { - ret.extend(version.nodes_of(hash)); - } - ret.sort(); - ret.dedup(); - Ok(ret) + fn storage_nodes(&self, hash: &Hash) -> Vec { + self.system.cluster_layout().storage_nodes_of(hash) } - fn read_nodes(&self, hash: &Hash) -> Result, Error> { - Ok(self - .layout_manager - .layout() - .read_version()? - .nodes_of(hash) - .collect()) + fn read_nodes(&self, hash: &Hash) -> Vec { + self.system.cluster_layout().read_nodes_of(hash) + } + fn read_quorum(&self) -> usize { + self.read_quorum } - fn read_quorum(&self) -> Result { - Ok(self - .layout_manager - .layout() - .read_version()? 
- .read_quorum(self.consistency_mode)) + fn write_sets(&self, hash: &Hash) -> Self::WriteSets { + self.system.layout_manager.write_sets_of(hash) + } + fn write_quorum(&self) -> usize { + self.write_quorum } - fn write_sets(&self, hash: &Hash) -> Result { - self.layout_manager - .write_lock_with(|lvs| write_sets(lvs, hash)) + fn partition_of(&self, hash: &Hash) -> Partition { + self.system.cluster_layout().current().partition_of(hash) } - fn write_quorum(&self) -> Result { - Ok(self - .layout_manager - .layout() - .current()? - .write_quorum(self.consistency_mode)) - } - - fn partition_of(&self, hash: &Hash) -> Result { - Ok(self.layout_manager.layout().current()?.partition_of(hash)) - } - - fn sync_partitions(&self) -> Result { - let layout = self.layout_manager.layout(); - let layout_versions = layout.versions()?; + fn sync_partitions(&self) -> SyncPartitions { + let layout = self.system.cluster_layout(); let layout_version = layout.ack_map_min(); let mut partitions = layout - .current()? + .current() .partitions() .map(|(partition, first_hash)| { + let storage_sets = layout.storage_sets_of(&first_hash); SyncPartition { partition, first_hash, last_hash: [0u8; 32].into(), // filled in just after - storage_sets: write_sets(layout_versions, &first_hash), + storage_sets, } }) .collect::>(); @@ -98,16 +75,9 @@ impl TableReplication for TableShardedReplication { }; } - Ok(SyncPartitions { + SyncPartitions { layout_version, partitions, - }) + } } } - -fn write_sets(layout_versions: &[LayoutVersion], hash: &Hash) -> Vec> { - layout_versions - .iter() - .map(|x| x.nodes_of(hash).collect()) - .collect() -} diff --git a/src/table/schema.rs b/src/table/schema.rs index e3b81bdb..fc1a465e 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -22,7 +22,7 @@ impl PartitionKey for String { } } -/// Values of type `FixedBytes32` are assumed to be random, +/// Values of type FixedBytes32 are assumed to be random, /// either a hash or a random UUID. 
This means we can use /// them directly as an index into the hash table. impl PartitionKey for FixedBytes32 { diff --git a/src/table/sync.rs b/src/table/sync.rs index 5de5f7ae..2d43b9fc 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -27,6 +27,9 @@ use crate::merkle::*; use crate::replication::*; use crate::*; +// Do anti-entropy every 10 minutes +const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60); + pub struct TableSyncer { system: Arc, data: Arc>, @@ -115,7 +118,7 @@ impl TableSyncer { ); let mut result_tracker = QuorumSetResultTracker::new( &partition.storage_sets, - self.data.replication.write_quorum()?, + self.data.replication.write_quorum(), ); let mut sync_futures = result_tracker @@ -179,7 +182,7 @@ impl TableSyncer { } if !items.is_empty() { - let nodes = self.data.replication.storage_nodes(begin)?; + let nodes = self.data.replication.storage_nodes(begin); if nodes.contains(&self.system.id) { warn!( "({}) Interrupting offload as partitions seem to have changed", @@ -187,7 +190,7 @@ impl TableSyncer { ); break; } - if nodes.len() < self.data.replication.write_quorum()? { + if nodes.len() < self.data.replication.write_quorum() { return Err(Error::Message( "Not offloading as we don't have a quorum of nodes to write to." 
.to_string(), @@ -502,22 +505,16 @@ impl SyncWorker { } fn add_full_sync(&mut self) { - match self.syncer.data.replication.sync_partitions() { - Ok(mut partitions) => { - debug!( - "{}: Adding full sync for ack layout version {}", - F::TABLE_NAME, - partitions.layout_version - ); + let mut partitions = self.syncer.data.replication.sync_partitions(); + info!( + "{}: Adding full sync for ack layout version {}", + F::TABLE_NAME, + partitions.layout_version + ); - partitions.partitions.shuffle(&mut rand::rng()); - self.todo = Some(partitions); - } - Err(e) => { - debug!("{}: Not adding full sync: {}", F::TABLE_NAME, e); - } - } - self.next_full_sync = Instant::now() + R::ANTI_ENTROPY_INTERVAL; + partitions.partitions.shuffle(&mut thread_rng()); + self.todo = Some(partitions); + self.next_full_sync = Instant::now() + ANTI_ENTROPY_INTERVAL; } } @@ -559,7 +556,7 @@ impl Worker for SyncWorker { } if todo.partitions.is_empty() { - debug!( + info!( "{}: Completed full sync for ack layout version {}", F::TABLE_NAME, todo.layout_version diff --git a/src/table/table.rs b/src/table/table.rs index 8ddd8378..c96f4731 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -119,7 +119,7 @@ impl Table { async fn insert_internal(&self, e: &F::E) -> Result<(), Error> { let hash = e.partition_key().hash(); - let who = self.data.replication.write_sets(&hash)?; + let who = self.data.replication.write_sets(&hash); let e_enc = Arc::new(ByteBuf::from(e.encode()?)); let rpc = TableRpc::::Update(vec![e_enc]); @@ -131,7 +131,7 @@ impl Table { who.as_ref(), rpc, RequestStrategy::with_priority(PRIO_NORMAL) - .with_quorum(self.data.replication.write_quorum()?), + .with_quorum(self.data.replication.write_quorum()), ) .await?; @@ -180,7 +180,7 @@ impl Table { // a quorum of nodes has answered OK, then the insert has succeeded and // consistency properties (read-after-write) are preserved. 
- let quorum = self.data.replication.write_quorum()?; + let quorum = self.data.replication.write_quorum(); // Serialize all entries and compute the write sets for each of them. // In the case of sharded table replication, this also takes an "ack lock" @@ -193,7 +193,7 @@ impl Table { for entry in entries.into_iter() { let entry = entry.borrow(); let hash = entry.partition_key().hash(); - let mut write_sets = self.data.replication.write_sets(&hash)?; + let mut write_sets = self.data.replication.write_sets(&hash); for set in write_sets.as_mut().iter_mut() { // Sort nodes in each write sets to merge write sets with same // nodes but in possibly different orders @@ -309,7 +309,7 @@ impl Table { sort_key: &F::S, ) -> Result, Error> { let hash = partition_key.hash(); - let who = self.data.replication.read_nodes(&hash)?; + let who = self.data.replication.read_nodes(&hash); let rpc = TableRpc::::ReadEntry(partition_key.clone(), sort_key.clone()); let resps = self @@ -320,7 +320,7 @@ impl Table { &who, rpc, RequestStrategy::with_priority(PRIO_NORMAL) - .with_quorum(self.data.replication.read_quorum()?), + .with_quorum(self.data.replication.read_quorum()), ) .await?; @@ -397,7 +397,7 @@ impl Table { enumeration_order: EnumerationOrder, ) -> Result, Error> { let hash = partition_key.hash(); - let who = self.data.replication.read_nodes(&hash)?; + let who = self.data.replication.read_nodes(&hash); let rpc = TableRpc::::ReadRange { partition: partition_key.clone(), @@ -415,7 +415,7 @@ impl Table { &who, rpc, RequestStrategy::with_priority(PRIO_NORMAL) - .with_quorum(self.data.replication.read_quorum()?), + .with_quorum(self.data.replication.read_quorum()), ) .await?; @@ -482,15 +482,6 @@ impl Table { Ok(ret_vec) } - pub fn get_local( - self: &Arc, - partition_key: &F::P, - sort_key: &F::S, - ) -> Result, Error> { - let bytes = self.data.read_entry(partition_key, sort_key)?; - bytes.map(|b| self.data.decode_entry(&b)).transpose() - } - // =============== UTILITY FUNCTION FOR 
CLIENT OPERATIONS =============== async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> { diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index cbba213c..46fa6590 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_util" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -52,6 +52,3 @@ mktemp.workspace = true [features] k2v = [] - -[lints] -workspace = true diff --git a/src/util/background/mod.rs b/src/util/background/mod.rs index 6b911b5e..607cd7a3 100644 --- a/src/util/background/mod.rs +++ b/src/util/background/mod.rs @@ -6,6 +6,7 @@ pub mod worker; use std::collections::HashMap; use std::sync::Arc; +use serde::{Deserialize, Serialize}; use tokio::sync::{mpsc, watch}; use worker::WorkerProcessor; @@ -17,7 +18,7 @@ pub struct BackgroundRunner { worker_info: Arc>>, } -#[derive(Clone, Debug)] +#[derive(Clone, Serialize, Deserialize, Debug)] pub struct WorkerInfo { pub name: String, pub status: WorkerStatus, @@ -27,9 +28,9 @@ pub struct WorkerInfo { pub last_error: Option<(String, u64)>, } -/// `WorkerStatus` is a struct returned by the worker with a bunch of canonical +/// WorkerStatus is a struct returned by the worker with a bunch of canonical /// fields to indicate their status to CLI users. All fields are optional. 
-#[derive(Clone, Debug, Default)] +#[derive(Clone, Serialize, Deserialize, Debug, Default)] pub struct WorkerStatus { pub tranquility: Option, pub progress: Option, @@ -39,7 +40,7 @@ pub struct WorkerStatus { } impl BackgroundRunner { - /// Create a new `BackgroundRunner` + /// Create a new BackgroundRunner pub fn new(stop_signal: watch::Receiver) -> (Arc, tokio::task::JoinHandle<()>) { let (send_worker, worker_out) = mpsc::unbounded_channel::>(); @@ -68,6 +69,7 @@ impl BackgroundRunner { { self.send_worker .send(Box::new(worker)) + .ok() .expect("Could not put worker in queue"); } } diff --git a/src/util/background/worker.rs b/src/util/background/worker.rs index 612a98aa..3c938b7e 100644 --- a/src/util/background/worker.rs +++ b/src/util/background/worker.rs @@ -6,6 +6,7 @@ use async_trait::async_trait; use futures::future::*; use futures::stream::FuturesUnordered; use futures::StreamExt; +use serde::{Deserialize, Serialize}; use tokio::select; use tokio::sync::{mpsc, watch}; @@ -17,7 +18,7 @@ use crate::time::now_msec; // will be interrupted in the middle of whatever they are doing. const EXIT_DEADLINE: Duration = Duration::from_secs(8); -#[derive(PartialEq, Copy, Clone, Debug)] +#[derive(PartialEq, Copy, Clone, Serialize, Deserialize, Debug)] pub enum WorkerState { Busy, Throttled(f32), @@ -25,6 +26,17 @@ pub enum WorkerState { Done, } +impl std::fmt::Display for WorkerState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + WorkerState::Busy => write!(f, "Busy"), + WorkerState::Throttled(_) => write!(f, "Busy*"), + WorkerState::Idle => write!(f, "Idle"), + WorkerState::Done => write!(f, "Done"), + } + } +} + #[async_trait] pub trait Worker: Send { fn name(&self) -> String; @@ -34,11 +46,11 @@ pub trait Worker: Send { } /// Work: do a basic unit of work, if one is available (otherwise, should return - /// `WorkerState::Idle` immediately). 
We will do our best to not interrupt this future in the + /// WorkerState::Idle immediately). We will do our best to not interrupt this future in the /// middle of processing, it will only be interrupted at the last minute when Garage is trying /// to exit and this hasn't returned yet. This function may return an error to indicate that /// its unit of work could not be processed due to an error: the error will be logged and - /// .`work()` will be called again after a short delay. + /// .work() will be called again after a short delay. async fn work(&mut self, must_exit: &mut watch::Receiver) -> Result; /// Wait for work: await for some task to become available. This future can be interrupted in diff --git a/src/util/config.rs b/src/util/config.rs index aafc21b5..eb889ebe 100644 --- a/src/util/config.rs +++ b/src/util/config.rs @@ -47,7 +47,7 @@ pub struct Config { /// Maximum number of parallel block writes per PUT request /// Higher values improve throughput but increase memory usage - /// Default: 3, Recommended: 10-30 for `NVMe`, 3-10 for HDD + /// Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD #[serde(default = "default_block_max_concurrent_writes_per_request")] pub block_max_concurrent_writes_per_request: usize, /// Number of replicas. Can be any positive integer, but uneven numbers are more favorable. 
@@ -95,7 +95,7 @@ pub struct Config { pub rpc_secret_file: Option, /// Address to bind for RPC pub rpc_bind_addr: SocketAddr, - /// Bind outgoing sockets to `rpc_bind_addr`'s IP address as well + /// Bind outgoing sockets to rpc_bind_addr's IP address as well #[serde(default)] pub rpc_bind_outgoing: bool, /// Public IP address of this node @@ -154,7 +154,7 @@ pub struct Config { pub allow_punycode: bool, } -/// Value for `data_dir`: either a single directory or a list of dirs with attributes +/// Value for data_dir: either a single directory or a list of dirs with attributes #[derive(Deserialize, Debug, Clone)] #[serde(untagged)] pub enum DataDirEnum { @@ -166,7 +166,7 @@ pub enum DataDirEnum { pub struct DataDir { /// Path to the data directory pub path: PathBuf, - /// Capacity of the drive (required if `read_only` is false) + /// Capacity of the drive (required if read_only is false) #[serde(default)] pub capacity: Option, /// Whether this is a legacy read-only path (capacity should be None) @@ -215,9 +215,6 @@ pub struct AdminConfig { pub metrics_token: Option, /// File to read metrics token from pub metrics_token_file: Option, - /// Whether to require an access token for accessing the metrics endpoint - #[serde(default)] - pub metrics_require_token: bool, /// Bearer token to use to access Admin API endpoints pub admin_token: Option, @@ -262,8 +259,6 @@ pub struct ConsulDiscoveryConfig { /// Additional service metadata to add #[serde(default)] pub meta: Option>, - #[serde(default)] - pub datacenters: Vec, } #[derive(Deserialize, Debug, Clone)] @@ -305,7 +300,6 @@ fn default_consistency_mode() -> String { "consistent".into() } -#[expect(clippy::unnecessary_wraps)] fn default_compression() -> Option { Some(1) } diff --git a/src/util/crdt/crdt.rs b/src/util/crdt/crdt.rs index ebdc66d1..fdf63084 100644 --- a/src/util/crdt/crdt.rs +++ b/src/util/crdt/crdt.rs @@ -26,14 +26,14 @@ pub trait Crdt { fn merge(&mut self, other: &Self); } -/// `Option` implements Crdt for 
any type T, even if T doesn't implement CRDT itself: when +/// Option implements Crdt for any type T, even if T doesn't implement CRDT itself: when /// different values are detected, they are always merged to None. This can be used for value /// types which shoulnd't be merged, instead of trying to merge things when we know we don't want -/// to merge them (which is what the `AutoCrdt` trait is used for most of the time). This cases -/// arises very often, for example with a Lww or a `LwwMap`: the value type has to be a CRDT so that +/// to merge them (which is what the AutoCrdt trait is used for most of the time). This cases +/// arises very often, for example with a Lww or a LwwMap: the value type has to be a CRDT so that /// we have a rule for what to do when timestamps aren't enough to disambiguate (in a distributed -/// system, anything can happen!), and with `AutoCrdt` the rule is to make an arbitrary (but -/// deterministic) choice between the two. When using an `Option` instead with this impl, ambiguity +/// system, anything can happen!), and with AutoCrdt the rule is to make an arbitrary (but +/// deterministic) choice between the two. When using an Option instead with this impl, ambiguity /// cases are explicitly stored as None, which allows us to detect the ambiguity and handle it in /// the way we want. (this can only work if we are happy with losing the value when an ambiguity /// arises) @@ -52,7 +52,7 @@ where /// defined by the merge rule: `a ⊔ b = max(a, b)`. Implement this trait for your type /// to enable this behavior. pub trait AutoCrdt: Ord + Clone + std::fmt::Debug { - /// `WARN_IF_DIFFERENT`: emit a warning when values differ. Set this to true if + /// WARN_IF_DIFFERENT: emit a warning when values differ. Set this to true if /// different values in your application should never happen. Set this to false /// if you are actually relying on the semantics of `a ⊔ b = max(a, b)`. 
const WARN_IF_DIFFERENT: bool; diff --git a/src/util/crdt/deletable.rs b/src/util/crdt/deletable.rs index 0594d850..e771aceb 100644 --- a/src/util/crdt/deletable.rs +++ b/src/util/crdt/deletable.rs @@ -9,16 +9,6 @@ pub enum Deletable { Deleted, } -impl Deletable { - /// Map value, used for migrations - pub fn map U>(self, f: F) -> Deletable { - match self { - Self::Present(x) => Deletable::::Present(f(x)), - Self::Deleted => Deletable::::Deleted, - } - } -} - impl Deletable { /// Create a new deletable object that isn't deleted pub fn present(v: T) -> Self { diff --git a/src/util/crdt/lww.rs b/src/util/crdt/lww.rs index f8b03b85..80747406 100644 --- a/src/util/crdt/lww.rs +++ b/src/util/crdt/lww.rs @@ -43,16 +43,6 @@ pub struct Lww { v: T, } -impl Lww { - /// Map value, used for migrations - pub fn map U>(self, f: F) -> Lww { - Lww:: { - ts: self.ts, - v: f(self.v), - } - } -} - impl Lww where T: Crdt, diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs index 20a23913..def0ebeb 100644 --- a/src/util/crdt/lww_map.rs +++ b/src/util/crdt/lww_map.rs @@ -19,7 +19,7 @@ use crate::crdt::crdt::*; /// Internally, the map is stored as a vector of keys and values, sorted by ascending key order. /// This is why the key type `K` must implement `Ord` (and also to ensure a unique serialization, /// such that two values can be compared for equality based on their hashes). As a consequence, -/// insertions take `O(n)` time. This means that `LWWMap` should be used for reasonably small maps. +/// insertions take `O(n)` time. This means that LWWMap should be used for reasonably small maps. /// However, note that even if we were using a more efficient data structure such as a `BTreeMap`, /// the serialization cost `O(n)` would still have to be paid at each modification, so we are /// actually not losing anything here. 
diff --git a/src/util/data.rs b/src/util/data.rs index e4ce316b..1fe7dfe0 100644 --- a/src/util/data.rs +++ b/src/util/data.rs @@ -73,7 +73,7 @@ impl FixedBytes32 { pub fn to_vec(self) -> Vec { self.0.to_vec() } - /// Try building a `FixedBytes32` from a slice + /// Try building a FixedBytes32 from a slice /// Return None if the slice is not 32 bytes long pub fn try_from(by: &[u8]) -> Option { if by.len() != 32 { @@ -90,11 +90,11 @@ impl FixedBytes32 { if *byte == u8::MAX { *byte = 0; } else { - *byte += 1; + *byte = *byte + 1; return Some(ret); } } - None + return None; } } @@ -151,7 +151,7 @@ pub fn fasthash(data: &[u8]) -> FastHash { /// Generate a random 32 bytes UUID pub fn gen_uuid() -> Uuid { - rand::rng().random::<[u8; 32]>().into() + rand::thread_rng().gen::<[u8; 32]>().into() } #[cfg(test)] diff --git a/src/util/encode.rs b/src/util/encode.rs index 503d979d..c6815d49 100644 --- a/src/util/encode.rs +++ b/src/util/encode.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; -/// Serialize to `MessagePack`, without versioning -/// (see `garage_util::migrate` for functions that manage versioned +/// Serialize to MessagePack, without versioning +/// (see garage_util::migrate for functions that manage versioned /// data formats) pub fn nonversioned_encode(val: &T) -> Result, rmp_serde::encode::Error> where @@ -13,12 +13,12 @@ where Ok(wr) } -/// Deserialize from `MessagePack`, without versioning -/// (see `garage_util::migrate` for functions that manage versioned +/// Deserialize from MessagePack, without versioning +/// (see garage_util::migrate for functions that manage versioned /// data formats) pub fn nonversioned_decode(bytes: &[u8]) -> Result where - T: for<'de> Deserialize<'de>, + T: for<'de> Deserialize<'de> + ?Sized, { rmp_serde::decode::from_slice::<_>(bytes) } diff --git a/src/util/error.rs b/src/util/error.rs index c3d4ab58..170d2687 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -54,9 +54,6 @@ pub enum Error { #[error("Timeout")] 
Timeout, - #[error("Layout not ready")] - LayoutNotReady, - #[error("Could not reach quorum of {0} (sets={1:?}). {2} of {3} request succeeded, others returned errors: {4:?}")] Quorum(usize, Option, usize, usize, Vec), @@ -129,7 +126,7 @@ where } } -/// Trait to map any error type to `Error::Message` +/// Trait to map any error type to Error::Message pub trait OkOrMessage { type S; fn ok_or_message>(self, message: M) -> Result; diff --git a/src/util/metrics.rs b/src/util/metrics.rs index 2760afbf..b882a886 100644 --- a/src/util/metrics.rs +++ b/src/util/metrics.rs @@ -59,5 +59,5 @@ where // ---- pub fn gen_trace_id() -> TraceId { - rand::rng().random::<[u8; 16]>().into() + rand::thread_rng().gen::<[u8; 16]>().into() } diff --git a/src/util/migrate.rs b/src/util/migrate.rs index 0cb7da93..45147c74 100644 --- a/src/util/migrate.rs +++ b/src/util/migrate.rs @@ -54,7 +54,7 @@ impl Migrate for T { } } -/// Internal type used by `InitialFormat`, not meant for general use. +/// Internal type used by InitialFormat, not meant for general use. 
#[derive(Serialize, Deserialize)] pub enum NoPrevious {} diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml index a41ab6ef..e0cb317f 100644 --- a/src/web/Cargo.toml +++ b/src/web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_web" -version = "2.3.0" +version = "1.3.1" authors = ["Alex Auvolat ", "Quentin Dufour "] edition = "2018" license = "AGPL-3.0" @@ -31,6 +31,3 @@ hyper.workspace = true tokio.workspace = true opentelemetry.workspace = true - -[lints] -workspace = true diff --git a/src/web/web_server.rs b/src/web/web_server.rs index 2d0cac2d..ea02ab0f 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -25,14 +25,12 @@ use garage_api_common::cors::{ }; use garage_api_common::generic_server::{server_loop, UnixListenerOn}; use garage_api_common::helpers::*; -use garage_api_s3::api_server::ResBody; use garage_api_s3::error::{ CommonErrorDerivative, Error as ApiError, OkOrBadRequest, OkOrInternalError, }; use garage_api_s3::get::{handle_get_without_ctx, handle_head_without_ctx}; use garage_api_s3::website::X_AMZ_WEBSITE_REDIRECT_LOCATION; -use garage_model::bucket_table::{self, RoutingRule}; use garage_model::garage::Garage; use garage_table::*; @@ -106,7 +104,7 @@ impl WebServer { } UnixOrTCPSocketAddress::UnixSocket(ref path) => { if path.exists() { - fs::remove_file(path)?; + fs::remove_file(path)? 
} let listener = UnixListener::bind(path)?; @@ -155,8 +153,8 @@ impl WebServer { .span_builder(format!("Web {} request", req.method())) .with_trace_id(gen_trace_id()) .with_attributes(vec![ - KeyValue::new("host", host_header.clone()), - KeyValue::new("method", req.method().to_string()), + KeyValue::new("host", format!("{}", host_header.clone())), + KeyValue::new("method", format!("{}", req.method())), KeyValue::new("uri", req.uri().to_string()), ]) .start(&tracer); @@ -262,71 +260,45 @@ impl WebServer { // Get path let path = req.uri().path().to_string(); let index = &website_config.index_document; - let routing_result = path_to_keys(&path, index, &website_config.routing_rules)?; + let (key, may_redirect) = path_to_keys(&path, index)?; debug!( - "Selected bucket: \"{}\" {:?}, routing to {:?}", - bucket_name, bucket_id, routing_result, + "Selected bucket: \"{}\" {:?}, target key: \"{}\", may redirect to: {:?}", + bucket_name, bucket_id, key, may_redirect ); - let ret_doc = match (req.method(), routing_result.main_target()) { - (&Method::OPTIONS, _) => handle_options_for_bucket(req, &bucket_params) + let ret_doc = match *req.method() { + Method::OPTIONS => handle_options_for_bucket(req, &bucket_params) .map_err(ApiError::from) .map(|res| res.map(|_empty_body: EmptyBody| empty_body())), - (_, Err((url, code))) => Ok(Response::builder() - .status(code) - .header("Location", url) - .body(empty_body()) - .unwrap()), - (_, Ok((key, code))) => { - handle_inner(self.garage.clone(), req, bucket_id, key, code).await + Method::HEAD => { + handle_head_without_ctx(self.garage.clone(), req, bucket_id, &key, None).await } - }; - - // Try handling errors if bucket configuration provided fallbacks - let ret_doc_with_redir = match (&ret_doc, &routing_result) { - ( - Err(ApiError::NoSuchKey), - RoutingResult::LoadOrRedirect { - redirect_if_exists, - redirect_url, - redirect_code, - .. 
- }, - ) => { - let redirect = if let Some(redirect_key) = redirect_if_exists { - self.check_key_exists(bucket_id, redirect_key.as_str()) - .await? - } else { - true - }; - if redirect { - Ok(Response::builder() - .status(redirect_code) - .header("Location", redirect_url) - .body(empty_body()) - .unwrap()) - } else { - ret_doc - } - } - ( - Err(ApiError::NoSuchKey), - RoutingResult::LoadOrAlternativeError { - redirect_key, - redirect_code, - .. - }, - ) => { - handle_inner( + Method::GET => { + handle_get_without_ctx( self.garage.clone(), req, bucket_id, - redirect_key, - *redirect_code, + &key, + None, + Default::default(), ) .await } + _ => Err(ApiError::bad_request("HTTP method not supported")), + }; + + // Try implicit redirect on error + let ret_doc_with_redir = match (&ret_doc, may_redirect) { + (Err(ApiError::NoSuchKey), ImplicitRedirect::To { key, url }) + if self.check_key_exists(bucket_id, key.as_str()).await? => + { + Ok(Response::builder() + .status(StatusCode::FOUND) + .header(LOCATION, url) + .body(empty_body()) + .unwrap()) + } (Ok(ret), _) if ret.headers().contains_key(X_AMZ_WEBSITE_REDIRECT_LOCATION) => { let redirect_location = ret.headers().get(X_AMZ_WEBSITE_REDIRECT_LOCATION).unwrap(); Ok(Response::builder() @@ -360,17 +332,17 @@ impl WebServer { // We want to return the error document // Create a fake HTTP request with path = the error document let req2 = Request::builder() - .method("GET") .uri(format!("http://{}/{}", host, &error_document)) .body(()) .unwrap(); - match handle_inner( + match handle_get_without_ctx( self.garage.clone(), &req2, bucket_id, &error_document, - error.http_status_code(), + None, + Default::default(), ) .await { @@ -385,6 +357,8 @@ impl WebServer { error ); + *error_doc.status_mut() = error.http_status_code(); + // Preserve error message in a special header for error_line in error.to_string().split('\n') { if let Ok(v) = HeaderValue::from_bytes(error_line.as_bytes()) { @@ -415,52 +389,6 @@ impl WebServer { } } -async 
fn handle_inner( - garage: Arc, - req: &Request<()>, - bucket_id: Uuid, - key: &str, - status_code: StatusCode, -) -> Result, ApiError> { - if status_code != StatusCode::OK { - // If we are returning an error document, discard all headers from - // the original request that would have influenced the result: - // - Range header, we don't want to return a subrange of the error document - // - Caching directives such as If-None-Match, etc, which are not relevant - let cleaned_req = Request::builder().uri(req.uri()).body(()).unwrap(); - - let mut ret = match *req.method() { - Method::HEAD => { - handle_head_without_ctx(garage, &cleaned_req, bucket_id, key, None).await? - } - Method::GET => { - handle_get_without_ctx( - garage, - &cleaned_req, - bucket_id, - key, - None, - Default::default(), - ) - .await? - } - _ => return Err(ApiError::bad_request("HTTP method not supported")), - }; - - *ret.status_mut() = status_code; - - Ok(ret) - } else { - match *req.method() { - Method::HEAD => handle_head_without_ctx(garage, req, bucket_id, key, None).await, - Method::GET => { - handle_get_without_ctx(garage, req, bucket_id, key, None, Default::default()).await - } - _ => Err(ApiError::bad_request("HTTP method not supported")), - } - } -} - fn error_to_res(e: Error) -> Response> { // If we are here, it is either that: // - there was an error before trying to get the requested URL @@ -497,44 +425,9 @@ fn error_to_res(e: Error) -> Response> { } #[derive(Debug, PartialEq)] -enum RoutingResult { - // Load a key and use `code` as status, or fallback to normal 404 handler if not found - LoadKey { - key: String, - code: StatusCode, - }, - // Load a key and use `200` as status, or fallback with a redirection using `redirect_code` - // as status - LoadOrRedirect { - key: String, - redirect_if_exists: Option, - redirect_url: String, - redirect_code: StatusCode, - }, - // Load a key and use `200` as status, or fallback by loading a different key and use - // `redirect_code` as status - 
LoadOrAlternativeError { - key: String, - redirect_key: String, - redirect_code: StatusCode, - }, - // Send an http redirect with `code` as status - Redirect { - url: String, - code: StatusCode, - }, -} - -impl RoutingResult { - // return Ok((key_to_deref, status_code)) or Err((redirect_target, status_code)) - fn main_target(&self) -> Result<(&str, StatusCode), (&str, StatusCode)> { - match self { - RoutingResult::LoadKey { key, code } => Ok((key, *code)), - RoutingResult::LoadOrRedirect { key, .. } => Ok((key, StatusCode::OK)), - RoutingResult::LoadOrAlternativeError { key, .. } => Ok((key, StatusCode::OK)), - RoutingResult::Redirect { url, code } => Err((url, *code)), - } - } +enum ImplicitRedirect { + No, + To { key: String, url: String }, } /// Path to key @@ -543,155 +436,36 @@ impl RoutingResult { /// When a path ends with "/", we append the index name to match traditional web server behavior /// which is also AWS S3 behavior. /// -/// Check: -fn path_to_keys( - path: &str, - index: &str, - routing_rules: &[RoutingRule], -) -> Result { +/// Check: https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html +fn path_to_keys<'a>(path: &'a str, index: &str) -> Result<(String, ImplicitRedirect), Error> { let path_utf8 = percent_encoding::percent_decode_str(path).decode_utf8()?; let base_key = match path_utf8.strip_prefix("/") { Some(bk) => bk, None => return Err(Error::BadRequest("Path must start with a / (slash)".into())), }; - - let is_bucket_root = base_key.is_empty(); + let is_bucket_root = base_key.len() == 0; let is_trailing_slash = path_utf8.ends_with("/"); - let key = if is_bucket_root || is_trailing_slash { - // we can't store anything at the root, so we need to query the index - // if the key end with a slash, we always query the index - format!("{base_key}{index}") - } else { - // if the key doesn't end with `/`, leave it unmodified - base_key.to_string() - }; + match (is_bucket_root, is_trailing_slash) { + // It is not possible to 
store something at the root of the bucket (ie. empty key), + // the only option is to fetch the index + (true, _) => Ok((index.to_string(), ImplicitRedirect::No)), - let mut routing_rules_iter = routing_rules.iter(); - let key = loop { - let Some(routing_rule) = routing_rules_iter.next() else { - break key; - }; + // "If you create a folder structure in your bucket, you must have an index document at each level. In each folder, the index document must have the same name, for example, index.html. When a user specifies a URL that resembles a folder lookup, the presence or absence of a trailing slash determines the behavior of the website. For example, the following URL, with a trailing slash, returns the photos/index.html index document." + (false, true) => Ok((format!("{base_key}{index}"), ImplicitRedirect::No)), - let Ok(status_code) = StatusCode::from_u16(routing_rule.redirect.http_redirect_code) else { - continue; - }; - if let Some(condition) = &routing_rule.condition { - let suffix = if let Some(prefix) = &condition.prefix { - let Some(suffix) = base_key.strip_prefix(prefix) else { - continue; - }; - Some(suffix) - } else { - None - }; - let mut target = compute_redirect_target(&routing_rule.redirect, suffix); - let query_alternative_key = - status_code == StatusCode::OK || status_code == StatusCode::NOT_FOUND; - let redirect_on_error = - condition.http_error_code == Some(StatusCode::NOT_FOUND.as_u16()); - match (query_alternative_key, redirect_on_error) { - (false, false) => { - return Ok(RoutingResult::Redirect { - url: target, - code: status_code, - }) - } - (true, false) => { - // we need to remove the leading / - target.remove(0); - if status_code == StatusCode::OK { - break target; - } else { - return Ok(RoutingResult::LoadKey { - key: target, - code: status_code, - }); - } - } - (false, true) => { - return Ok(RoutingResult::LoadOrRedirect { - key, - redirect_if_exists: None, - redirect_url: target, - redirect_code: status_code, - }); - } - (true, true) 
=> { - target.remove(0); - return Ok(RoutingResult::LoadOrAlternativeError { - key, - redirect_key: target, - redirect_code: status_code, - }); - } - } - } else { - let target = compute_redirect_target(&routing_rule.redirect, None); - return Ok(RoutingResult::Redirect { - url: target, - code: status_code, - }); - } - }; - - if is_bucket_root || is_trailing_slash { - Ok(RoutingResult::LoadKey { - key, - code: StatusCode::OK, - }) - } else { - Ok(RoutingResult::LoadOrRedirect { - redirect_if_exists: Some(format!("{key}/{index}")), - // we can't use `path` because key might have changed substantially in case of - // routing rules - redirect_url: percent_encoding::percent_encode( - format!("/{key}/").as_bytes(), - PATH_ENCODING_SET, - ) - .to_string(), - key, - redirect_code: StatusCode::FOUND, - }) + // "However, if you exclude the trailing slash from the preceding URL, Amazon S3 first looks for an object photos in the bucket. If the photos object is not found, it searches for an index document, photos/index.html. If that document is found, Amazon S3 returns a 302 Found message and points to the photos/ key. For subsequent requests to photos/, Amazon S3 returns photos/index.html. If the index document is not found, Amazon S3 returns an error." 
+ (false, false) => Ok(( + base_key.to_string(), + ImplicitRedirect::To { + key: format!("{base_key}/{index}"), + url: format!("{path}/"), + }, + )), } } -// per https://url.spec.whatwg.org/#path-percent-encode-set -const PATH_ENCODING_SET: &percent_encoding::AsciiSet = &percent_encoding::CONTROLS - .add(b' ') - .add(b'"') - .add(b'#') - .add(b'<') - .add(b'>') - .add(b'?') - .add(b'`') - .add(b'{') - .add(b'}'); - -fn compute_redirect_target(redirect: &bucket_table::Redirect, suffix: Option<&str>) -> String { - let mut res = String::new(); - if let Some(hostname) = &redirect.hostname { - if let Some(protocol) = &redirect.protocol { - res.push_str(protocol); - res.push_str("://"); - } else { - res.push_str("//"); - } - res.push_str(hostname); - } - res.push('/'); - if let Some(replace_key_prefix) = &redirect.replace_key_prefix { - res.push_str(replace_key_prefix); - if let Some(suffix) = suffix { - res.push_str(suffix); - } - } else if let Some(replace_key) = &redirect.replace_key { - res.push_str(replace_key); - } - res -} - #[cfg(test)] mod tests { use super::*; @@ -699,39 +473,35 @@ mod tests { #[test] fn path_to_keys_test() -> Result<(), Error> { assert_eq!( - path_to_keys("/file%20.jpg", "index.html", &[])?, - RoutingResult::LoadOrRedirect { - key: "file .jpg".to_string(), - redirect_url: "/file%20.jpg/".to_string(), - redirect_if_exists: Some("file .jpg/index.html".to_string()), - redirect_code: StatusCode::FOUND, - } + path_to_keys("/file%20.jpg", "index.html")?, + ( + "file .jpg".to_string(), + ImplicitRedirect::To { + key: "file .jpg/index.html".to_string(), + url: "/file%20.jpg/".to_string() + } + ) ); assert_eq!( - path_to_keys("/%20t/", "index.html", &[])?, - RoutingResult::LoadKey { - key: " t/index.html".to_string(), - code: StatusCode::OK - } + path_to_keys("/%20t/", "index.html")?, + (" t/index.html".to_string(), ImplicitRedirect::No) ); assert_eq!( - path_to_keys("/", "index.html", &[])?, - RoutingResult::LoadKey { - key: "index.html".to_string(), 
- code: StatusCode::OK - } + path_to_keys("/", "index.html")?, + ("index.html".to_string(), ImplicitRedirect::No) ); assert_eq!( - path_to_keys("/hello", "index.html", &[])?, - RoutingResult::LoadOrRedirect { - key: "hello".to_string(), - redirect_url: "/hello/".to_string(), - redirect_if_exists: Some("hello/index.html".to_string()), - redirect_code: StatusCode::FOUND, - } + path_to_keys("/hello", "index.html")?, + ( + "hello".to_string(), + ImplicitRedirect::To { + key: "hello/index.html".to_string(), + url: "/hello/".to_string() + } + ) ); - assert!(path_to_keys("", "index.html", &[]).is_err()); - assert!(path_to_keys("i/am/relative", "index.html", &[]).is_err()); + assert!(path_to_keys("", "index.html").is_err()); + assert!(path_to_keys("i/am/relative", "index.html").is_err()); Ok(()) } } diff --git a/taplo.toml b/taplo.toml deleted file mode 100644 index 05486ef4..00000000 --- a/taplo.toml +++ /dev/null @@ -1,7 +0,0 @@ -include = ["**/Cargo.toml", "taplo.toml"] - -[formatting] -indent_string = " " -compact_inline_tables = false -compact_arrays = true -inline_table_expand = false diff --git a/typos.toml b/typos.toml deleted file mode 100644 index f76d002d..00000000 --- a/typos.toml +++ /dev/null @@ -1,6 +0,0 @@ -[default.extend-words] -PN = "PN" -substituters = "substituters" - -[files] -extend-exclude = ["CHANGELOG.md", "**.js", "**.svg", "doc/talks/*"]