diff --git a/.woodpecker/release.yaml b/.woodpecker/release.yaml
index 4133b92d..a94a9ccf 100644
--- a/.woodpecker/release.yaml
+++ b/.woodpecker/release.yaml
@@ -38,15 +38,7 @@ steps:
- matrix:
ARCH: i386
- - name: upgrade tests from v1.0.0
- image: nixpkgs/nix:nixos-24.05
- commands:
- - nix-shell --attr ci --run "./script/test-upgrade.sh v1.0.0 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
- when:
- - matrix:
- ARCH: amd64
-
- - name: upgrade tests from v0.8.4
+ - name: upgrade tests
image: nixpkgs/nix:nixos-24.05
commands:
- nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
diff --git a/Cargo.lock b/Cargo.lock
index 968126e0..7473d9af 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -301,9 +301,9 @@ dependencies = [
[[package]]
name = "aws-sdk-s3"
-version = "1.102.0"
+version = "1.120.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75ddb925e840f49446aa6338b67abdbec04b4ebf923b7da038ec4c35afb916cd"
+checksum = "06673901e961f20fa8d7da907da48f7ad6c1b383e3726c22bd418900f015abe1"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -313,6 +313,7 @@ dependencies = [
"aws-smithy-eventstream",
"aws-smithy-http",
"aws-smithy-json",
+ "aws-smithy-observability",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
@@ -369,9 +370,9 @@ dependencies = [
[[package]]
name = "aws-smithy-checksums"
-version = "0.63.6"
+version = "0.63.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9054b4cc5eda331cde3096b1576dec45365c5cbbca61d1fffa5f236e251dfce7"
+checksum = "23374b9170cbbcc6f5df8dc5ebb9b6c5c28a3c8f599f0e8b8b10eb6f4a5c6e74"
dependencies = [
"aws-smithy-http",
"aws-smithy-types",
@@ -865,15 +866,41 @@ dependencies = [
]
[[package]]
-name = "crc-fast"
-version = "1.10.0"
+name = "crc"
+version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e75b2483e97a5a7da73ac68a05b629f9c53cff58d8ed1c77866079e18b00dba5"
+checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
dependencies = [
+ "crc-catalog",
+]
+
+[[package]]
+name = "crc-catalog"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
+
+[[package]]
+name = "crc-fast"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d"
+dependencies = [
+ "crc",
"digest",
+ "rustversion",
"spin 0.10.0",
]
+[[package]]
+name = "crc32c"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47"
+dependencies = [
+ "rustc_version",
+]
+
[[package]]
name = "crc32fast"
version = "1.5.0"
@@ -1174,6 +1201,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
+[[package]]
+name = "foldhash"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
+
[[package]]
name = "form_urlencoded"
version = "1.2.2"
@@ -1278,7 +1311,7 @@ dependencies = [
[[package]]
name = "garage"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"assert-json-diff",
"async-trait",
@@ -1289,7 +1322,7 @@ dependencies = [
"bytes",
"bytesize",
"chrono",
- "crc-fast",
+ "crc32fast",
"format_table",
"futures",
"garage_api_admin",
@@ -1318,6 +1351,7 @@ dependencies = [
"opentelemetry-otlp",
"opentelemetry-prometheus",
"parse_duration",
+ "serde",
"serde_json",
"sha1",
"sha2",
@@ -1329,21 +1363,16 @@ dependencies = [
"tracing",
"tracing-journald",
"tracing-subscriber",
- "utoipa",
]
[[package]]
name = "garage_api_admin"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"argon2",
"async-trait",
- "bytesize",
- "chrono",
- "format_table",
"futures",
"garage_api_common",
- "garage_block",
"garage_model",
"garage_rpc",
"garage_table",
@@ -1353,7 +1382,6 @@ dependencies = [
"hyper 1.8.1",
"opentelemetry",
"opentelemetry-prometheus",
- "paste",
"prometheus",
"serde",
"serde_json",
@@ -1361,17 +1389,17 @@ dependencies = [
"tokio",
"tracing",
"url",
- "utoipa",
]
[[package]]
name = "garage_api_common"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"base64 0.21.7",
"bytes",
"chrono",
- "crc-fast",
+ "crc32c",
+ "crc32fast",
"crypto-common",
"futures",
"garage_model",
@@ -1399,7 +1427,7 @@ dependencies = [
[[package]]
name = "garage_api_k2v"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"base64 0.21.7",
"futures",
@@ -1422,14 +1450,15 @@ dependencies = [
[[package]]
name = "garage_api_s3"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"aes-gcm",
"async-compression",
"base64 0.21.7",
"bytes",
"chrono",
- "crc-fast",
+ "crc32c",
+ "crc32fast",
"form_urlencoded",
"futures",
"garage_api_common",
@@ -1440,7 +1469,6 @@ dependencies = [
"garage_table",
"garage_util",
"hex",
- "hmac",
"http 1.4.0",
"http-body-util",
"http-range",
@@ -1467,7 +1495,7 @@ dependencies = [
[[package]]
name = "garage_block"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"arc-swap",
"async-compression",
@@ -1478,6 +1506,7 @@ dependencies = [
"garage_db",
"garage_net",
"garage_rpc",
+ "garage_table",
"garage_util",
"hex",
"opentelemetry",
@@ -1491,7 +1520,7 @@ dependencies = [
[[package]]
name = "garage_db"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"fjall",
"heed",
@@ -1506,9 +1535,8 @@ dependencies = [
[[package]]
name = "garage_model"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
- "argon2",
"async-trait",
"base64 0.21.7",
"blake2",
@@ -1534,7 +1562,7 @@ dependencies = [
[[package]]
name = "garage_net"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"arc-swap",
"bytes",
@@ -1559,7 +1587,7 @@ dependencies = [
[[package]]
name = "garage_rpc"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"arc-swap",
"async-trait",
@@ -1591,7 +1619,7 @@ dependencies = [
[[package]]
name = "garage_table"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"arc-swap",
"async-trait",
@@ -1612,7 +1640,7 @@ dependencies = [
[[package]]
name = "garage_util"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"arc-swap",
"async-trait",
@@ -1644,7 +1672,7 @@ dependencies = [
[[package]]
name = "garage_web"
-version = "2.2.0"
+version = "1.3.1"
dependencies = [
"garage_api_common",
"garage_api_s3",
@@ -1806,9 +1834,7 @@ version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
- "allocator-api2",
- "equivalent",
- "foldhash",
+ "foldhash 0.1.5",
]
[[package]]
@@ -1816,6 +1842,11 @@ name = "hashbrown"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash 0.2.0",
+]
[[package]]
name = "hashlink"
@@ -2273,8 +2304,6 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
"hashbrown 0.16.1",
- "serde",
- "serde_core",
]
[[package]]
@@ -2653,11 +2682,11 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru"
-version = "0.12.5"
+version = "0.16.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
+checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593"
dependencies = [
- "hashbrown 0.15.5",
+ "hashbrown 0.16.1",
]
[[package]]
@@ -3079,12 +3108,6 @@ dependencies = [
"subtle",
]
-[[package]]
-name = "paste"
-version = "1.0.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
-
[[package]]
name = "path-absolutize"
version = "3.1.1"
@@ -4809,29 +4832,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
-[[package]]
-name = "utoipa"
-version = "5.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993"
-dependencies = [
- "indexmap 2.13.0",
- "serde",
- "serde_json",
- "utoipa-gen",
-]
-
-[[package]]
-name = "utoipa-gen"
-version = "5.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d79d08d92ab8af4c5e8a6da20c47ae3f61a0f1dabc1997cdf2d082b757ca08b"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.114",
-]
-
[[package]]
name = "uuid"
version = "1.4.1"
diff --git a/Cargo.toml b/Cargo.toml
index c6f5315d..df4005a3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,18 +24,18 @@ default-members = ["src/garage"]
# Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "2.2.0", path = "src/api/common" }
-garage_api_admin = { version = "2.2.0", path = "src/api/admin" }
-garage_api_s3 = { version = "2.2.0", path = "src/api/s3" }
-garage_api_k2v = { version = "2.2.0", path = "src/api/k2v" }
-garage_block = { version = "2.2.0", path = "src/block" }
-garage_db = { version = "2.2.0", path = "src/db", default-features = false }
-garage_model = { version = "2.2.0", path = "src/model", default-features = false }
-garage_net = { version = "2.2.0", path = "src/net" }
-garage_rpc = { version = "2.2.0", path = "src/rpc" }
-garage_table = { version = "2.2.0", path = "src/table" }
-garage_util = { version = "2.2.0", path = "src/util" }
-garage_web = { version = "2.2.0", path = "src/web" }
+garage_api_common = { version = "1.3.1", path = "src/api/common" }
+garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
+garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
+garage_block = { version = "1.3.1", path = "src/block" }
+garage_db = { version = "1.3.1", path = "src/db", default-features = false }
+garage_model = { version = "1.3.1", path = "src/model", default-features = false }
+garage_net = { version = "1.3.1", path = "src/net" }
+garage_rpc = { version = "1.3.1", path = "src/rpc" }
+garage_table = { version = "1.3.1", path = "src/table" }
+garage_util = { version = "1.3.1", path = "src/util" }
+garage_web = { version = "1.3.1", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
# External crates from crates.io
@@ -48,8 +48,9 @@ blake2 = "0.10"
bytes = "1.0"
bytesize = "1.1"
cfg-if = "1.0"
-chrono = { version = "0.4", features = ["serde"] }
-crc-fast = "1.6"
+chrono = "0.4"
+crc32fast = "1.4"
+crc32c = "0.6"
crypto-common = "0.1"
gethostname = "0.4"
git-version = "0.3.4"
@@ -65,7 +66,6 @@ nix = { version = "0.29", default-features = false, features = ["fs"] }
nom = "7.1"
parking_lot = "0.12"
parse_duration = "2.1"
-paste = "1.0"
pin-project = "1.0.12"
pnet_datalink = "0.34"
rand = "0.8"
@@ -101,7 +101,6 @@ serde = { version = "1.0", default-features = false, features = ["derive", "rc"]
serde_bytes = "0.11"
serde_json = "1.0"
toml = { version = "0.8", default-features = false, features = ["parse"] }
-utoipa = { version = "5.3.1", features = ["chrono"] }
# newer version requires rust edition 2021
k8s-openapi = { version = "0.21", features = ["v1_24"] }
@@ -147,12 +146,8 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
aws-sdk-config = { version = "1.62", default-features = false }
aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }
-[profile.dev]
-#lto = "thin" # disabled for now, adds 2-4 min to each CI build
-lto = "off"
-
[profile.release]
-lto = true
-codegen-units = 1
+lto = "thin"
+codegen-units = 16
opt-level = 3
-strip = true
+strip = "debuginfo"
diff --git a/doc/api/garage-admin-v0.html b/doc/api/garage-admin-v0.html
index 7eb11f25..dbdd9e1c 100644
--- a/doc/api/garage-admin-v0.html
+++ b/doc/api/garage-admin-v0.html
@@ -1,7 +1,7 @@
- Garage administration API v0
+ Garage Administration API v0
diff --git a/doc/api/garage-admin-v1.html b/doc/api/garage-admin-v1.html
index e98306b8..783d459e 100644
--- a/doc/api/garage-admin-v1.html
+++ b/doc/api/garage-admin-v1.html
@@ -1,7 +1,7 @@
- Garage administration API v1
+ Garage Administration API v1
diff --git a/doc/api/garage-admin-v2.html b/doc/api/garage-admin-v2.html
deleted file mode 100644
index b079e760..00000000
--- a/doc/api/garage-admin-v2.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
- Garage administration API v2
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/doc/api/garage-admin-v2.json b/doc/api/garage-admin-v2.json
deleted file mode 100644
index 0081bf34..00000000
--- a/doc/api/garage-admin-v2.json
+++ /dev/null
@@ -1,4429 +0,0 @@
-{
- "openapi": "3.1.0",
- "info": {
- "title": "Garage administration API",
- "description": "Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks.\n\n*Disclaimer: This API may change in future Garage versions. Read the changelog and upgrade your scripts before upgrading. Additionnaly, this specification is early stage and can contain bugs, so be careful and please report any issues on our issue tracker.*",
- "contact": {
- "name": "The Garage team",
- "url": "https://garagehq.deuxfleurs.fr/",
- "email": "garagehq@deuxfleurs.fr"
- },
- "license": {
- "name": "AGPL-3.0",
- "identifier": "AGPL-3.0"
- },
- "version": "v2.2.0"
- },
- "servers": [
- {
- "url": "http://localhost:3903/",
- "description": "A local server"
- }
- ],
- "paths": {
- "/check": {
- "get": {
- "tags": [
- "Special endpoints"
- ],
- "description": "\nStatic website domain name check. Checks whether a bucket is configured to serve\na static website for the requested domain. This is used by reverse proxies such\nas Caddy or Tricot, to avoid requesting TLS certificates for domain names that\ndo not correspond to an actual website.\n ",
- "operationId": "CheckDomain",
- "parameters": [
- {
- "name": "domain",
- "in": "query",
- "description": "The domain name to check for",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "The domain name redirects to a static website bucket"
- },
- "400": {
- "description": "No static website bucket exists for this domain"
- }
- },
- "security": [
- {}
- ]
- }
- },
- "/health": {
- "get": {
- "tags": [
- "Special endpoints"
- ],
- "description": "\nCheck cluster health. The status code returned by this function indicates\nwhether this Garage daemon can answer API requests.\nGarage will return `200 OK` even if some storage nodes are disconnected,\nas long as it is able to have a quorum of nodes for read and write operations.\n ",
- "operationId": "Health",
- "responses": {
- "200": {
- "description": "Garage is able to answer requests"
- },
- "503": {
- "description": "This Garage daemon is not able to handle requests"
- }
- },
- "security": [
- {}
- ]
- }
- },
- "/metrics": {
- "get": {
- "tags": [
- "Special endpoints"
- ],
- "description": "Prometheus metrics endpoint",
- "operationId": "Metrics",
- "responses": {
- "200": {
- "description": "Garage daemon metrics exported in Prometheus format"
- }
- },
- "security": [
- {},
- {
- "bearerAuth": []
- }
- ]
- }
- },
- "/v2/AddBucketAlias": {
- "post": {
- "tags": [
- "Bucket alias"
- ],
- "description": "Add an alias for the target bucket. This can be either a global or a local alias, depending on which fields are specified.",
- "operationId": "AddBucketAlias",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/BucketAliasEnum"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/AddBucketAliasResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/AllowBucketKey": {
- "post": {
- "tags": [
- "Permission"
- ],
- "description": "\n⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.\n\nAllows a key to do read/write/owner operations on a bucket.\n\nFlags in permissions which have the value true will be activated. Other flags will remain unchanged (ie. they will keep their internal value).\n\nFor example, if you set read to true, the key will be allowed to read the bucket.\nIf you set it to false, the key will keeps its previous read permission.\nIf you want to disallow read for the key, check the DenyBucketKey operation.\n ",
- "operationId": "AllowBucketKey",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/AllowBucketKeyRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/AllowBucketKeyResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ApplyClusterLayout": {
- "post": {
- "tags": [
- "Cluster layout"
- ],
- "description": "\nApplies to the cluster the layout changes currently registered as staged layout changes.\n\n*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*\n ",
- "operationId": "ApplyClusterLayout",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ApplyClusterLayoutRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "The updated cluster layout has been applied in the cluster",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ApplyClusterLayoutResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/CleanupIncompleteUploads": {
- "post": {
- "tags": [
- "Bucket"
- ],
- "description": "Removes all incomplete multipart uploads that are older than the specified number of seconds.",
- "operationId": "CleanupIncompleteUploads",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CleanupIncompleteUploadsRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "The bucket was cleaned up successfully",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CleanupIncompleteUploadsResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ClusterLayoutSkipDeadNodes": {
- "post": {
- "tags": [
- "Cluster layout"
- ],
- "description": "Force progress in layout update trackers",
- "operationId": "ClusterLayoutSkipDeadNodes",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterLayoutSkipDeadNodesRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Request has been taken into account",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterLayoutSkipDeadNodesResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ConnectClusterNodes": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "description": "Instructs this Garage node to connect to other Garage nodes at specified `@`. `node_id` is generated automatically on node start.",
- "operationId": "ConnectClusterNodes",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ConnectClusterNodesRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "The request has been handled correctly but it does not mean that all connection requests succeeded; some might have fail, you need to check the body!",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ConnectClusterNodesResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/CreateAdminToken": {
- "post": {
- "tags": [
- "Admin API token"
- ],
- "description": "Creates a new admin API token",
- "operationId": "CreateAdminToken",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateAdminTokenRequestBody"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Admin token has been created",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateAdminTokenResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/CreateBucket": {
- "post": {
- "tags": [
- "Bucket"
- ],
- "description": "\nCreates a new bucket, either with a global alias, a local one, or no alias at all.\nTechnically, you can also specify both `globalAlias` and `localAlias` and that would create two aliases.\n ",
- "operationId": "CreateBucket",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateBucketRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateBucketResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/CreateKey": {
- "post": {
- "tags": [
- "Access key"
- ],
- "description": "Creates a new API access key.",
- "operationId": "CreateKey",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateKeyRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Access key has been created",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateKeyResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/CreateMetadataSnapshot": {
- "post": {
- "tags": [
- "Node"
- ],
- "description": "\nInstruct one or several nodes to take a snapshot of their metadata databases.\n ",
- "operationId": "CreateMetadataSnapshot",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalCreateMetadataSnapshotResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/DeleteAdminToken": {
- "post": {
- "tags": [
- "Admin API token"
- ],
- "description": "Delete an admin API token from the cluster, revoking all its permissions.",
- "operationId": "DeleteAdminToken",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Admin API token ID",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Admin token has been deleted"
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/DeleteBucket": {
- "post": {
- "tags": [
- "Bucket"
- ],
- "description": "\nDeletes a storage bucket. A bucket cannot be deleted if it is not empty.\n\n**Warning:** this will delete all aliases associated with the bucket!\n ",
- "operationId": "DeleteBucket",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "ID of the bucket to delete",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Bucket has been deleted"
- },
- "400": {
- "description": "Bucket is not empty"
- },
- "404": {
- "description": "Bucket not found"
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/DeleteKey": {
- "post": {
- "tags": [
- "Access key"
- ],
- "description": "Delete a key from the cluster. Its access will be removed from all the buckets. Buckets are not automatically deleted and can be dangling. You should manually delete them before. ",
- "operationId": "DeleteKey",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Access key ID",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Access key has been deleted"
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/DenyBucketKey": {
- "post": {
- "tags": [
- "Permission"
- ],
- "description": "\n⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.\n\nDenies a key from doing read/write/owner operations on a bucket.\n\nFlags in permissions which have the value true will be deactivated. Other flags will remain unchanged.\n\nFor example, if you set read to true, the key will be denied from reading.\nIf you set read to false, the key will keep its previous permissions.\nIf you want the key to have the reading permission, check the AllowBucketKey operation.\n ",
- "operationId": "DenyBucketKey",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/DenyBucketKeyRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/DenyBucketKeyResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetAdminTokenInfo": {
- "get": {
- "tags": [
- "Admin API token"
- ],
- "description": "\nReturn information about a specific admin API token.\nYou can search by specifying the exact token identifier (`id`) or by specifying a pattern (`search`).\n ",
- "operationId": "GetAdminTokenInfo",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Admin API token ID",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "search",
- "in": "query",
- "description": "Partial token ID or name to search for",
- "required": false,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Information about the admin token",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetAdminTokenInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetBlockInfo": {
- "post": {
- "tags": [
- "Block"
- ],
- "description": "\nGet detailed information about a data block stored on a Garage node, including all object versions and in-progress multipart uploads that contain a reference to this block.\n ",
- "operationId": "GetBlockInfo",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalGetBlockInfoRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Detailed block information",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalGetBlockInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetBucketInfo": {
- "get": {
- "tags": [
- "Bucket"
- ],
- "description": "\nGiven a bucket identifier (`id`) or a global alias (`alias`), get its information.\nIt includes its aliases, its web configuration, keys that have some permissions\non it, some statistics (number of objects, size), number of dangling multipart uploads,\nand its quotas (if any).\n ",
- "operationId": "GetBucketInfo",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Exact bucket ID to look up",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "globalAlias",
- "in": "query",
- "description": "Global alias of bucket to look up",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "search",
- "in": "query",
- "description": "Partial ID or alias to search for",
- "required": false,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetClusterHealth": {
- "get": {
- "tags": [
- "Cluster"
- ],
- "description": "Returns the global status of the cluster, the number of connected nodes (over the number of known ones), the number of healthy storage nodes (over the declared ones), and the number of healthy partitions (over the total).",
- "operationId": "GetClusterHealth",
- "responses": {
- "200": {
- "description": "Cluster health report",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetClusterHealthResponse"
- }
- }
- }
- }
- }
- }
- },
- "/v2/GetClusterLayout": {
- "get": {
- "tags": [
- "Cluster layout"
- ],
- "description": "\nReturns the cluster's current layout, including:\n\n- Currently configured cluster layout\n- Staged changes to the cluster layout\n\n*Capacity is given in bytes*\n ",
- "operationId": "GetClusterLayout",
- "responses": {
- "200": {
- "description": "Current cluster layout",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetClusterLayoutResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetClusterLayoutHistory": {
- "get": {
- "tags": [
- "Cluster layout"
- ],
- "description": "\nReturns the history of layouts in the cluster\n ",
- "operationId": "GetClusterLayoutHistory",
- "responses": {
- "200": {
- "description": "Cluster layout history",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetClusterLayoutHistoryResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetClusterStatistics": {
- "get": {
- "tags": [
- "Cluster"
- ],
- "description": "\nFetch global cluster statistics.\n\n*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*\n ",
- "operationId": "GetClusterStatistics",
- "responses": {
- "200": {
- "description": "Global cluster statistics",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetClusterStatisticsResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetClusterStatus": {
- "get": {
- "tags": [
- "Cluster"
- ],
- "description": "\nReturns the cluster's current status, including:\n\n- ID of the node being queried and its version of the Garage daemon\n- Live nodes\n- Currently configured cluster layout\n- Staged changes to the cluster layout\n\n*Capacity is given in bytes*\n ",
- "operationId": "GetClusterStatus",
- "responses": {
- "200": {
- "description": "Cluster status report",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetClusterStatusResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetCurrentAdminTokenInfo": {
- "get": {
- "tags": [
- "Admin API token"
- ],
- "description": "\nReturn information about the calling admin API token.\n ",
- "operationId": "GetCurrentAdminTokenInfo",
- "responses": {
- "200": {
- "description": "Information about the admin token",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetCurrentAdminTokenInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetKeyInfo": {
- "get": {
- "tags": [
- "Access key"
- ],
- "description": "\nReturn information about a specific key like its identifiers, its permissions and buckets on which it has permissions.\nYou can search by specifying the exact key identifier (`id`) or by specifying a pattern (`search`).\n\nFor confidentiality reasons, the secret key is not returned by default: you must pass the `showSecretKey` query parameter to get it.\n ",
- "operationId": "GetKeyInfo",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Access key ID",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "search",
- "in": "query",
- "description": "Partial key ID or name to search for",
- "required": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "showSecretKey",
- "in": "query",
- "description": "Whether to return the secret access key",
- "required": false,
- "schema": {
- "type": "boolean"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Information about the access key",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/GetKeyInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetNodeInfo": {
- "get": {
- "tags": [
- "Node"
- ],
- "description": "\nReturn information about the Garage daemon running on one or several nodes.\n ",
- "operationId": "GetNodeInfo",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalGetNodeInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetNodeStatistics": {
- "get": {
- "tags": [
- "Node"
- ],
- "description": "\nFetch statistics for one or several Garage nodes.\n\n*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*\n ",
- "operationId": "GetNodeStatistics",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalGetNodeStatisticsResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetWorkerInfo": {
- "post": {
- "tags": [
- "Worker"
- ],
- "description": "\nGet information about the specified background worker on one or several cluster nodes.\n ",
- "operationId": "GetWorkerInfo",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalGetWorkerInfoRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalGetWorkerInfoResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/GetWorkerVariable": {
- "post": {
- "tags": [
- "Worker"
- ],
- "description": "\nFetch values of one or several worker variables, from one or several cluster nodes.\n ",
- "operationId": "GetWorkerVariable",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalGetWorkerVariableRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalGetWorkerVariableResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ImportKey": {
- "post": {
- "tags": [
- "Access key"
- ],
- "description": "\nImports an existing API key. This feature must only be used for migrations and backup restore.\n\n**Do not use it to generate custom key identifiers or you will break your Garage cluster.**\n ",
- "operationId": "ImportKey",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ImportKeyRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Access key has been imported",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ImportKeyResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/InspectObject": {
- "get": {
- "tags": [
- "Bucket"
- ],
- "description": "\nReturns detailed information about an object in a bucket, including its internal state in Garage.\n\nThis API call can be used to list the data blocks referenced by an object,\nas well as to view metadata associated to the object.\n\nThis call may return a list of more than one version for the object, for instance in the\ncase where there is a currently stored version of the object, and a newer version whose\nupload is in progress and not yet finished.\n ",
- "operationId": "InspectObject",
- "parameters": [
- {
- "name": "bucketId",
- "in": "query",
- "required": true,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "key",
- "in": "query",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the object",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/InspectObjectResponse"
- }
- }
- }
- },
- "404": {
- "description": "Object not found"
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/LaunchRepairOperation": {
- "post": {
- "tags": [
- "Node"
- ],
- "description": "\nLaunch a repair operation on one or several cluster nodes.\n ",
- "operationId": "LaunchRepairOperation",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalLaunchRepairOperationRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalLaunchRepairOperationResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ListAdminTokens": {
- "get": {
- "tags": [
- "Admin API token"
- ],
- "description": "Returns all admin API tokens in the cluster.",
- "operationId": "ListAdminTokens",
- "responses": {
- "200": {
- "description": "Returns info about all admin API tokens",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ListAdminTokensResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ListBlockErrors": {
- "get": {
- "tags": [
- "Block"
- ],
- "description": "\nList data blocks that are currently in an errored state on one or several Garage nodes.\n ",
- "operationId": "ListBlockErrors",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalListBlockErrorsResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ListBuckets": {
- "get": {
- "tags": [
- "Bucket"
- ],
- "description": "List all the buckets on the cluster with their UUID and their global and local aliases.",
- "operationId": "ListBuckets",
- "responses": {
- "200": {
- "description": "Returns the UUID of all the buckets and all their aliases",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ListBucketsResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ListKeys": {
- "get": {
- "tags": [
- "Access key"
- ],
- "description": "Returns all API access keys in the cluster.",
- "operationId": "ListKeys",
- "responses": {
- "200": {
- "description": "Returns the key identifier (aka `AWS_ACCESS_KEY_ID`) and its associated, human friendly, name if any (otherwise return an empty string)",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ListKeysResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/ListWorkers": {
- "post": {
- "tags": [
- "Worker"
- ],
- "description": "\nList background workers currently running on one or several cluster nodes.\n ",
- "operationId": "ListWorkers",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalListWorkersRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalListWorkersResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/PreviewClusterLayoutChanges": {
- "post": {
- "tags": [
- "Cluster layout"
- ],
- "description": "\nComputes a new layout taking into account the staged parameters, and returns it with detailed statistics. The new layout is not applied in the cluster.\n\n*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*\n ",
- "operationId": "PreviewClusterLayoutChanges",
- "responses": {
- "200": {
- "description": "Information about the new layout",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/PreviewClusterLayoutChangesResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/PurgeBlocks": {
- "post": {
- "tags": [
- "Block"
- ],
- "description": "\nPurge references to one or several missing data blocks.\n\nThis will remove all objects and in-progress multipart uploads that contain the specified data block(s). The objects will be permanently deleted from the buckets in which they appear. Use with caution.\n ",
- "operationId": "PurgeBlocks",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalPurgeBlocksRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalPurgeBlocksResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/RemoveBucketAlias": {
- "post": {
- "tags": [
- "Bucket alias"
- ],
- "description": "Remove an alias for the target bucket. This can be either a global or a local alias, depending on which fields are specified.",
- "operationId": "RemoveBucketAlias",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/BucketAliasEnum"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Returns exhaustive information about the bucket",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RemoveBucketAliasResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/RetryBlockResync": {
- "post": {
- "tags": [
- "Block"
- ],
- "description": "\nInstruct Garage node(s) to retry the resynchronization of one or several missing data block(s).\n ",
- "operationId": "RetryBlockResync",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalRetryBlockResyncRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalRetryBlockResyncResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/RevertClusterLayout": {
- "post": {
- "tags": [
- "Cluster layout"
- ],
- "description": "Clear staged layout changes",
- "operationId": "RevertClusterLayout",
- "responses": {
- "200": {
- "description": "All pending changes to the cluster layout have been erased",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/RevertClusterLayoutResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/SetWorkerVariable": {
- "post": {
- "tags": [
- "Worker"
- ],
- "description": "\nSet the value for a worker variable, on one or several cluster nodes.\n ",
- "operationId": "SetWorkerVariable",
- "parameters": [
- {
- "name": "node",
- "in": "query",
- "description": "Node ID to query, or `*` for all nodes, or `self` for the node responding to the request",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/LocalSetWorkerVariableRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Responses from individual cluster nodes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/MultiResponse_LocalSetWorkerVariableResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/UpdateAdminToken": {
- "post": {
- "tags": [
- "Admin API token"
- ],
- "description": "\nUpdates information about the specified admin API token.\n ",
- "operationId": "UpdateAdminToken",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Admin API token ID",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateAdminTokenRequestBody"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Admin token has been updated",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateAdminTokenResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/UpdateBucket": {
- "post": {
- "tags": [
- "Bucket"
- ],
- "description": "\nAll fields (`websiteAccess` and `quotas`) are optional.\nIf they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed.\n\nIn `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified.\nThe field `errorDocument` is optional, if no error document is set a generic\nerror message is displayed when errors happen. Conversely, if `enabled` is\n`false`, neither `indexDocument` nor `errorDocument` must be specified.\n\nIn `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`\nto remove the quotas. An absent value will be considered the same as a `null`. It is not possible\nto change only one of the two quotas.\n ",
- "operationId": "UpdateBucket",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "ID of the bucket to update",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateBucketRequestBody"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Bucket has been updated",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateBucketResponse"
- }
- }
- }
- },
- "404": {
- "description": "Bucket not found"
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/UpdateClusterLayout": {
- "post": {
- "tags": [
- "Cluster layout"
- ],
- "description": "\nSend modifications to the cluster layout. These modifications will be included in the staged role changes, visible in subsequent calls of `GET /GetClusterHealth`. Once the set of staged changes is satisfactory, the user may call `POST /ApplyClusterLayout` to apply the staged changes, or `POST /RevertClusterLayout` to clear all of the staged changes in the layout.\n\nSetting the capacity to `null` will configure the node as a gateway.\nOtherwise, capacity must now be set in bytes (before Garage 0.9 it was arbitrary weights).\nFor example to declare 100GB, you must set `capacity: 100000000000`.\n\nGarage uses internally the International System of Units (SI), it assumes that 1kB = 1000 bytes, and displays storage as kB, MB, GB (and not KiB, MiB, GiB that assume 1KiB = 1024 bytes).\n ",
- "operationId": "UpdateClusterLayout",
- "requestBody": {
- "description": "\nTo add a new node to the layout or to change the configuration of an existing node, simply set the values you want (`zone`, `capacity`, and `tags`).\nTo remove a node, simply pass the `remove: true` field.\nThis logic is represented in OpenAPI with a 'One Of' object.\n\nContrary to the CLI that may update only a subset of the fields capacity, zone and tags, when calling this API all of these values must be specified.\n ",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateClusterLayoutRequest"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Proposed changes have been added to the list of pending changes",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateClusterLayoutResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- },
- "/v2/UpdateKey": {
- "post": {
- "tags": [
- "Access key"
- ],
- "description": "\nUpdates information about the specified API access key.\n\n*Note: the secret key is not returned in the response, `null` is sent instead.*\n ",
- "operationId": "UpdateKey",
- "parameters": [
- {
- "name": "id",
- "in": "query",
- "description": "Access key ID",
- "required": true,
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateKeyRequestBody"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Access key has been updated",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateKeyResponse"
- }
- }
- }
- },
- "500": {
- "description": "Internal server error"
- }
- }
- }
- }
- },
- "components": {
- "schemas": {
- "AddBucketAliasResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "AllowBucketKeyRequest": {
- "$ref": "#/components/schemas/BucketKeyPermChangeRequest"
- },
- "AllowBucketKeyResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "ApiBucketKeyPerm": {
- "type": "object",
- "properties": {
- "owner": {
- "type": "boolean"
- },
- "read": {
- "type": "boolean"
- },
- "write": {
- "type": "boolean"
- }
- }
- },
- "ApiBucketQuotas": {
- "type": "object",
- "properties": {
- "maxObjects": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "minimum": 0
- },
- "maxSize": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "ApplyClusterLayoutRequest": {
- "type": "object",
- "required": [
- "version"
- ],
- "properties": {
- "version": {
- "type": "integer",
- "format": "int64",
- "description": "As a safety measure, the new version number of the layout must\nbe specified here",
- "minimum": 0
- }
- }
- },
- "ApplyClusterLayoutResponse": {
- "type": "object",
- "required": [
- "message",
- "layout"
- ],
- "properties": {
- "layout": {
- "$ref": "#/components/schemas/GetClusterLayoutResponse",
- "description": "Details about the new cluster layout"
- },
- "message": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "Plain-text information about the layout computation\n(do not try to parse this)"
- }
- }
- },
- "BlockError": {
- "type": "object",
- "required": [
- "blockHash",
- "refcount",
- "errorCount",
- "lastTrySecsAgo",
- "nextTryInSecs"
- ],
- "properties": {
- "blockHash": {
- "type": "string"
- },
- "errorCount": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "lastTrySecsAgo": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "nextTryInSecs": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "refcount": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "BlockVersion": {
- "type": "object",
- "required": [
- "versionId",
- "refDeleted",
- "versionDeleted",
- "garbageCollected"
- ],
- "properties": {
- "backlink": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/BlockVersionBacklink"
- }
- ]
- },
- "garbageCollected": {
- "type": "boolean"
- },
- "refDeleted": {
- "type": "boolean"
- },
- "versionDeleted": {
- "type": "boolean"
- },
- "versionId": {
- "type": "string"
- }
- }
- },
- "BlockVersionBacklink": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "object"
- ],
- "properties": {
- "object": {
- "type": "object",
- "required": [
- "bucketId",
- "key"
- ],
- "properties": {
- "bucketId": {
- "type": "string"
- },
- "key": {
- "type": "string"
- }
- }
- }
- }
- },
- {
- "type": "object",
- "required": [
- "upload"
- ],
- "properties": {
- "upload": {
- "type": "object",
- "required": [
- "uploadId",
- "uploadDeleted",
- "uploadGarbageCollected"
- ],
- "properties": {
- "bucketId": {
- "type": [
- "string",
- "null"
- ]
- },
- "key": {
- "type": [
- "string",
- "null"
- ]
- },
- "uploadDeleted": {
- "type": "boolean"
- },
- "uploadGarbageCollected": {
- "type": "boolean"
- },
- "uploadId": {
- "type": "string"
- }
- }
- }
- }
- }
- ]
- },
- "BucketAliasEnum": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "bucketId",
- "globalAlias"
- ],
- "properties": {
- "bucketId": {
- "type": "string"
- },
- "globalAlias": {
- "type": "string"
- }
- }
- },
- {
- "type": "object",
- "required": [
- "bucketId",
- "localAlias",
- "accessKeyId"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "bucketId": {
- "type": "string"
- },
- "localAlias": {
- "type": "string"
- }
- }
- }
- ]
- },
- "BucketKeyPermChangeRequest": {
- "type": "object",
- "required": [
- "bucketId",
- "accessKeyId",
- "permissions"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "bucketId": {
- "type": "string"
- },
- "permissions": {
- "$ref": "#/components/schemas/ApiBucketKeyPerm"
- }
- }
- },
- "BucketLocalAlias": {
- "type": "object",
- "required": [
- "accessKeyId",
- "alias"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "alias": {
- "type": "string"
- }
- }
- },
- "CleanupIncompleteUploadsRequest": {
- "type": "object",
- "required": [
- "bucketId",
- "olderThanSecs"
- ],
- "properties": {
- "bucketId": {
- "type": "string"
- },
- "olderThanSecs": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "CleanupIncompleteUploadsResponse": {
- "type": "object",
- "required": [
- "uploadsDeleted"
- ],
- "properties": {
- "uploadsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "ClusterLayoutSkipDeadNodesRequest": {
- "type": "object",
- "required": [
- "version",
- "allowMissingData"
- ],
- "properties": {
- "allowMissingData": {
- "type": "boolean",
- "description": "Allow the skip even if a quorum of nodes could not be found for\nthe data among the remaining nodes"
- },
- "version": {
- "type": "integer",
- "format": "int64",
- "description": "Version number of the layout to assume is currently up-to-date.\nThis will generally be the current layout version.",
- "minimum": 0
- }
- }
- },
- "ClusterLayoutSkipDeadNodesResponse": {
- "type": "object",
- "required": [
- "ackUpdated",
- "syncUpdated"
- ],
- "properties": {
- "ackUpdated": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "Nodes for which the ACK update tracker has been updated to `version`"
- },
- "syncUpdated": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "If `allow_missing_data` is set,\nnodes for which the SYNC update tracker has been updated to `version`"
- }
- }
- },
- "ClusterLayoutVersion": {
- "type": "object",
- "required": [
- "version",
- "status",
- "storageNodes",
- "gatewayNodes"
- ],
- "properties": {
- "gatewayNodes": {
- "type": "integer",
- "format": "int64",
- "description": "Number of nodes with a gateway role in this layout version",
- "minimum": 0
- },
- "status": {
- "$ref": "#/components/schemas/ClusterLayoutVersionStatus",
- "description": "Status of this layout version"
- },
- "storageNodes": {
- "type": "integer",
- "format": "int64",
- "description": "Number of nodes with an assigned storage capacity in this layout version",
- "minimum": 0
- },
- "version": {
- "type": "integer",
- "format": "int64",
- "description": "Version number of this layout version",
- "minimum": 0
- }
- }
- },
- "ClusterLayoutVersionStatus": {
- "type": "string",
- "enum": [
- "Current",
- "Draining",
- "Historical"
- ]
- },
- "ConnectClusterNodesRequest": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "ConnectClusterNodesResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ConnectNodeResponse"
- }
- },
- "ConnectNodeResponse": {
- "type": "object",
- "required": [
- "success"
- ],
- "properties": {
- "error": {
- "type": [
- "string",
- "null"
- ],
- "description": "An error message if Garage did not manage to connect to this node"
- },
- "success": {
- "type": "boolean",
- "description": "`true` if Garage managed to connect to this node"
- }
- }
- },
- "CreateAdminTokenResponse": {
- "allOf": [
- {
- "$ref": "#/components/schemas/GetAdminTokenInfoResponse"
- },
- {
- "type": "object",
- "required": [
- "secretToken"
- ],
- "properties": {
- "secretToken": {
- "type": "string",
- "description": "The secret bearer token. **CAUTION:** This token will be shown only\nONCE, so this value MUST be remembered somewhere, or the token\nwill be unusable."
- }
- }
- }
- ]
- },
- "CreateBucketLocalAlias": {
- "type": "object",
- "required": [
- "accessKeyId",
- "alias"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "alias": {
- "type": "string"
- },
- "allow": {
- "$ref": "#/components/schemas/ApiBucketKeyPerm"
- }
- }
- },
- "CreateBucketRequest": {
- "type": "object",
- "properties": {
- "globalAlias": {
- "type": [
- "string",
- "null"
- ]
- },
- "localAlias": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/CreateBucketLocalAlias"
- }
- ]
- }
- }
- },
- "CreateBucketResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "CreateKeyRequest": {
- "$ref": "#/components/schemas/UpdateKeyRequestBody"
- },
- "CreateKeyResponse": {
- "$ref": "#/components/schemas/GetKeyInfoResponse"
- },
- "DenyBucketKeyRequest": {
- "$ref": "#/components/schemas/BucketKeyPermChangeRequest"
- },
- "DenyBucketKeyResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "FreeSpaceResp": {
- "type": "object",
- "required": [
- "available",
- "total"
- ],
- "properties": {
- "available": {
- "type": "integer",
- "format": "int64",
- "description": "Number of bytes available",
- "minimum": 0
- },
- "total": {
- "type": "integer",
- "format": "int64",
- "description": "Total number of bytes",
- "minimum": 0
- }
- }
- },
- "GetAdminTokenInfoResponse": {
- "type": "object",
- "required": [
- "name",
- "expired",
- "scope"
- ],
- "properties": {
- "created": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time",
- "description": "Creation date"
- },
- "expiration": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time",
- "description": "Expiration time and date, formatted according to RFC 3339"
- },
- "expired": {
- "type": "boolean",
- "description": "Whether this admin token is expired already"
- },
- "id": {
- "type": [
- "string",
- "null"
- ],
- "description": "Identifier of the admin token (which is also a prefix of the full bearer token)"
- },
- "name": {
- "type": "string",
- "description": "Name of the admin API token"
- },
- "scope": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "Scope of the admin API token, a list of admin endpoint names (such as\n`GetClusterStatus`, etc), or the special value `*` to allow all\nadmin endpoints"
- }
- }
- },
- "GetBucketInfoKey": {
- "type": "object",
- "required": [
- "accessKeyId",
- "name",
- "permissions",
- "bucketLocalAliases"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "bucketLocalAliases": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "name": {
- "type": "string"
- },
- "permissions": {
- "$ref": "#/components/schemas/ApiBucketKeyPerm"
- }
- }
- },
- "GetBucketInfoResponse": {
- "type": "object",
- "required": [
- "id",
- "created",
- "globalAliases",
- "websiteAccess",
- "keys",
- "objects",
- "bytes",
- "unfinishedUploads",
- "unfinishedMultipartUploads",
- "unfinishedMultipartUploadParts",
- "unfinishedMultipartUploadBytes",
- "quotas"
- ],
- "properties": {
- "bytes": {
- "type": "integer",
- "format": "int64",
- "description": "Total number of bytes used by objects in this bucket"
- },
- "created": {
- "type": "string",
- "format": "date-time",
- "description": "Bucket creation date"
- },
- "globalAliases": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of global aliases for this bucket"
- },
- "id": {
- "type": "string",
- "description": "Identifier of the bucket"
- },
- "keys": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/GetBucketInfoKey"
- },
- "description": "List of access keys that have permissions granted on this bucket"
- },
- "objects": {
- "type": "integer",
- "format": "int64",
- "description": "Number of objects in this bucket"
- },
- "quotas": {
- "$ref": "#/components/schemas/ApiBucketQuotas",
- "description": "Quotas that apply to this bucket"
- },
- "unfinishedMultipartUploadBytes": {
- "type": "integer",
- "format": "int64",
- "description": "Total number of bytes used by unfinished multipart uploads in this bucket"
- },
- "unfinishedMultipartUploadParts": {
- "type": "integer",
- "format": "int64",
- "description": "Number of parts in unfinished multipart uploads in this bucket"
- },
- "unfinishedMultipartUploads": {
- "type": "integer",
- "format": "int64",
- "description": "Number of unfinished multipart uploads in this bucket"
- },
- "unfinishedUploads": {
- "type": "integer",
- "format": "int64",
- "description": "Number of unfinished uploads in this bucket"
- },
- "websiteAccess": {
- "type": "boolean",
-          "description": "Whether website access is enabled for this bucket"
- },
- "websiteConfig": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/GetBucketInfoWebsiteResponse",
- "description": "Website configuration for this bucket"
- }
- ]
- }
- }
- },
- "GetBucketInfoWebsiteResponse": {
- "type": "object",
- "required": [
- "indexDocument"
- ],
- "properties": {
- "errorDocument": {
- "type": [
- "string",
- "null"
- ]
- },
- "indexDocument": {
- "type": "string"
- }
- }
- },
- "GetClusterHealthResponse": {
- "type": "object",
- "required": [
- "status",
- "knownNodes",
- "connectedNodes",
- "storageNodes",
- "storageNodesUp",
- "partitions",
- "partitionsQuorum",
- "partitionsAllOk"
- ],
- "properties": {
- "connectedNodes": {
- "type": "integer",
-          "description": "the number of nodes this Garage node currently has an open connection to",
- "minimum": 0
- },
- "knownNodes": {
- "type": "integer",
- "description": "the number of nodes this Garage node has had a TCP connection to since the daemon started",
- "minimum": 0
- },
- "partitions": {
- "type": "integer",
- "description": "the total number of partitions of the data (currently always 256)",
- "minimum": 0
- },
- "partitionsAllOk": {
- "type": "integer",
- "description": "the number of partitions for which we are connected to all storage nodes responsible of storing it",
- "minimum": 0
- },
- "partitionsQuorum": {
- "type": "integer",
- "description": "the number of partitions for which a quorum of write nodes is available",
- "minimum": 0
- },
- "status": {
- "type": "string",
- "description": "One of `healthy`, `degraded` or `unavailable`:\n- `healthy`: Garage node is connected to all storage nodes\n- `degraded`: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions\n- `unavailable`: a quorum of write nodes is not available for some partitions"
- },
- "storageNodes": {
- "type": "integer",
- "description": "the number of storage nodes currently registered in the cluster layout",
- "minimum": 0
- },
- "storageNodesUp": {
- "type": "integer",
- "description": "the number of storage nodes to which a connection is currently open",
- "minimum": 0
- }
- }
- },
- "GetClusterLayoutHistoryResponse": {
- "type": "object",
- "required": [
- "currentVersion",
- "minAck",
- "versions"
- ],
- "properties": {
- "currentVersion": {
- "type": "integer",
- "format": "int64",
- "description": "The current version number of the cluster layout",
- "minimum": 0
- },
- "minAck": {
- "type": "integer",
- "format": "int64",
- "description": "All nodes in the cluster are aware of layout versions up to\nthis version number (at least)",
- "minimum": 0
- },
- "updateTrackers": {
- "type": [
- "object",
- "null"
- ],
- "description": "Detailed update trackers for nodes (see\n`https://garagehq.deuxfleurs.fr/blog/2023-12-preserving-read-after-write-consistency/`)",
- "additionalProperties": {
- "$ref": "#/components/schemas/NodeUpdateTrackers"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "versions": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ClusterLayoutVersion"
- },
- "description": "Layout version history"
- }
- }
- },
- "GetClusterLayoutResponse": {
- "type": "object",
- "required": [
- "version",
- "roles",
- "parameters",
- "partitionSize",
- "stagedRoleChanges"
- ],
- "properties": {
- "parameters": {
- "$ref": "#/components/schemas/LayoutParameters",
- "description": "Layout parameters used when the current layout was computed"
- },
- "partitionSize": {
- "type": "integer",
- "format": "int64",
- "description": "The size, in bytes, of one Garage partition (= a shard)",
- "minimum": 0
- },
- "roles": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/LayoutNodeRole"
- },
- "description": "List of nodes that currently have a role in the cluster layout"
- },
- "stagedParameters": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/LayoutParameters",
- "description": "Layout parameters to use when computing the next version of\nthe cluster layout"
- }
- ]
- },
- "stagedRoleChanges": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/NodeRoleChange"
- },
- "description": "List of nodes that will have a new role or whose role will be\nremoved in the next version of the cluster layout"
- },
- "version": {
- "type": "integer",
- "format": "int64",
- "description": "The current version number of the cluster layout",
- "minimum": 0
- }
- }
- },
- "GetClusterStatisticsResponse": {
- "type": "object",
- "required": [
- "freeform"
- ],
- "properties": {
- "freeform": {
- "type": "string"
- }
- }
- },
- "GetClusterStatusResponse": {
- "type": "object",
- "required": [
- "layoutVersion",
- "nodes"
- ],
- "properties": {
- "layoutVersion": {
- "type": "integer",
- "format": "int64",
- "description": "Current version number of the cluster layout",
- "minimum": 0
- },
- "nodes": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/NodeResp"
- },
- "description": "List of nodes that are either currently connected, part of the\ncurrent cluster layout, or part of an older cluster layout that\nis still active in the cluster (being drained)."
- }
- }
- },
- "GetCurrentAdminTokenInfoResponse": {
- "$ref": "#/components/schemas/GetAdminTokenInfoResponse"
- },
- "GetKeyInfoResponse": {
- "type": "object",
- "required": [
- "accessKeyId",
- "name",
- "expired",
- "permissions",
- "buckets"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "buckets": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/KeyInfoBucketResponse"
- }
- },
- "created": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time"
- },
- "expiration": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time"
- },
- "expired": {
- "type": "boolean"
- },
- "name": {
- "type": "string"
- },
- "permissions": {
- "$ref": "#/components/schemas/KeyPerm"
- },
- "secretAccessKey": {
- "type": [
- "string",
- "null"
- ]
- }
- }
- },
- "ImportKeyRequest": {
- "type": "object",
- "required": [
- "accessKeyId",
- "secretAccessKey"
- ],
- "properties": {
- "accessKeyId": {
- "type": "string"
- },
- "name": {
- "type": [
- "string",
- "null"
- ]
- },
- "secretAccessKey": {
- "type": "string"
- }
- }
- },
- "ImportKeyResponse": {
- "$ref": "#/components/schemas/GetKeyInfoResponse"
- },
- "InspectObjectBlock": {
- "type": "object",
- "required": [
- "partNumber",
- "offset",
- "hash",
- "size"
- ],
- "properties": {
- "hash": {
- "type": "string",
- "description": "Hash (blake2 sum) of the block's data"
- },
- "offset": {
- "type": "integer",
- "format": "int64",
- "description": "Offset of this block within the part",
- "minimum": 0
- },
- "partNumber": {
- "type": "integer",
- "format": "int64",
- "description": "Part number of the part containing this block, for multipart uploads",
- "minimum": 0
- },
- "size": {
- "type": "integer",
- "format": "int64",
-          "description": "Length of the block's data",
- "minimum": 0
- }
- }
- },
- "InspectObjectResponse": {
- "type": "object",
- "required": [
- "bucketId",
- "key",
- "versions"
- ],
- "properties": {
- "bucketId": {
- "type": "string",
- "description": "ID of the bucket containing the inspected object"
- },
- "key": {
- "type": "string",
- "description": "Key of the inspected object"
- },
- "versions": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/InspectObjectVersion"
- },
- "description": "List of versions currently stored for this object"
- }
- }
- },
- "InspectObjectVersion": {
- "type": "object",
- "required": [
- "uuid",
- "timestamp",
- "encrypted",
- "uploading",
- "aborted",
- "deleteMarker",
- "inline"
- ],
- "properties": {
- "aborted": {
- "type": "boolean",
- "description": "Whether this is an aborted upload"
- },
- "blocks": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/InspectObjectBlock"
- },
- "description": "List of data blocks for this object version"
- },
- "deleteMarker": {
- "type": "boolean",
- "description": "Whether this version is a delete marker (a tombstone indicating that a previous version of\nthe object has been deleted)"
- },
- "encrypted": {
- "type": "boolean",
- "description": "Whether this object version was created with SSE-C encryption"
- },
- "etag": {
- "type": [
- "string",
- "null"
- ],
- "description": "Etag of this object version"
- },
- "headers": {
- "type": "array",
- "items": {
- "type": "array",
- "items": false,
- "prefixItems": [
- {
- "type": "string"
- },
- {
- "type": "string"
- }
- ]
- },
- "description": "Metadata (HTTP headers) associated with this object version"
- },
- "inline": {
- "type": "boolean",
- "description": "Whether the object's data is stored inline (for small objects)"
- },
- "size": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "Size of the object, in bytes",
- "minimum": 0
- },
- "timestamp": {
- "type": "string",
- "format": "date-time",
- "description": "Creation timestamp of this object version"
- },
- "uploading": {
- "type": "boolean",
- "description": "Whether this object version is still uploading"
- },
- "uuid": {
- "type": "string",
- "description": "Version ID"
- }
- }
- },
- "KeyInfoBucketResponse": {
- "type": "object",
- "required": [
- "id",
- "globalAliases",
- "localAliases",
- "permissions"
- ],
- "properties": {
- "globalAliases": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "id": {
- "type": "string"
- },
- "localAliases": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "permissions": {
- "$ref": "#/components/schemas/ApiBucketKeyPerm"
- }
- }
- },
- "KeyPerm": {
- "type": "object",
- "properties": {
- "createBucket": {
- "type": "boolean"
- }
- }
- },
- "LayoutNodeRole": {
- "type": "object",
- "required": [
- "id",
- "zone",
- "tags"
- ],
- "properties": {
- "capacity": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "Capacity (in bytes) assigned by the cluster administrator,\nabsent for gateway nodes",
- "minimum": 0
- },
- "id": {
- "type": "string",
- "description": "Identifier of the node"
- },
- "storedPartitions": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "Number of partitions stored on this node\n(a result of the layout computation)",
- "minimum": 0
- },
- "tags": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of tags assigned by the cluster administrator"
- },
- "usableCapacity": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "Capacity (in bytes) that is actually usable on this node in the current\nlayout, which is equal to `stored_partitions` × `partition_size`",
- "minimum": 0
- },
- "zone": {
- "type": "string",
- "description": "Zone name assigned by the cluster administrator"
- }
- }
- },
- "LayoutParameters": {
- "type": "object",
- "required": [
- "zoneRedundancy"
- ],
- "properties": {
- "zoneRedundancy": {
- "$ref": "#/components/schemas/ZoneRedundancy",
- "description": "Minimum number of zones in which a data partition must be replicated"
- }
- }
- },
- "ListAdminTokensResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/GetAdminTokenInfoResponse"
- }
- },
- "ListBucketsResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ListBucketsResponseItem"
- }
- },
- "ListBucketsResponseItem": {
- "type": "object",
- "required": [
- "id",
- "created",
- "globalAliases",
- "localAliases"
- ],
- "properties": {
- "created": {
- "type": "string",
- "format": "date-time"
- },
- "globalAliases": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "id": {
- "type": "string"
- },
- "localAliases": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/BucketLocalAlias"
- }
- }
- }
- },
- "ListKeysResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ListKeysResponseItem"
- }
- },
- "ListKeysResponseItem": {
- "type": "object",
- "required": [
- "id",
- "name",
- "expired"
- ],
- "properties": {
- "created": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time"
- },
- "expiration": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time"
- },
- "expired": {
- "type": "boolean"
- },
- "id": {
- "type": "string"
- },
- "name": {
- "type": "string"
- }
- }
- },
- "LocalCreateMetadataSnapshotResponse": {
- "default": null
- },
- "LocalGetBlockInfoRequest": {
- "type": "object",
- "required": [
- "blockHash"
- ],
- "properties": {
- "blockHash": {
- "type": "string"
- }
- }
- },
- "LocalGetBlockInfoResponse": {
- "type": "object",
- "required": [
- "blockHash",
- "refcount",
- "versions"
- ],
- "properties": {
- "blockHash": {
- "type": "string"
- },
- "refcount": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "versions": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/BlockVersion"
- }
- }
- }
- },
- "LocalGetNodeInfoResponse": {
- "type": "object",
- "required": [
- "nodeId",
- "garageVersion",
- "rustVersion",
- "dbEngine"
- ],
- "properties": {
- "dbEngine": {
- "type": "string"
- },
- "garageFeatures": {
- "type": [
- "array",
- "null"
- ],
- "items": {
- "type": "string"
- }
- },
- "garageVersion": {
- "type": "string"
- },
- "nodeId": {
- "type": "string"
- },
- "rustVersion": {
- "type": "string"
- }
- }
- },
- "LocalGetNodeStatisticsResponse": {
- "type": "object",
- "required": [
- "freeform"
- ],
- "properties": {
- "freeform": {
- "type": "string"
- }
- }
- },
- "LocalGetWorkerInfoRequest": {
- "type": "object",
- "required": [
- "id"
- ],
- "properties": {
- "id": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "LocalGetWorkerInfoResponse": {
- "$ref": "#/components/schemas/WorkerInfoResp"
- },
- "LocalGetWorkerVariableRequest": {
- "type": "object",
- "properties": {
- "variable": {
- "type": [
- "string",
- "null"
- ]
- }
- }
- },
- "LocalGetWorkerVariableResponse": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "LocalLaunchRepairOperationRequest": {
- "type": "object",
- "required": [
- "repairType"
- ],
- "properties": {
- "repairType": {
- "$ref": "#/components/schemas/RepairType"
- }
- }
- },
- "LocalLaunchRepairOperationResponse": {
- "default": null
- },
- "LocalListBlockErrorsResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/BlockError"
- }
- },
- "LocalListWorkersRequest": {
- "type": "object",
- "properties": {
- "busyOnly": {
- "type": "boolean"
- },
- "errorOnly": {
- "type": "boolean"
- }
- }
- },
- "LocalListWorkersResponse": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/WorkerInfoResp"
- }
- },
- "LocalPurgeBlocksRequest": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "LocalPurgeBlocksResponse": {
- "type": "object",
- "required": [
- "blocksPurged",
- "objectsDeleted",
- "uploadsDeleted",
- "versionsDeleted",
- "blockRefsPurged"
- ],
- "properties": {
- "blockRefsPurged": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "blocksPurged": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "objectsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "uploadsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "versionsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "LocalRetryBlockResyncRequest": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "all"
- ],
- "properties": {
- "all": {
- "type": "boolean"
- }
- }
- },
- {
- "type": "object",
- "required": [
- "blockHashes"
- ],
- "properties": {
- "blockHashes": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- }
- ]
- },
- "LocalRetryBlockResyncResponse": {
- "type": "object",
- "required": [
- "count"
- ],
- "properties": {
- "count": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "LocalSetWorkerVariableRequest": {
- "type": "object",
- "required": [
- "variable",
- "value"
- ],
- "properties": {
- "value": {
- "type": "string"
- },
- "variable": {
- "type": "string"
- }
- }
- },
- "LocalSetWorkerVariableResponse": {
- "type": "object",
- "required": [
- "variable",
- "value"
- ],
- "properties": {
- "value": {
- "type": "string"
- },
- "variable": {
- "type": "string"
- }
- }
- },
- "MultiResponse_LocalCreateMetadataSnapshotResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "default": null
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalGetBlockInfoResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "blockHash",
- "refcount",
- "versions"
- ],
- "properties": {
- "blockHash": {
- "type": "string"
- },
- "refcount": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "versions": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/BlockVersion"
- }
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalGetNodeInfoResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "nodeId",
- "garageVersion",
- "rustVersion",
- "dbEngine"
- ],
- "properties": {
- "dbEngine": {
- "type": "string"
- },
- "garageFeatures": {
- "type": [
- "array",
- "null"
- ],
- "items": {
- "type": "string"
- }
- },
- "garageVersion": {
- "type": "string"
- },
- "nodeId": {
- "type": "string"
- },
- "rustVersion": {
- "type": "string"
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalGetNodeStatisticsResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "freeform"
- ],
- "properties": {
- "freeform": {
- "type": "string"
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalGetWorkerInfoResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "$ref": "#/components/schemas/WorkerInfoResp"
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalGetWorkerVariableResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalLaunchRepairOperationResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "default": null
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalListBlockErrorsResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/BlockError"
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalListWorkersResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/WorkerInfoResp"
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalPurgeBlocksResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "blocksPurged",
- "objectsDeleted",
- "uploadsDeleted",
- "versionsDeleted",
- "blockRefsPurged"
- ],
- "properties": {
- "blockRefsPurged": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "blocksPurged": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "objectsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "uploadsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "versionsDeleted": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalRetryBlockResyncResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "count"
- ],
- "properties": {
- "count": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "MultiResponse_LocalSetWorkerVariableResponse": {
- "type": "object",
- "required": [
- "success",
- "error"
- ],
- "properties": {
- "error": {
- "type": "object",
- "description": "Map of node id to error message, for nodes that were unable to complete the API\ncall",
- "additionalProperties": {
- "type": "string"
- },
- "propertyNames": {
- "type": "string"
- }
- },
- "success": {
- "type": "object",
- "description": "Map of node id to response returned by this node, for nodes that were able to\nsuccessfully complete the API call",
- "additionalProperties": {
- "type": "object",
- "required": [
- "variable",
- "value"
- ],
- "properties": {
- "value": {
- "type": "string"
- },
- "variable": {
- "type": "string"
- }
- }
- },
- "propertyNames": {
- "type": "string"
- }
- }
- }
- },
- "NodeAssignedRole": {
- "type": "object",
- "required": [
- "zone",
- "tags"
- ],
- "properties": {
- "capacity": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "Capacity (in bytes) assigned by the cluster administrator,\nabsent for gateway nodes",
- "minimum": 0
- },
- "tags": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of tags assigned by the cluster administrator"
- },
- "zone": {
- "type": "string",
- "description": "Zone name assigned by the cluster administrator"
- }
- }
- },
- "NodeResp": {
- "type": "object",
- "required": [
- "id",
- "isUp",
- "draining"
- ],
- "properties": {
- "addr": {
- "type": [
- "string",
- "null"
- ],
- "description": "Socket address used by other nodes to connect to this node for RPC"
- },
- "dataPartition": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/FreeSpaceResp",
- "description": "Total and available space on the disk partition(s) containing the data\ndirectory(ies)"
- }
- ]
- },
- "draining": {
- "type": "boolean",
- "description": "Whether this node is part of an older layout version and is draining data."
- },
- "garageVersion": {
- "type": [
- "string",
- "null"
- ],
- "description": "Garage version"
- },
- "hostname": {
- "type": [
- "string",
- "null"
- ],
- "description": "Hostname of the node"
- },
- "id": {
- "type": "string",
- "description": "Full-length node identifier"
- },
- "isUp": {
- "type": "boolean",
- "description": "Whether this node is connected in the cluster"
- },
- "lastSeenSecsAgo": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "description": "For disconnected nodes, the number of seconds since last contact,\nor `null` if no contact was established since Garage restarted.",
- "minimum": 0
- },
- "metadataPartition": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/FreeSpaceResp",
- "description": "Total and available space on the disk partition containing the\nmetadata directory"
- }
- ]
- },
- "role": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/NodeAssignedRole",
- "description": "Role assigned to this node in the current cluster layout"
- }
- ]
- }
- }
- },
- "NodeRoleChange": {
- "allOf": [
- {
- "$ref": "#/components/schemas/NodeRoleChangeEnum"
- },
- {
- "type": "object",
- "required": [
- "id"
- ],
- "properties": {
- "id": {
- "type": "string",
- "description": "ID of the node for which this change applies"
- }
- }
- }
- ]
- },
- "NodeRoleChangeEnum": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "remove"
- ],
- "properties": {
- "remove": {
- "type": "boolean",
- "description": "Set `remove` to `true` to remove the node from the layout"
- }
- }
- },
- {
- "$ref": "#/components/schemas/NodeAssignedRole"
- }
- ]
- },
- "NodeRoleChangeRequest": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "id",
- "remove"
- ],
- "properties": {
- "id": {
- "type": "string",
- "description": "ID of the node for which this change applies"
- },
- "remove": {
- "type": "boolean",
- "description": "Set `remove` to `true` to remove the node from the layout"
- }
- }
- },
- {
- "$ref": "#/components/schemas/NodeRoleUpdate"
- }
- ]
- },
- "NodeRoleUpdate": {
- "allOf": [
- {
- "$ref": "#/components/schemas/NodeAssignedRole"
- },
- {
- "type": "object",
- "required": [
- "id"
- ],
- "properties": {
- "id": {
- "type": "string",
- "description": "ID of the node for which this change applies"
- }
- }
- }
- ]
- },
- "NodeUpdateTrackers": {
- "type": "object",
- "required": [
- "ack",
- "sync",
- "syncAck"
- ],
- "properties": {
- "ack": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "sync": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "syncAck": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "PreviewClusterLayoutChangesResponse": {
- "oneOf": [
- {
- "type": "object",
- "required": [
- "error"
- ],
- "properties": {
- "error": {
- "type": "string",
- "description": "Error message indicating that the layout could not be computed\nwith the provided configuration"
- }
- }
- },
- {
- "type": "object",
- "required": [
- "message",
- "newLayout"
- ],
- "properties": {
- "message": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "Plain-text information about the layout computation\n(do not try to parse this)"
- },
- "newLayout": {
- "$ref": "#/components/schemas/GetClusterLayoutResponse",
- "description": "Details about the new cluster layout"
- }
- }
- }
- ]
- },
- "RemoveBucketAliasResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "RepairType": {
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "tables"
- ]
- },
- {
- "type": "string",
- "enum": [
- "blocks"
- ]
- },
- {
- "type": "string",
- "enum": [
- "versions"
- ]
- },
- {
- "type": "string",
- "enum": [
- "multipartUploads"
- ]
- },
- {
- "type": "string",
- "enum": [
- "blockRefs"
- ]
- },
- {
- "type": "string",
- "enum": [
- "blockRc"
- ]
- },
- {
- "type": "string",
- "enum": [
- "rebalance"
- ]
- },
- {
- "type": "object",
- "required": [
- "scrub"
- ],
- "properties": {
- "scrub": {
- "$ref": "#/components/schemas/ScrubCommand"
- }
- }
- },
- {
- "type": "string",
- "enum": [
- "aliases"
- ]
- },
- {
- "type": "string",
- "enum": [
- "clearResyncQueue"
- ]
- }
- ]
- },
- "RevertClusterLayoutResponse": {
- "$ref": "#/components/schemas/GetClusterLayoutResponse"
- },
- "ScrubCommand": {
- "type": "string",
- "enum": [
- "start",
- "pause",
- "resume",
- "cancel"
- ]
- },
- "UpdateAdminTokenRequestBody": {
- "type": "object",
- "properties": {
- "expiration": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time",
- "description": "Expiration time and date, formatted according to RFC 3339"
- },
- "name": {
- "type": [
- "string",
- "null"
- ],
- "description": "Name of the admin API token"
- },
- "neverExpires": {
- "type": "boolean",
- "description": "Set the admin token to never expire"
- },
- "scope": {
- "type": [
- "array",
- "null"
- ],
- "items": {
- "type": "string"
- },
- "description": "Scope of the admin API token, a list of admin endpoint names (such as\n`GetClusterStatus`, etc), or the special value `*` to allow all\nadmin endpoints. **WARNING:** Granting a scope of `CreateAdminToken` or\n`UpdateAdminToken` trivially allows for privilege escalation, and is thus\nfunctionnally equivalent to granting a scope of `*`."
- }
- }
- },
- "UpdateAdminTokenResponse": {
- "$ref": "#/components/schemas/GetAdminTokenInfoResponse"
- },
- "UpdateBucketRequestBody": {
- "type": "object",
- "properties": {
- "quotas": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/ApiBucketQuotas"
- }
- ]
- },
- "websiteAccess": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/UpdateBucketWebsiteAccess"
- }
- ]
- }
- }
- },
- "UpdateBucketResponse": {
- "$ref": "#/components/schemas/GetBucketInfoResponse"
- },
- "UpdateBucketWebsiteAccess": {
- "type": "object",
- "required": [
- "enabled"
- ],
- "properties": {
- "enabled": {
- "type": "boolean"
- },
- "errorDocument": {
- "type": [
- "string",
- "null"
- ]
- },
- "indexDocument": {
- "type": [
- "string",
- "null"
- ]
- }
- }
- },
- "UpdateClusterLayoutRequest": {
- "type": "object",
- "properties": {
- "parameters": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/LayoutParameters",
- "description": "New layout computation parameters to use"
- }
- ]
- },
- "roles": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/NodeRoleChangeRequest"
- },
- "description": "New node roles to assign or remove in the cluster layout"
- }
- }
- },
- "UpdateClusterLayoutResponse": {
- "$ref": "#/components/schemas/GetClusterLayoutResponse"
- },
- "UpdateKeyRequestBody": {
- "type": "object",
- "properties": {
- "allow": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/KeyPerm",
- "description": "Permissions to allow for the key"
- }
- ]
- },
- "deny": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/KeyPerm",
- "description": "Permissions to deny for the key"
- }
- ]
- },
- "expiration": {
- "type": [
- "string",
- "null"
- ],
- "format": "date-time",
- "description": "Expiration time and date, formatted according to RFC 3339"
- },
- "name": {
- "type": [
- "string",
- "null"
- ],
- "description": "Name of the API key"
- },
- "neverExpires": {
- "type": "boolean",
- "description": "Set the access key to never expire"
- }
- }
- },
- "UpdateKeyResponse": {
- "$ref": "#/components/schemas/GetKeyInfoResponse"
- },
- "WorkerInfoResp": {
- "type": "object",
- "required": [
- "id",
- "name",
- "state",
- "errors",
- "consecutiveErrors",
- "freeform"
- ],
- "properties": {
- "consecutiveErrors": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "errors": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "freeform": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "id": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- },
- "lastError": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "$ref": "#/components/schemas/WorkerLastError"
- }
- ]
- },
- "name": {
- "type": "string"
- },
- "persistentErrors": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "minimum": 0
- },
- "progress": {
- "type": [
- "string",
- "null"
- ]
- },
- "queueLength": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int64",
- "minimum": 0
- },
- "state": {
- "$ref": "#/components/schemas/WorkerStateResp"
- },
- "tranquility": {
- "type": [
- "integer",
- "null"
- ],
- "format": "int32",
- "minimum": 0
- }
- }
- },
- "WorkerLastError": {
- "type": "object",
- "required": [
- "message",
- "secsAgo"
- ],
- "properties": {
- "message": {
- "type": "string"
- },
- "secsAgo": {
- "type": "integer",
- "format": "int64",
- "minimum": 0
- }
- }
- },
- "WorkerStateResp": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/Yolo"
- },
- {
- "$ref": "#/components/schemas/WorkerStateRespStrs"
- }
- ]
- },
- "Yolo": {
- "type": "object",
- "required": [
- "throttled"
- ],
- "properties": {
- "throttled": {
- "type": "object",
- "required": [
- "durationSecs"
- ],
- "properties": {
- "durationSecs": {
- "type": "number",
- "format": "float"
- }
- }
- }
- }
- },
- "WorkerStateRespStrs": {
- "type": "string",
- "enum": [
- "busy",
- "idle",
- "done"
- ]
- },
- "ZoneRedundancy": {
- "oneOf": [
- {
- "type": "object",
- "description": "Partitions must be replicated in at least this number of\ndistinct zones.",
- "required": [
- "atLeast"
- ],
- "properties": {
- "atLeast": {
- "type": "integer",
- "description": "Partitions must be replicated in at least this number of\ndistinct zones.",
- "minimum": 0
- }
- }
- },
- {
- "type": "string",
- "description": "Partitions must be replicated in as many zones as possible:\nas many zones as there are replicas, if there are enough distinct\nzones, or at least one in each zone otherwise.",
- "enum": [
- "maximum"
- ]
- }
- ]
- }
- },
- "securitySchemes": {
- "bearerAuth": {
- "type": "http",
- "scheme": "bearer"
- }
- }
- },
- "security": [
- {
- "bearerAuth": []
- }
- ]
-}
diff --git a/doc/book/connect/apps/index.md b/doc/book/connect/apps/index.md
index 25f7fcf7..f52d434b 100644
--- a/doc/book/connect/apps/index.md
+++ b/doc/book/connect/apps/index.md
@@ -12,9 +12,8 @@ In this section, we cover the following web applications:
| [Mastodon](#mastodon) | ✅ | Natively supported |
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
| [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
-| [Ente](#ente) | ✅ | Natively supported |
-| [Pixelfed](#pixelfed) | ❓ | Natively supported |
-| [Pleroma](#pleroma) | ✅ | Natively supported |
+| [Pixelfed](#pixelfed) | ✅ | Natively supported |
+| [Pleroma](#pleroma) | ❓ | Not yet tested |
| [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
| [Funkwhale](#funkwhale) | ❓ | Not yet tested |
| [Misskey](#misskey) | ❓ | Not yet tested |
@@ -568,186 +567,13 @@ The module can then be configured with:
Other configuration options can be found in the
[configuration YAML file](https://github.com/processone/ejabberd-contrib/blob/master/mod_s3_upload/conf/mod_s3_upload.yml).
-
-## Ente
-
-Ente is an alternative for Google Photos and Apple Photos. It [can be selfhosted](https://help.ente.io/self-hosting/) and is working fine with Garage as of May 2024.
-As a first step we need to create a bucket and a key for Ente:
-
-```bash
-garage bucket create ente
-garage key create ente-key
-# For the CORS setup to work, the key needs to be --owner as well, at least temporarily.
-garage bucket allow ente --read --write --owner --key ente-key
-```
-
-We also need to setup some CORS rules to allow the Ente frontend to access the bucket:
-
-```bash
-export CORS='{"CORSRules":[{"AllowedHeaders":["*"],"AllowedMethods":["GET", "PUT", "POST", "DELETE"],"AllowedOrigins":["*"], "ExposeHeaders":["ETag"]}]}'
-aws s3api put-bucket-cors --bucket ente --cors-configuration $CORS
-```
-
-Now we need to configure ente-server to use our bucket. This is explained [in the Ente S3 documentation](https://help.ente.io/self-hosting/guides/external-s3).
-Prepare a configuration file for ente's backend as `museum.yaml`:
-
-```yaml
-credentials-file: /credentials.yaml
-apps:
- public-albums: https://albums.example.tld # If you want to use the share album feature
- internal:
-hardcoded-ott:
- local-domain-suffix: "@example.com" # Your domain
- local-domain-value: 123456 # Custom One-Time Password since we are not sending mail by default
-key:
- # WARNING -- You MUST CHANGE the values below
- # Someone has made an image that can do it for you : https://github.com/EdyTheCow/ente-selfhost/blob/main/images/ente-server-tools/Dockerfile
- # Simply build it yourself or run docker run --rm ghcr.io/edythecow/ente-server-tools go run tools/gen-random-keys/main.go
- encryption: yvmG/RnzKrbCb9L3mgsmoxXr9H7i2Z4qlbT0mL3ln4w= # CHANGE THIS VALUE
- hash: KXYiG07wC7GIgvCSdg+WmyWdXDAn6XKYJtp/wkEU7x573+byBRAYtpTP0wwvi8i/4l37uicX1dVTUzwH3sLZyw== # CHANGE THIS VALUE
-jwt:
- secret: i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8= # CHANGE THIS VALUE
-```
-
-The full configuration file can be found [here](https://github.com/ente-io/ente/blob/main/server/configurations/local.yaml)
-Then prepare a credentials file as `credentials.yaml`
-
-```yaml
-db:
- host: postgres
- port: 5432
- name:
- user:
- password:
-
-s3:
- # Override the primary and secondary hot storage. The commented out values
- # are the defaults.
- #
- hot_storage:
- primary: b2-eu-cen
- # secondary: wasabi-eu-central-2-v3
-
- # If true, enable some workarounds to allow us to use a local minio instance
- # for object storage.
- #
- # 1. Disable SSL.
- # 2. Use "path" style S3 URLs (see `use_path_style_urls` below).
- # 3. Directly download the file during replication instead of going via the
- # Cloudflare worker.
- # 4. Do not specify storage classes when uploading objects (since minio does
- # not support them, specifically it doesn't support GLACIER).
- are_local_buckets: true
-
- # To use "path" style S3 URLs instead of DNS-based bucket access
- # default to true if you set "are_local_buckets: true"
- # use_path_style_urls: true
-
- b2-eu-cen: # Don't change this key, it is hardcoded
- key:
- secret:
- endpoint: garage:3900 # publically accessible endpoint of your garage instance
- region: garage
- bucket:
- use_path_style: true
- # you can specify secondary locations, names are hardcoded as well
- # wasabi-eu-central-2-v3:
- # scw-eu-fr-v3:
-
- # and you can also specify a bucket to be used for embeddings, preview etc..
- # default to the first bucket
- # derived-storage: wasabi-eu-central-2-derived
-```
-
-Finally you can run it with Docker :
-
-```bash
-docker run -d --name ente-server --restart unless-stopped -v /path/to/museum.yaml:/museum.yaml -v /path/to/credentials.yaml:/credentials.yaml -p 8080:8080 ghcr.io/ente-io/ente-server
-```
-
-For more information on deployment you can check the [ente documentation](https://help.ente.io/self-hosting/)
-
## Pixelfed
[Pixelfed Technical Documentation > Configuration](https://docs.pixelfed.org/technical-documentation/env.html#filesystem)
## Pleroma
-### Creating your bucket
-
-This is the usual Garage setup:
-
-```bash
-garage key new --name pleroma-key
-garage bucket create pleroma
-garage bucket allow pleroma --read --write --owner --key pleroma-key
-```
-
-We also need to expose these buckets publicly to serve their content to users:
-
-```bash
-garage bucket website --allow pleroma
-```
-
-Note the Key ID and Secret Key.
-
-### Configure Pleroma
-
-Update your Pleroma configuration like that in `/etc/pleroma/config.exs`.
-
-```
-config :pleroma, Pleroma.Upload,
- uploader: Pleroma.Uploaders.S3,
- base_url: "https://pleroma.garage.example.tld"
-
-config :ex_aws, :s3,
- access_key_id: "GW...",
- secret_access_key: "XXX",
- region: "garage",
- host: "api.garage.example.tld"
-```
-
-And restart Pleroma.
-
-You can found more information in [Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
-
-### Migrating your data
-
-Pleroma have an internal migration tool that can encounter some fatal error
-
-```
-** (EXIT from #PID<0.98.0>) an exception was raised:
- ** (File.Error) could not stream "/var/lib/pleroma/uploads/09/f8": illegal operation on a directory
- (elixir 1.17.3) lib/file/stream.ex:100: anonymous fn/3 in Enumerable.File.Stream.reduce/3
- (elixir 1.17.3) lib/stream.ex:1675: anonymous fn/5 in Stream.resource/3
- (elixir 1.17.3) lib/stream.ex:1891: Enumerable.Stream.do_each/4
- (elixir 1.17.3) lib/task/supervised.ex:370: Task.Supervised.stream_reduce/7
- (elixir 1.17.3) lib/enum.ex:4423: Enum.map/2
- (ex_aws_s3 2.5.8) lib/ex_aws/s3/upload.ex:141: ExAws.Operation.ExAws.S3.Upload.perform/2
- (pleroma 2.10.0) lib/pleroma/uploaders/s3.ex:60: Pleroma.Uploaders.S3.put_file/1
- (pleroma 2.10.0) lib/pleroma/uploaders/uploader.ex:49: Pleroma.Uploaders.Uploader.put_file/2
-```
-
-So, use [your best tool](https://garagehq.deuxfleurs.fr/documentation/connect/cli/) to sync `/var/lib/pleroma/uploads/` in your S3.
-
-Then, to avoid some non existant problem (just in case of), run this command
-
-```bash
-while true
-do
- rm -vr $(./bin/pleroma_ctl uploads migrate_local S3 2>&1 | grep "could not stream" | awk -F '"' '{print $2}')
- sleep 5
-done
-```
-
-If you have many files, stop this command sometime and the command bellow (interactive) to delete local
-file after upload. Then restart the loop.
-
-```bash
-./bin/pleroma_ctl uploads migrate_local S3 --delete
-```
-
-And *voilà*
+[Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
## Lemmy
diff --git a/doc/book/connect/backup.md b/doc/book/connect/backup.md
index dba6900d..7e97d777 100644
--- a/doc/book/connect/backup.md
+++ b/doc/book/connect/backup.md
@@ -207,13 +207,3 @@ $ plakar at @garageS3 ls
```
More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/
-
-## Synology HyperBackup
-
-HyperBackup can be configured to upload backups to garage using a custom S3 destination. However, the HyperBackup client hardcodes the `us-east-1` region that is a critical input to the v4 signature process. If garage is not set to `us-east-1`, HyperBackup will recognize available buckets, but fail during the final setup stage.
-
-In garage.toml:
-```toml
-[s3_api]
-s3_region = "us-east-1"
-```
diff --git a/doc/book/connect/cli.md b/doc/book/connect/cli.md
index 8329f0be..6529e4b2 100644
--- a/doc/book/connect/cli.md
+++ b/doc/book/connect/cli.md
@@ -149,15 +149,6 @@ rclone help
This will tremendously accelerate operations such as `rclone sync` or `rclone ncdu` by reducing the number
of ListObjects calls that are made.
-**Garage behind Cloudflare proxy:** when running Garage behind Cloudflare proxy, you might see `Response: error 403 Forbidden, Forbidden: Invalid signature` error in your garage logs or `AccessDenied: Forbidden: Invalid signature` error in rclone logs. Try adding `--s3-sign-accept-encoding=false` flag to your rclone command and see if the issue is resolved.
-
-```bash
-# this throws an error
-rclone lsd garage:
-
-# this should work
-rclone lsd --s3-sign-accept-encoding=false garage:
-```
## `s3cmd`
@@ -323,3 +314,4 @@ ls
```
And through the web interface at http://[::1]:8080/web/client
+
diff --git a/doc/book/cookbook/ansible.md b/doc/book/cookbook/ansible.md
index 58457881..8b0d2969 100644
--- a/doc/book/cookbook/ansible.md
+++ b/doc/book/cookbook/ansible.md
@@ -8,7 +8,7 @@ have published Ansible roles. We list them and compare them below.
## Comparison of Ansible roles
-| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster2309 ansible-role-garage](#eddster2309-ansible-role-garage) |
+| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
| **Runtime** | Systemd | Docker | Systemd |
| **Target OS** | Any Linux | Any Linux | Any Linux |
diff --git a/doc/book/cookbook/binary-packages.md b/doc/book/cookbook/binary-packages.md
index 1e399764..ce6beb7b 100644
--- a/doc/book/cookbook/binary-packages.md
+++ b/doc/book/cookbook/binary-packages.md
@@ -29,10 +29,6 @@ it's stable).
Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).
-```bash
-pacman -S garage
-```
-
## FreeBSD
```bash
@@ -44,9 +40,3 @@ pkg install garage
```bash
nix-shell -p garage
```
-
-## conda-forge
-
-```bash
-pixi global install garage
-```
diff --git a/doc/book/cookbook/from-source.md b/doc/book/cookbook/from-source.md
index d8cc01f7..7105c999 100644
--- a/doc/book/cookbook/from-source.md
+++ b/doc/book/cookbook/from-source.md
@@ -20,10 +20,10 @@ sudo apt-get update
sudo apt-get install build-essential
```
-## Building from source from the Forgejo repository
+## Building from source from the Gitea repository
The primary location for Garage's source code is the
-[Forgejo repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
+[Gitea repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
which contains all of the released versions as well as the code
for the developpement of the next version.
@@ -85,14 +85,11 @@ The following feature flags are available in v0.8.0:
| Feature flag | Enabled | Description |
| ------------ | ------- | ----------- |
| `bundled-libs` | *by default* | Use bundled version of sqlite3, zstd, lmdb and libsodium |
-| `consul-discovery` | optional | Enable automatic registration and discovery
of cluster nodes through the Consul API |
-| `fjall` | experimental | Enable using Fjall to store Garage's metadata |
-| `journald` | optional | Enable logging to systemd-journald with
`GARAGE_LOG_TO_JOURNALD=true` environment variable set |
+| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium
if available (exclusive with `bundled-libs`, build using
`cargo build --no-default-features --features system-libs`) |
| `k2v` | optional | Enable the experimental K2V API (if used, all nodes on your
Garage cluster must have it enabled as well) |
| `kubernetes-discovery` | optional | Enable automatic registration and discovery
of cluster nodes through the Kubernetes API |
-| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
-| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
-| `syslog` | optional | Enable logging to Syslog with
`GARAGE_LOG_TO_SYSLOG=true` environment variable set |
-| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium
if available (exclusive with `bundled-libs`, build using
`cargo build --no-default-features --features system-libs`) |
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
+| `syslog` | optional | Enable logging to Syslog |
+| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
+| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
diff --git a/doc/book/cookbook/kubernetes.md b/doc/book/cookbook/kubernetes.md
index 9673340b..f5bceec8 100644
--- a/doc/book/cookbook/kubernetes.md
+++ b/doc/book/cookbook/kubernetes.md
@@ -52,7 +52,7 @@ This is an example `values.overrride.yaml` for deploying in a microk8s cluster w
```yaml
garage:
# Use only 2 replicas per object
- replicationFactor: 2
+ replicationMode: "2"
# Start 4 instances (StatefulSets) of garage
deployment:
diff --git a/doc/book/cookbook/real-world.md b/doc/book/cookbook/real-world.md
index 681346cb..b9927c06 100644
--- a/doc/book/cookbook/real-world.md
+++ b/doc/book/cookbook/real-world.md
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v2.2.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v2.2.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example:
```
-sudo docker pull dxflrs/garage:v2.2.0
+sudo docker pull dxflrs/garage:v1.3.0
```
## Deploying and configuring Garage
@@ -171,7 +171,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
- dxflrs/garage:v2.2.0
+ dxflrs/garage:v1.3.0
```
With this command line, Garage should be started automatically at each boot.
@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3"
services:
garage:
- image: dxflrs/garage:v2.2.0
+ image: dxflrs/garage:v1.3.0
network_mode: "host"
restart: unless-stopped
volumes:
diff --git a/doc/book/cookbook/reverse-proxy.md b/doc/book/cookbook/reverse-proxy.md
index d0f0c2bd..bdc1c549 100644
--- a/doc/book/cookbook/reverse-proxy.md
+++ b/doc/book/cookbook/reverse-proxy.md
@@ -7,7 +7,7 @@ The main reason to add a reverse proxy in front of Garage is to provide TLS to y
In production you will likely need your certificates signed by a certificate authority.
The most automated way is to use a provider supporting the [ACME protocol](https://datatracker.ietf.org/doc/html/rfc8555)
-such as [Let's Encrypt](https://letsencrypt.org/) or [ZeroSSL](https://zerossl.com/).
+such as [Let's Encrypt](https://letsencrypt.org/), [ZeroSSL](https://zerossl.com/) or [Buypass Go SSL](https://www.buypass.com/ssl/products/acme).
If you are only testing Garage, you can generate a self-signed certificate to follow the documentation:
@@ -97,7 +97,7 @@ server {
location / {
proxy_pass http://s3_backend;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header Host $http_host;
+ proxy_set_header Host $host;
# Disable buffering to a temporary file.
proxy_max_temp_file_size 0;
}
diff --git a/doc/book/design/goals.md b/doc/book/design/goals.md
index 3fe80e8f..efa3cd33 100644
--- a/doc/book/design/goals.md
+++ b/doc/book/design/goals.md
@@ -59,13 +59,11 @@ Garage themselves for the following tasks:
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
-- As a PowerDNS authoritative zone backend through [Lightning Stream](https://doc.powerdns.com/lightningstream/latest/index.html) and [LMDB](https://doc.powerdns.com/authoritative/backends/lmdb.html)
-
-- As a Mastodon media storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
+- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
Triplebit's Garage cluster is a multi-site cluster currently composed of
-15 storage nodes in 3 physical locations.
+10 nodes in 3 physical locations.
diff --git a/doc/book/operations/durability-repairs.md b/doc/book/operations/durability-repairs.md
index 8a307c84..fdf163e2 100644
--- a/doc/book/operations/durability-repairs.md
+++ b/doc/book/operations/durability-repairs.md
@@ -42,7 +42,7 @@ You may pause an ongoing scrub using `garage repair scrub pause`, but note that
the scrub will resume automatically 24 hours later as Garage will not let your
cluster run without a regular scrub. If the scrub procedure is too intensive
for your servers and is slowing down your workload, the recommended solution
-is to increase the "scrub tranquility" using `garage worker set scrub-tranquility`.
+is to increase the "scrub tranquility" using `garage repair scrub set-tranquility`.
A higher tranquility value will make Garage take longer pauses between two block
verifications. Of course, scrubbing the entire data store will also take longer.
diff --git a/doc/book/operations/recovering.md b/doc/book/operations/recovering.md
index fb20656d..05322b67 100644
--- a/doc/book/operations/recovering.md
+++ b/doc/book/operations/recovering.md
@@ -161,7 +161,4 @@ your recovery options are as follows:
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
BTRFS to snapshot your metadata partition, refer to their specific
- documentation on rolling back or copying files from an old snapshot.
- Note that, depending on the properties of the filesystem and of the DB engine,
- if these snapshots were taken during a write operation to the database, they may
- also be corrupted and thus unfit for recovery.
+ documentation on rolling back or copying files from an old snapshot.
diff --git a/doc/book/quick-start/_index.md b/doc/book/quick-start/_index.md
index d0ebf778..633b785a 100644
--- a/doc/book/quick-start/_index.md
+++ b/doc/book/quick-start/_index.md
@@ -132,7 +132,7 @@ docker run \
-v /path/to/garage.toml:/etc/garage.toml \
-v /path/to/garage/meta:/var/lib/garage/meta \
-v /path/to/garage/data:/var/lib/garage/data \
- dxflrs/garage:v2.2.0
+ dxflrs/garage:v1.3.0
```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
diff --git a/doc/book/reference-manual/admin-api.md b/doc/book/reference-manual/admin-api.md
index e96fcaff..fcf49e8c 100644
--- a/doc/book/reference-manual/admin-api.md
+++ b/doc/book/reference-manual/admin-api.md
@@ -6,167 +6,41 @@ weight = 40
The Garage administration API is accessible through a dedicated server whose
listen address is specified in the `[admin]` section of the configuration
file (see [configuration file
-reference](@/documentation/reference-manual/configuration.md)).
+reference](@/documentation/reference-manual/configuration.md))
-The current version of the admin API is v2. No breaking changes to the Garage
-administration API will be published outside of a major release.
+**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
+We will bump the version numbers prefixed to each API endpoint each time the syntax
+or semantics change, meaning that code that relies on these endpoints will break
+when changes are introduced.
+
+Versions:
+ - Before Garage 0.7.2 - no admin API
+ - Garage 0.7.2 - admin APIv0
+ - Garage 0.9.0 - admin APIv1, deprecate admin APIv0
-History of previous versions:
- - Before Garage v0.7.2 - no admin API
- - Garage v0.7.2 - admin API v0
- - Garage v0.9.0 - admin API v1, deprecate admin API v0
- - Garage v2.0.0 - admin API v2, deprecate admin API v1
## Access control
-### Using an API token
+The admin API uses two different tokens for access control, that are specified in the config file's `[admin]` section:
-Administration API tokens tokens are used as simple HTTP bearer tokens. In
-other words, to authenticate access to an admin API endpoint, add the following
-HTTP header to your request:
+- `metrics_token`: the token for accessing the Metrics endpoint (if this token
+ is not set in the config file, the Metrics endpoint can be accessed without
+ access control);
+
+- `admin_token`: the token for accessing all of the other administration
+ endpoints (if this token is not set in the config file, access to these
+ endpoints is disabled entirely).
+
+These tokens are used as simple HTTP bearer tokens. In other words, to
+authenticate access to an admin API endpoint, add the following HTTP header
+to your request:
```
Authorization: Bearer
```
-### User-defined API tokens
-
-Cluster administrators may dynamically define administration tokens using the CLI commands under `garage admin-token`.
-Such tokens may be limited in scope, meaning that they may enable access to only a subset of API calls.
-They may also have an expiration date to limit their use in time.
-
-Here is an example to create an administration token that is valid for 30 days
-and gives access to only a subset of API calls, allowing it to create buckets
-and access keys and give keys permissions on buckets:
-
-```bash
-$ garage admin-token create --expires-in 30d \
- --scope ListBuckets,GetBucketInfo,ListKeys,GetKeyInfo,CreateBucket,CreateKey,AllowBucketKey,DenyBucketKey \
- my-token
-This is your secret bearer token, it will not be shown again by Garage:
-
- 8ed1830b10a276ff57061950.kOSIpxWK9zSGbTO9Xadpv3YndSFWma0_snXcYHaORXk
-
-==== ADMINISTRATION TOKEN INFORMATION ====
-Token ID: 8ed1830b10a276ff57061950
-Token name: my-token
-Created: 2025-06-15 15:12:44.160 +02:00
-Validity: valid
-Expiration: 2025-07-15 15:12:44.117 +02:00
-
-Scope: ListBuckets
- GetBucketInfo
- ListKeys
- GetKeyInfo
- CreateBucket
- CreateKey
- AllowBucketKey
- DenyBucketKey
-```
-
-When running this command, your token will be shown only once and **will never
-be shown again by Garage**, so make sure to save it directly. The token is
-hashed internally, and is identified by its prefix (32 hex digits followed by a
-dot) which is saved in clear.
-
-When running `garage admin-token list`, you might see something like this:
-
-```
-ID Created Name Expiration Scope
-- - metrics_token (from daemon configuration) never Metrics
-8ed1830b10a276ff57061950 2025-06-15 my-token 2025-07-15 15:12:44.117 +02:00 ListBuckets, ... (8)
-```
-
-### Master API tokens
-
-The admin API can also use two different master tokens for access control,
-specified in the config file's `[admin]` section:
-
-- `metrics_token`: the token for accessing the Metrics endpoint. If this token
- is not set in the config file, the Metrics endpoint can be accessed without
- access control.
-
-- `admin_token`: the token for accessing all of the other administration
- endpoints. If this token is not set in the config file, access to these
- endpoints is only possible with a user-defined admin token.
-
-With the introduction of multiple user-defined admin tokens, the use of master
-API tokens is now discouraged.
-
-
-## Using the admin API
-
-All of the admin API endpoints are described in the OpenAPI specification:
-
- - APIv2 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.html) - [OpenAPI JSON](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.json)
- - APIv1 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
- - APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
-
-Making a request to the API from the command line can be as simple as running:
-
-```bash
-curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v2/GetClusterStatus | jq
-```
-
-For more advanced use cases, we recommend using an SDK.
-[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
-
-### Making API calls from the `garage` CLI
-
-Since v2.0.0, the `garage` binary provides a subcommand `garage json-api` that
-allows you to invoke the API without making an HTTP request. This can be
-useful for scripting Garage deployments.
-
-`garage json-api` proxies API calls through Garage's internal RPC protocol,
-therefore it does not require any form of authentication: RPC connection
-parameters are discovered automatically to contact the locally-running Garage
-instance (as when running any other `garage` CLI command).
-
-For simple calls that take no parameters, usage is as follows:
-
-```
-$ garage json-api GetClusterHealth
-{
- "connectedNodes": 3,
- "knownNodes": 3,
- "partitions": 256,
- "partitionsAllOk": 256,
- "partitionsQuorum": 256,
- "status": "healthy",
- "storageNodes": 3,
- "storageNodesOk": 3
-}
-```
-
-If you need to specify a JSON body for your call, you can add it directly after
-the name of the function you are calling:
-
-```
-$ garage json-api CreateAdminToken '{"name": "test"}'
-```
-
-Or you can feed it through stdin by adding a `-` as the last command parameter:
-
-```
-$ garage json-api CreateAdminToken -
-{"name": "test"}
-
-```
-
-For admin API calls that would have taken query parameters in their HTTP version, these parameters can be passed in the JSON body object:
-
-```
-$ garage json-api GetAdminTokenInfo '{"id":"b0e6e0ace2c0b2aca4cdb2de"}'
-```
-
-For admin API calls that take both query parameters and a JSON body, combine them in the following fashion:
-
-```
-$ garage json-api UpdateAdminToken '{"id":"b0e6e0ace2c0b2aca4cdb2de", "body":{"name":"not a test"}}'
-```
-
-## Special administration API endpoints
+## Administration API endpoints
### Metrics `GET /metrics`
@@ -209,7 +83,7 @@ content-length: 102
date: Tue, 08 Aug 2023 07:22:38 GMT
Garage is fully operational
-Consult the full health check API endpoint at /v2/GetClusterHealth for more details
+Consult the full health check API endpoint at /v0/health for more details
```
### On-demand TLS `GET /check`
@@ -252,7 +126,23 @@ $ curl -so /dev/null -w "%{http_code}" http://localhost:3903/check?domain=exampl
200
```
+
**References:**
- [Using On-Demand TLS](https://caddyserver.com/docs/automatic-https#using-on-demand-tls)
- [Add option for a backend check to approve use of on-demand TLS](https://github.com/caddyserver/caddy/pull/1939)
- [Serving tens of thousands of domains over HTTPS with Caddy](https://caddy.community/t/serving-tens-of-thousands-of-domains-over-https-with-caddy/11179)
+
+### Cluster operations
+
+These endpoints have a dedicated OpenAPI spec.
+ - APIv1 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
+ - APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
+
+Requesting the API from the command line can be as simple as running:
+
+```bash
+curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v0/status | jq
+```
+
+For more advanced use cases, we recommend using an SDK.
+[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
diff --git a/doc/book/reference-manual/configuration.md b/doc/book/reference-manual/configuration.md
index 642a16a1..1f583fe6 100644
--- a/doc/book/reference-manual/configuration.md
+++ b/doc/book/reference-manual/configuration.md
@@ -51,20 +51,17 @@ allow_punycode = false
[consul_discovery]
api = "catalog"
-consul_http_addr = "https://127.0.0.1:8500"
-tls_skip_verify = false
+consul_http_addr = "http://127.0.0.1:8500"
service_name = "garage-daemon"
-
ca_cert = "/etc/consul/consul-ca.crt"
client_cert = "/etc/consul/consul-client.crt"
client_key = "/etc/consul/consul-key.crt"
-
# for `agent` API mode, unset client_cert and client_key, and optionally enable `token`
# token = "abcdef-01234-56789"
-
+tls_skip_verify = false
tags = [ "dns-enabled" ]
meta = { dns-acl = "allow trusted" }
-datacenters = ["dc1", "dc2", "dc3"]
+
[kubernetes_discovery]
namespace = "garage"
@@ -85,7 +82,6 @@ add_host_to_metrics = true
[admin]
api_bind_addr = "0.0.0.0:3903"
metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
-metrics_require_token = true
admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
trace_sink = "http://localhost:4317"
```
@@ -101,9 +97,9 @@ The following gives details about each available configuration option.
Top-level configuration options, in alphabetical order:
[`allow_punycode`](#allow_punycode),
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
-[`block_max_concurrent_reads`](#block_max_concurrent_reads),
-[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
+[`block_max_concurrent_reads`](#block_max_concurrent_reads),
[`block_ram_buffer_max`](#block_ram_buffer_max),
+[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
[`block_size`](#block_size),
[`bootstrap_peers`](#bootstrap_peers),
[`compression_level`](#compression_level),
@@ -131,14 +127,12 @@ The `[consul_discovery]` section:
[`client_cert`](#consul_client_cert_and_key),
[`client_key`](#consul_client_cert_and_key),
[`consul_http_addr`](#consul_http_addr),
-[`datacenters`](#consul_datacenters)
[`meta`](#consul_tags_and_meta),
[`service_name`](#consul_service_name),
[`tags`](#consul_tags_and_meta),
[`tls_skip_verify`](#consul_tls_skip_verify),
[`token`](#consul_token).
-
The `[kubernetes_discovery]` section:
[`namespace`](#kube_namespace),
[`service_name`](#kube_service_name),
@@ -156,7 +150,6 @@ The `[s3_web]` section:
The `[admin]` section:
[`api_bind_addr`](#admin_api_bind_addr),
-[`metrics_require_token`](#admin_metrics_require_token),
[`metrics_token`/`metrics_token_file`](#admin_metrics_token),
[`admin_token`/`admin_token_file`](#admin_token),
[`trace_sink`](#admin_trace_sink),
@@ -343,7 +336,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
| --------- | ----------------- | ------------- |
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `/db.lmdb/` |
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `/db.sqlite` |
-| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`/`v2.1.0`) | `"fjall"` | `/db.fjall/` |
+| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `/db.fjall/` |
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `/db/` |
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
@@ -352,16 +345,8 @@ old Sled metadata databases to another engine.
Performance characteristics of the different DB engines are as follows:
-- **LMDB:** the recommended database engine for high-performance distributed clusters
- with `replication_factor` ≥ 2.
- LMDB works well, but is known to have the following limitations:
-
- - LMDB is prone to database corruption after an unclean shutdown (e.g. a process kill
- or a power outage). It is recommended to configure
- [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval) to be
- able to easily recover from this situation. With `replication_factor` ≥ 2,
- metadata can also be reconstructed from remote nodes upon corruption
- (see [Recovering from failures](@/documentation/operations/recovering.md#corrupted_meta)).
+- LMDB: the recommended database engine for high-performance distributed clusters.
+LMDB works very well, but is known to have the following limitations:
- The data format of LMDB is not portable between architectures, so for
instance the Garage database of an x86-64 node cannot be moved to an ARM64
@@ -371,21 +356,30 @@ Performance characteristics of the different DB engines are as follows:
node to very small database sizes due to how LMDB works; it is therefore
not recommended.
+ - Several users have reported corrupted LMDB database files after an unclean
+ shutdown (e.g. a power outage). This situation can generally be recovered
+ from if your cluster is geo-replicated (by rebuilding your metadata db from
+ other nodes), or if you have saved regular snapshots at the filesystem
+ level.
+
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
object keys in S3 and sort keys in K2V that are limted to 479 bytes.
-- **Sqlite:** Garage supports Sqlite as an alternative storage backend for
- metadata, which does not have the issues listed above for LMDB. Sqlite is
- slower than LMDB, so it is not the best choice for high-performance storage
- clusters.
+- Sqlite: Garage supports Sqlite as an alternative storage backend for
+ metadata, which does not have the issues listed above for LMDB.
+ On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
+ performance, which was fixed with the addition of `metadata_fsync`.
+ Sqlite is still probably slower than LMDB due to the way we use it,
+ so it is not the best choice for high-performance storage clusters,
+ but it should work fine in many cases.
-- **Fjall:** a storage engine based on LSM trees, which theoretically allow for
+- Fjall: a storage engine based on LSM trees, which theoretically allow for
higher write throughput than other storage engines that are based on B-trees.
Using Fjall could potentially improve Garage's performance significantly in
write-heavy workloads. **Support for Fjall is experimental at this point**,
- we have added it to Garage for evaluation purposes only. **Use it only with
- test data, and report any issues to our bug tracker. Do not use it for
- production workloads.**
+ we have added it to Garage for evaluation purposes only. **Do not use it for
+ production-critical workloads.**
+
It is possible to convert Garage's metadata directory from one format to another
using the `garage convert-db` command, which should be used as follows:
@@ -444,8 +438,7 @@ if geographical replication is used.
#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}
If this value is set, Garage will automatically take a snapshot of the metadata
-DB file at a regular interval and save it in the metadata directory,
-or in [`metadata_snapshots_dir`](#metadata_snapshots_dir) if it is set.
+DB file at a regular interval and save it in the metadata directory.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
@@ -454,19 +447,14 @@ corrupted, for instance after an unclean shutdown. See [this
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Garage keeps only the two most recent snapshots of the metadata DB and deletes
older ones automatically.
-You can also create metadata snapshots manually at any point using the
-`garage meta snapshot` command.
-
-Using snapshots created by Garage is the best option to make snapshots of your
-node's metadata for potential recovery, as they are guaranteed to be clean and
-consistent, contrarily to filesystem-level snapshots that may be taken while
-some writes are in-flight and thus might be corrupted.
Note that taking a metadata snapshot is a relatively intensive operation as the
entire data file is copied. A snapshot being taken might have performance
impacts on the Garage node while it is running. If the cluster is under heavy
write load when a snapshot operation is running, this might also cause the
database file to grow in size significantly as pages cannot be recycled easily.
+For this reason, it might be better to use filesystem-level snapshots instead
+if possible.
#### `disable_scrub` {#disable_scrub}
@@ -560,13 +548,13 @@ metric in Prometheus: a non-zero number of such events indicates an I/O
bottleneck on HDD read speed.
-#### `block_max_concurrent_writes_per_request` (since `v1.3.1` / `v2.2.0`) {#block_max_concurrent_writes_per_request}
+#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
This parameter is designed to adapt to the concurrent write performance of
-different storage media. Maximum number of parallel block writes per put request.
-Higher values may improve throughput but increase memory usage.
+different storage media. Maximum number of parallel block writes per put request.
+Higher values improve throughput but increase memory usage.
-Default value: 3. Recommended values: 10-30 for NVMe, 3-10 for spinning HDD.
+Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
#### `lmdb_map_size` {#lmdb_map_size}
@@ -740,18 +728,6 @@ node_prefix "" {
}
```
-
-#### `datacenters` {#consul_datacenters}
-
-Optional list of datacenters that allow garage to do service discovery when Consul is configured in WAN federation.
-
-Example: `datacenters = ["dc1", "dc2", "dc3"]`
-
-In a WAN configuration, by default the Consul services API only responds with
-local LAN services. When a list of datacenters is specified using this option,
-Garage will query the consul server API by datacenter directly, allowing for
-Garage to discover nodes across the Consul WAN.
-
#### `tags` and `meta` {#consul_tags_and_meta}
Additional list of tags and map of service meta to add during service registration.
@@ -848,34 +824,10 @@ See [administration API reference](@/documentation/reference-manual/admin-api.md
Alternatively, since `v0.8.5`, a path can be used to create a unix socket. Note that for security reasons,
the socket will have 0220 mode. Make sure to set user and group permissions accordingly.
-#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
-
-The token for accessing all administration functions on the admin endpoint,
-with the exception of the metrics endpoint (see `metrics_token`).
-
-You can use any random string for this value. We recommend generating a random
-token with `openssl rand -base64 32`.
-
-For Garage version earlier than `v2.0`, if this token is not set,
-access to these endpoints is disabled entirely.
-
-Since Garage `v2.0`, additional admin API tokens can be defined dynamically
-in your Garage cluster using administration commands. This new admin token system
-is more flexible since it allows admin tokens to have an expiration date,
-and to have a scope restricted to certain admin API functions. If `admin_token`
-is set, it behaves as an admin token without expiration and with full scope.
-Otherwise, only admin API tokens defined dynamically can be used.
-
-`admin_token` was introduced in Garage `v0.7.2`.
-`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
-
-`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
-
#### `metrics_token`, `metrics_token_file` or `GARAGE_METRICS_TOKEN`, `GARAGE_METRICS_TOKEN_FILE` (env) {#admin_metrics_token}
-The token for accessing the Prometheus metrics endpoint (`/metrics`).
-If this token is not set, and unless `metrics_require_token` is set to `true`,
-the metrics endpoint can be accessed without access control.
+The token for accessing the Metrics endpoint. If this token is not set, the
+Metrics endpoint can be accessed without access control.
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
@@ -884,12 +836,17 @@ You can use any random string for this value. We recommend generating a random t
`GARAGE_METRICS_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
-#### `metrics_require_token` (since `v2.0.0`) {#admin_metrics_require_token}
+#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
-If this is set to `true`, accessing the metrics endpoint will always require
-an access token. Valid tokens include the `metrics_token` if it is set,
-and admin API token defined dynamicaly in Garage which have
-the `Metrics` endpoint in their scope.
+The token for accessing all of the other administration endpoints. If this
+token is not set, access to these endpoints is disabled entirely.
+
+You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
+
+`admin_token` was introduced in Garage `v0.7.2`.
+`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
+
+`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
#### `trace_sink` {#admin_trace_sink}
diff --git a/doc/book/reference-manual/features.md b/doc/book/reference-manual/features.md
index a74b1467..481aef01 100644
--- a/doc/book/reference-manual/features.md
+++ b/doc/book/reference-manual/features.md
@@ -129,5 +129,5 @@ related to objects stored in an S3 bucket.
In the context of our research project, [Aérogramme](https://aerogramme.deuxfleurs.fr),
K2V is used to provide metadata and log storage for operations on encrypted e-mail storage.
-Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md)
+Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)
and on how to enable it in Garage [here](@/documentation/reference-manual/k2v.md).
diff --git a/doc/book/reference-manual/k2v.md b/doc/book/reference-manual/k2v.md
index f8e003d9..c01f641e 100644
--- a/doc/book/reference-manual/k2v.md
+++ b/doc/book/reference-manual/k2v.md
@@ -16,7 +16,7 @@ the `k2v` feature flag enabled can be obtained from our download page under
with `-k2v` (example: `v0.7.2-k2v`).
The specification of the K2V API can be found
-[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md).
+[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/doc/drafts/k2v-spec.md).
This document also includes a high-level overview of K2V's design.
The K2V API uses AWSv4 signatures for authentification, same as the S3 API.
diff --git a/doc/book/working-documents/migration-04.md b/doc/book/working-documents/migration-04.md
index 5aae2a42..52c56737 100644
--- a/doc/book/working-documents/migration-04.md
+++ b/doc/book/working-documents/migration-04.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.3 to 0.4"
-weight = 80
+weight = 20
+++
**Migrating from 0.3 to 0.4 is unsupported. This document is only intended to
diff --git a/doc/book/working-documents/migration-06.md b/doc/book/working-documents/migration-06.md
index 5fa29120..006b036b 100644
--- a/doc/book/working-documents/migration-06.md
+++ b/doc/book/working-documents/migration-06.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.5 to 0.6"
-weight = 75
+weight = 15
+++
**This guide explains how to migrate to 0.6 if you have an existing 0.5 cluster.
diff --git a/doc/book/working-documents/migration-07.md b/doc/book/working-documents/migration-07.md
index 392c75a9..03cdfedc 100644
--- a/doc/book/working-documents/migration-07.md
+++ b/doc/book/working-documents/migration-07.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.6 to 0.7"
-weight = 74
+weight = 14
+++
**This guide explains how to migrate to 0.7 if you have an existing 0.6 cluster.
We don't recommend trying to migrate to 0.7 directly from 0.5 or older.**
diff --git a/doc/book/working-documents/migration-08.md b/doc/book/working-documents/migration-08.md
index 17fe078b..b7c4c783 100644
--- a/doc/book/working-documents/migration-08.md
+++ b/doc/book/working-documents/migration-08.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.7 to 0.8"
-weight = 73
+weight = 13
+++
**This guide explains how to migrate to 0.8 if you have an existing 0.7 cluster.
diff --git a/doc/book/working-documents/migration-09.md b/doc/book/working-documents/migration-09.md
index cf5f309c..ba758093 100644
--- a/doc/book/working-documents/migration-09.md
+++ b/doc/book/working-documents/migration-09.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.8 to 0.9"
-weight = 72
+weight = 12
+++
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster.
diff --git a/doc/book/working-documents/migration-1.md b/doc/book/working-documents/migration-1.md
index 9a04d101..b6c0bb85 100644
--- a/doc/book/working-documents/migration-1.md
+++ b/doc/book/working-documents/migration-1.md
@@ -1,6 +1,6 @@
+++
title = "Migrating from 0.9 to 1.0"
-weight = 71
+weight = 11
+++
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
diff --git a/doc/book/working-documents/migration-2.md b/doc/book/working-documents/migration-2.md
deleted file mode 100644
index 01d984b3..00000000
--- a/doc/book/working-documents/migration-2.md
+++ /dev/null
@@ -1,70 +0,0 @@
-+++
-title = "Migrating from 1.0 to 2.0"
-weight = 70
-+++
-
-**This guide explains how to migrate to v2.x if you have an existing v1.x.x cluster.
-We don't recommend trying to migrate to v2.x directly from v0.9.x or older.**
-
-This migration procedure has been tested on several clusters without issues.
-However, it is still a *critical procedure* that might cause issues.
-**Make sure to back up all your data before attempting it!**
-
-You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
-
-## Changes introduced in v2.0
-
-The following are **breaking changes** in Garage v2.0 that require your attention when migrating:
-
-- The administration API has been completely reworked.
- Some calls to the `/v1/` endpoints will still work but most will not.
- New endpoints are prefixed by `/v2/`. **You will need to update all your code that makes use of the admin API.**
-
-- `replication_mode` is no longer a supported configuration parameter,
- please use `replication_factor` and `consistency_mode` instead.
-
-## Migration procedure
-
-The migration to Garage v2.0 can be done with almost no downtime,
-by restarting all nodes at once in the new version.
-
-The migration steps are as follows:
-
-1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
- all data seems to be synced correctly between nodes. If you have time, do
- additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
- etc.)
-
-2. Ensure you have a snapshot of your Garage installation that you can restore
- to in case the upgrade goes wrong, with one of the following options:
-
- - You may use the `garage meta snapshot --all` command
- to make a backup snapshot of the metadata directories of your nodes
- for backup purposes. Once this command has completed, copy the following
- files and directories from the `metadata_dir` of all your nodes
- to somewhere safe: `snapshots`, `cluster_layout`, `data_layout`,
- `node_key`, `node_key.pub`. (If you have set the `metadata_snapshots_dir`
- to a different value in your config file, back up that directory instead.)
-
- - If you are running a filesystem such as ZFS or BTRFS that support
- snapshotting, you can create a filesystem-level snapshot of the `metadata_dir`
- of all your nodes to be used as a restoration point if needed.
-
- - You may also make a back-up manually: turn off each node
- individually; back up its metadata folder (for instance, use the following
- command if your metadata directory is `/var/lib/garage/meta`: `cd
- /var/lib/garage ; tar -acf meta-v1.0.tar.zst meta/`); turn it back on
- again. This will allow you to take a backup of all nodes without
- impacting global cluster availability. You can do all nodes of a single
- zone at once as this does not impact the availability of Garage.
-
-3. Prepare your updated binaries and configuration files for Garage v2.0.
- **Remember to update your configuration file to remove `replication_mode` and replace it by `replication_factor`.**
-
-4. Shut down all v1.0 nodes simultaneously, and restart them all simultaneously
- in v2.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
- achieve this as fast as possible. Garage v2.0 should be in a working state
- as soon as enough nodes have started.
-
-5. Monitor your cluster in the following hours to see if it works well under
- your production load.
diff --git a/doc/book/working-documents/testing-strategy.md b/doc/book/working-documents/testing-strategy.md
index 3c29e47b..fff706d7 100644
--- a/doc/book/working-documents/testing-strategy.md
+++ b/doc/book/working-documents/testing-strategy.md
@@ -1,6 +1,6 @@
+++
title = "Testing strategy"
-weight = 100
+weight = 30
+++
diff --git a/doc/drafts/admin-api.md b/doc/drafts/admin-api.md
index 18dfb627..3ee948cb 100644
--- a/doc/drafts/admin-api.md
+++ b/doc/drafts/admin-api.md
@@ -13,12 +13,8 @@ We will bump the version numbers prefixed to each API endpoint each time the syn
or semantics change, meaning that code that relies on these endpoints will break
when changes are introduced.
-The Garage administration API was introduced in version 0.7.2, and was
-changed several times.
-
-**THIS DOCUMENT IS DEPRECATED.** We now have an OpenAPI spec which is automatically generated
-from Garage's source code and is always up-to-date. See `doc/api/garage-admin-v2.html`.
-Text in this document is no longer kept in sync with the admin API's actual behavior.
+The Garage administration API was introduced in version 0.7.2, this document
+does not apply to older versions of Garage.
## Access control
@@ -56,28 +52,34 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
and an HTTP status 503 (Service Unavailable) if there are some partitions
for which a quorum of nodes is not available.
A simple textual message is also returned in a body with content-type `text/plain`.
-See `/v2/GetClusterHealth` for an API that also returns JSON output.
-
-### Other special endpoints
-
-#### CheckDomain `GET /check?domain=`
-
-Checks whether this Garage cluster serves a website for domain ``.
-Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain.
+See `/v1/health` for an API that also returns JSON output.
### Cluster operations
-#### GetClusterStatus `GET /v2/GetClusterStatus`
+#### GetClusterStatus `GET /v1/status`
Returns the cluster's current status in JSON, including:
+- ID of the node being queried and its version of the Garage daemon
- Live nodes
- Currently configured cluster layout
+- Staged changes to the cluster layout
Example response body:
```json
{
+ "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
+ "garageVersion": "v1.3.0",
+ "garageFeatures": [
+ "k2v",
+ "lmdb",
+ "sqlite",
+ "metrics",
+ "bundled-libs"
+ ],
+ "rustVersion": "1.68.0",
+ "dbEngine": "LMDB (using Heed crate)",
"layoutVersion": 5,
"nodes": [
{
@@ -167,7 +169,7 @@ Example response body:
}
```
-#### GetClusterHealth `GET /v2/GetClusterHealth`
+#### GetClusterHealth `GET /v1/health`
Returns the cluster's current health in JSON format, with the following variables:
@@ -200,7 +202,7 @@ Example response body:
}
```
-#### ConnectClusterNodes `POST /v2/ConnectClusterNodes`
+#### ConnectClusterNodes `POST /v1/connect`
Instructs this Garage node to connect to other Garage nodes at specified addresses.
@@ -230,7 +232,7 @@ Example response:
]
```
-#### GetClusterLayout `GET /v2/GetClusterLayout`
+#### GetClusterLayout `GET /v1/layout`
Returns the cluster's current layout in JSON, including:
@@ -291,7 +293,7 @@ Example response body:
}
```
-#### UpdateClusterLayout `POST /v2/UpdateClusterLayout`
+#### UpdateClusterLayout `POST /v1/layout`
Send modifications to the cluster layout. These modifications will
be included in the staged role changes, visible in subsequent calls
@@ -328,7 +330,7 @@ This returns the new cluster layout with the proposed staged changes,
as returned by GetClusterLayout.
-#### ApplyClusterLayout `POST /v2/ApplyClusterLayout`
+#### ApplyClusterLayout `POST /v1/layout/apply`
Applies to the cluster the layout changes currently registered as
staged layout changes.
@@ -348,11 +350,23 @@ existing layout in the cluster.
This returns the message describing all the calculations done to compute the new
layout, as well as the description of the layout as returned by GetClusterLayout.
-#### RevertClusterLayout `POST /v2/RevertClusterLayout`
+#### RevertClusterLayout `POST /v1/layout/revert`
Clears all of the staged layout changes.
-This requests contains an empty body.
+Request body format:
+
+```json
+{
+ "version": 13
+}
+```
+
+Reverting the staged changes is done by incrementing the version number
+and clearing the contents of the staged change list.
+Similarly to the CLI, the body must include the incremented
+version number, which MUST be 1 + the value of the currently
+existing layout in the cluster.
This returns the new cluster layout with all changes reverted,
as returned by GetClusterLayout.
@@ -360,7 +374,7 @@ as returned by GetClusterLayout.
### Access key operations
-#### ListKeys `GET /v2/ListKeys`
+#### ListKeys `GET /v1/key`
Returns all API access keys in the cluster.
@@ -379,8 +393,8 @@ Example response:
]
```
-#### GetKeyInfo `GET /v2/GetKeyInfo?id=`
-#### GetKeyInfo `GET /v2/GetKeyInfo?search=`
+#### GetKeyInfo `GET /v1/key?id=`
+#### GetKeyInfo `GET /v1/key?search=`
Returns information about the requested API access key.
@@ -454,7 +468,7 @@ Example response:
}
```
-#### CreateKey `POST /v2/CreateKey`
+#### CreateKey `POST /v1/key`
Creates a new API access key.
@@ -469,7 +483,7 @@ Request body format:
This returns the key info, including the created secret key,
in the same format as the result of GetKeyInfo.
-#### ImportKey `POST /v2/ImportKey`
+#### ImportKey `POST /v1/key/import`
Imports an existing API key.
This will check that the imported key is in the valid format, i.e.
@@ -487,7 +501,7 @@ Request body format:
This returns the key info in the same format as the result of GetKeyInfo.
-#### UpdateKey `POST /v2/UpdateKey?id=`
+#### UpdateKey `POST /v1/key?id=`
Updates information about the specified API access key.
@@ -509,14 +523,14 @@ The possible flags in `allow` and `deny` are: `createBucket`.
This returns the key info in the same format as the result of GetKeyInfo.
-#### DeleteKey `POST /v2/DeleteKey?id=`
+#### DeleteKey `DELETE /v1/key?id=`
Deletes an API access key.
### Bucket operations
-#### ListBuckets `GET /v2/ListBuckets`
+#### ListBuckets `GET /v1/bucket`
Returns all storage buckets in the cluster.
@@ -558,8 +572,8 @@ Example response:
]
```
-#### GetBucketInfo `GET /v2/GetBucketInfo?id=`
-#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=`
+#### GetBucketInfo `GET /v1/bucket?id=`
+#### GetBucketInfo `GET /v1/bucket?globalAlias=`
Returns information about the requested storage bucket.
@@ -602,7 +616,7 @@ Example response:
}
```
-#### CreateBucket `POST /v2/CreateBucket`
+#### CreateBucket `POST /v1/bucket`
Creates a new storage bucket.
@@ -642,7 +656,7 @@ or no alias at all.
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
two aliases, but I don't see why you would want to do that.
-#### UpdateBucket `POST /v2/UpdateBucket?id=`
+#### UpdateBucket `PUT /v1/bucket?id=`
Updates configuration of the given bucket.
@@ -674,38 +688,16 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
to change only one of the two quotas.
-#### DeleteBucket `POST /v2/DeleteBucket?id=`
+#### DeleteBucket `DELETE /v1/bucket?id=`
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
Warning: this will delete all aliases associated with the bucket!
-#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads`
-
-Cleanup all incomplete uploads in a bucket that are older than a specified number
-of seconds.
-
-Request body format:
-
-```json
-{
- "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
- "olderThanSecs": 3600
-}
-```
-
-Response format
-
-```json
-{
- "uploadsDeleted": 12
-}
-```
-
### Operations on permissions for keys on buckets
-#### AllowBucketKey `POST /v2/AllowBucketKey`
+#### BucketAllowKey `POST /v1/bucket/allow`
Allows a key to do read/write/owner operations on a bucket.
@@ -726,7 +718,7 @@ Request body format:
Flags in `permissions` which have the value `true` will be activated.
Other flags will remain unchanged.
-#### DenyBucketKey `POST /v2/DenyBucketKey`
+#### BucketDenyKey `POST /v1/bucket/deny`
Denies a key from doing read/write/owner operations on a bucket.
@@ -750,35 +742,19 @@ Other flags will remain unchanged.
### Operations on bucket aliases
-#### AddBucketAlias `POST /v2/AddBucketAlias`
+#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=&alias=`
-Creates an alias for a bucket in the namespace of a specific access key.
-To create a global alias, specify the `globalAlias` field.
-To create a local alias, specify the `localAlias` and `accessKeyId` fields.
+Empty body. Creates a global alias for a bucket.
-Request body format:
+#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=&alias=`
-```json
-{
- "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
- "globalAlias": "my-bucket"
-}
-```
+Removes a global alias for a bucket.
-or:
+#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=&accessKeyId=&alias=`
-```json
-{
- "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
- "accessKeyId": "GK31c2f218a2e44f485b94239e",
- "localAlias": "my-bucket"
-}
-```
+Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
-#### RemoveBucketAlias `POST /v2/RemoveBucketAlias`
+#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=&accessKeyId&alias=`
-Removes an alias for a bucket in the namespace of a specific access key.
-To remove a global alias, specify the `globalAlias` field.
-To remove a local alias, specify the `localAlias` and `accessKeyId` fields.
+Removes a local alias for a bucket in the namespace of a specific access key.
-Request body format: same as AddBucketAlias.
diff --git a/doc/talks/2025-10-06-josy/.gitignore b/doc/talks/2025-10-06-josy/.gitignore
deleted file mode 100644
index 9f1f00e6..00000000
--- a/doc/talks/2025-10-06-josy/.gitignore
+++ /dev/null
@@ -1,17 +0,0 @@
-*
-
-!*.txt
-!*.md
-
-!assets
-
-!.gitignore
-!*.svg
-!*.png
-!*.jpg
-!*.tex
-!Makefile
-!.gitignore
-!assets/*.drawio.pdf
-
-!talk.pdf
diff --git a/doc/talks/2025-10-06-josy/Makefile b/doc/talks/2025-10-06-josy/Makefile
deleted file mode 100644
index f0aae6a8..00000000
--- a/doc/talks/2025-10-06-josy/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-ASSETS=../assets/lattice/lattice1.pdf_tex \
- ../assets/lattice/lattice2.pdf_tex \
- ../assets/lattice/lattice3.pdf_tex \
- ../assets/lattice/lattice4.pdf_tex \
- ../assets/lattice/lattice5.pdf_tex \
- ../assets/lattice/lattice6.pdf_tex \
- ../assets/lattice/lattice7.pdf_tex \
- ../assets/lattice/lattice8.pdf_tex \
- ../assets/logos/deuxfleurs.pdf \
- ../assets/timeline-22-24.pdf
-
-talk.pdf: talk.tex $(ASSETS)
- pdflatex talk.tex
-
-%.pdf: %.svg
- inkscape -D -z --file=$^ --export-pdf=$@
-
-%.pdf_tex: %.svg
- inkscape -D -z --file=$^ --export-pdf=$@ --export-latex
diff --git a/doc/talks/2025-10-06-josy/talk.pdf b/doc/talks/2025-10-06-josy/talk.pdf
deleted file mode 100644
index 2194908a..00000000
Binary files a/doc/talks/2025-10-06-josy/talk.pdf and /dev/null differ
diff --git a/doc/talks/2025-10-06-josy/talk.tex b/doc/talks/2025-10-06-josy/talk.tex
deleted file mode 100644
index aa483766..00000000
--- a/doc/talks/2025-10-06-josy/talk.tex
+++ /dev/null
@@ -1,702 +0,0 @@
-\nonstopmode
-\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
-\usepackage[utf8]{inputenc}
-% \usepackage[frenchb]{babel}
-\usepackage{amsmath}
-\usepackage{mathtools}
-\usepackage{breqn}
-\usepackage{multirow}
-\usetheme{boxes}
-\usepackage{graphicx}
-\usepackage{import}
-\usepackage{adjustbox}
-\usepackage[absolute,overlay]{textpos}
-%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
-%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
-\useoutertheme{infolines}
-\setbeamertemplate{headline}{}
-
-\beamertemplatenavigationsymbolsempty
-
-\definecolor{TitleOrange}{RGB}{255,137,0}
-\setbeamercolor{title}{fg=TitleOrange}
-\setbeamercolor{frametitle}{fg=TitleOrange}
-
-\definecolor{ListOrange}{RGB}{255,145,5}
-\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
-
-\definecolor{verygrey}{RGB}{70,70,70}
-\setbeamercolor{normal text}{fg=verygrey}
-
-
-\usepackage{tabu}
-\usepackage{multicol}
-\usepackage{vwcol}
-\usepackage{stmaryrd}
-\usepackage{graphicx}
-
-\usepackage[normalem]{ulem}
-
-\AtBeginSection[]{
- \begin{frame}
- \vfill
- \centering
- \begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
- \usebeamerfont{title}\insertsectionhead\par%
- \end{beamercolorbox}
- \vfill
- \end{frame}
-}
-
-\title{Garage, an S3 backend as reliable as possible}
-\author{Garage Authors}
-\date{JoSy S3, 2025-10-08}
-
-\begin{document}
-
-\begin{frame}
- \centering
- \includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
- \vspace{1em}
-
- {\large\bf Garage, an S3 backend as reliable as possible}
- \vspace{1em}
-
- \url{https://garagehq.deuxfleurs.fr/}\\
- \url{mailto:garagehq@deuxfleurs.fr}\\
- \texttt{\#garage:deuxfleurs.fr} on Matrix
-\end{frame}
-
-
-\section{Meet Garage}
-
-\begin{frame}
- \frametitle{A non-profit initiative}
-
-
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
- \end{column}
- \begin{column}{.8\textwidth}
- \textbf{Part of a degrowth initiative}\\
- Garage has been created at Deuxfleurs where we experiment running Internet services without datacenter on commodity and refurbished hardware.
- \end{column}
-
- \end{columns}
- \vspace{2em}
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/community.png}
- \end{column}
- \begin{column}{.8\textwidth}
- \textbf{Developed by a community}\\
- {\small Some recent contributors: Arthur C, Charles H, dongdigua, Etienne L, Jonah A, Julien K, Lapineige, MagicRR, Milas B, Niklas M, RockWolf, Schwitzd, trinity-1686a, Xavier S, babykart, Baptiste J, eddster2309, James O'C, Joker9944, Maximilien R, Renjaya RZ, Yureka...}
- \end{column}
-
- \end{columns}
- \vspace{2em}
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/AGPLv3_Logo.png}
- \end{column}
- \begin{column}{.8\textwidth}
- \textbf{Owned by nobody, open-core is impossible, zero VC money}\\
- AGPL + no Contributor License Agreement = Garage ownership spreads among hundredth of contributors.
- \end{column}
-
- \end{columns}
-\end{frame}
-
-\begin{frame}
- \frametitle{Getting support for Garage}
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
- \end{column}
- \begin{column}{.4\textwidth}
- \textbf{Alex Auvolat}\\
- PhD; co-founder of Deuxfleurs\\
- Garage maintainer, Freelance
- \end{column}
- \begin{column}{.3\textwidth}
- \centering
- \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/support.png}
- \end{column}
- \begin{column}{.1\textwidth}
- ~
- \end{column}
- \end{columns}
- \vspace{2em}
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/quentin.jpg}
- \end{column}
- \begin{column}{.4\textwidth}
- \textbf{Quentin Dufour}\\
- PhD; co-founder of Deuxfleurs\\
- Garage contributor, Freelance
- \end{column}
- \begin{column}{.4\textwidth}
- For support requests, write at: \\
- \url{garagehq@deuxfleurs.fr}
- \end{column}
- \end{columns}
- \vspace{2em}
- \begin{columns}[t]
- \begin{column}{.2\textwidth}
- \centering
- \adjincludegraphics[width=.4\linewidth, valign=t]{../assets/armael.jpg}
- \end{column}
- \begin{column}{.4\textwidth}
- \textbf{Armaël Guéneau}\\
- PhD; member of Deuxfleurs\\
- Garage contributor, Freelance
- \end{column}
- \begin{column}{.4\textwidth}
- Eligible: email support, architecture design, specific feature development, etc.
- \end{column}
- \end{columns}
-
-
-\end{frame}
-
-\begin{frame}
- \frametitle{Our initial goal}
-
- \centering
- \Large
-
- Being a self-sovereign community to be free of our degrowth choice
-
- $\big\downarrow$
-
- As web citizens, datacenters are big black boxes. \\
- We want to leave them to autonoumously manage our servers.
-
- $\big\downarrow$
-
- We want reliable services without relying on dedicated hardware or places.
-
-\end{frame}
-
-\begin{frame}
- \frametitle{Building a resilient system with cheap stuff}
-
- \only<1,4-7>{
- \begin{itemize}
- \item \textcolor<5->{gray}{Commodity hardware (e.g. old desktop PCs)\\
- \vspace{.5em}
- \visible<4->{{\footnotesize (can die at any time)}}}
- \vspace{1.5em}
- \item<5-> \textcolor<7->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
- \vspace{.5em}
- \visible<6->{{\footnotesize (can be unavailable randomly)}}}
- \vspace{1.5em}
- \item<7-> \textbf{Geographical redundancy} (multi-site replication)
- \end{itemize}
- }
- \only<2>{
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
- \end{center}
- }
- \only<3>{
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/atuin.jpg}
- \end{center}
- }
- \only<8>{
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
- \end{center}
- }
-\end{frame}
-
-\begin{frame}
- \frametitle{Object storage: a crucial component}
- \begin{center}
- \includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
- \hspace{3em}
- \visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
- \hspace{3em}
- \visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
- \end{center}
- \vspace{1em}
- S3: a de-facto standard, many compatible applications
-
- \vspace{1em}
- \visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
-
- \vspace{1em}
- \visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
-\end{frame}
-
-\begin{frame}
- \frametitle{CRDTs / weak consistency instead of consensus}
-
- \underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
-
- \vspace{2em}
- Why not Raft, Paxos, ...? Issues of consensus algorithms:
-
- \vspace{1em}
- \begin{itemize}
- \item<2-> \textbf{Software complexity}
- \vspace{1em}
- \item<3-> \textbf{Performance issues:}
- \vspace{.5em}
- \begin{itemize}
- \item<4-> The leader is a \textbf{bottleneck} for all requests\\
- \vspace{.5em}
- \item<5-> \textbf{Sensitive to higher latency} between nodes
- \vspace{.5em}
- \item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
- \end{itemize}
- \end{itemize}
-\end{frame}
-
-\begin{frame}
- \frametitle{The data model of object storage}
- Object storage is basically a \textbf{key-value store}:
- \vspace{.5em}
-
- {\scriptsize
- \begin{center}
- \begin{tabular}{|l|p{7cm}|}
- \hline
- \textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
- \hline
- \hline
- \texttt{index.html} &
- \texttt{Content-Type: text/html; charset=utf-8} \newline
- \texttt{Content-Length: 24929} \newline
- \texttt{} \\
- \hline
- \texttt{img/logo.svg} &
- \texttt{Content-Type: text/svg+xml} \newline
- \texttt{Content-Length: 13429} \newline
- \texttt{} \\
- \hline
- \texttt{download/index.html} &
- \texttt{Content-Type: text/html; charset=utf-8} \newline
- \texttt{Content-Length: 26563} \newline
- \texttt{} \\
- \hline
- \end{tabular}
- \end{center}
- }
-
- \vspace{1em}
- \begin{itemize}
- \item<2> Maps well to CRDT data types
- \end{itemize}
-\end{frame}
-
-\begin{frame}
- \frametitle{Performance gains in practice}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
- \end{center}
-\end{frame}
-
-% ======================================== OPERATING
-% ======================================== OPERATING
-% ======================================== OPERATING
-
-
-\section{Production clusters}
-
-\begin{frame}
- \frametitle{Deployment kinds}
-
- \includegraphics[width=.9\linewidth]{../assets/cluster_kind.png}
- \vspace{1em}
-
-\end{frame}
-
-\begin{frame}
- \frametitle{How big they are?}
-
- \includegraphics[width=.9\linewidth]{../assets/cluster_size.png}
- \vspace{1em}
-
- \textit{"Petabyte storage setup for a video site. Nginx as CDN in-front using garage-s3-website feature. Each storage node has ~64TB storage with raid10, no replication within garage. 25gbit nic. haproxy to loadbalance across 5 nodes. mostly reads with very few writes."}
-
- \vspace{1em}
- \textit{"We currently manage 7 Garage nodes, 28TB total storage, 6M blocks for 3M objects and 4TB of object data. We have been running Garage in production for 2.5 years."}
-
-\end{frame}
-
-\begin{frame}
- \frametitle{Operating Garage}
- \begin{center}
- \only<1-2>{
- \includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
- \\\vspace{1em}
- \visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
- }
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Garage's architecture}
- \begin{center}
- \only<1>{\includegraphics[width=.45\linewidth]{../assets/garage.drawio.pdf}}%
- \only<2>{\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}}%
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Digging deeper}
- \begin{center}
- \only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
- \only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
- \only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Potential limitations and bottlenecks}
- \begin{itemize}
- \item Global:
- \begin{itemize}
- \item Max. $\sim$100 nodes per cluster (excluding gateways)
- \end{itemize}
- \vspace{1em}
- \item Metadata:
- \begin{itemize}
- \item One big bucket = bottleneck, object list on 3 nodes only
- \end{itemize}
- \vspace{1em}
- \item Block manager:
- \begin{itemize}
- \item Lots of small files on disk
- \item Processing the resync queue can be slow
- \end{itemize}
- \end{itemize}
-\end{frame}
-
-\begin{frame}
- \frametitle{Deployment advice for very large clusters}
- \begin{itemize}
- \item Metadata storage:
- \begin{itemize}
- \item ZFS mirror (x2) on fast NVMe
- \item Use LMDB storage engine
- \end{itemize}
- \vspace{.5em}
- \item Data block storage:
- \begin{itemize}
- \item Use Garage's native multi-HDD support
- \item XFS on individual drives
- \item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
- \item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
- \end{itemize}
- \vspace{.5em}
- \item Other :
- \begin{itemize}
- \item Split data over several buckets
- \item Use less than 100 storage nodes
- \item Use gateway nodes
- \end{itemize}
- \vspace{.5em}
- \end{itemize}
-\end{frame}
-
-
-\begin{frame}
- \frametitle{Focus on Deuxfleurs}
-
- Host institutional websites, partnership with a web agency.
- Matrix media backend.
-
- Plan to use it as an email backend for an internally developed email server.
-
-\end{frame}
-
-
-% ======================================== TIMELINE
-% ======================================== TIMELINE
-% ======================================== TIMELINE
-
-\section{Recent developments}
-
-% ====================== v0.7.0 ===============================
-
-\begin{frame}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{April 2022 - Garage v0.7.0}
- Focus on \underline{observability and ecosystem integration}
- \vspace{2em}
- \begin{itemize}
- \item \textbf{Monitoring:} metrics and traces, using OpenTelemetry
- \vspace{1em}
- \item Replication modes with 1 or 2 copies / weaker consistency
- \vspace{1em}
- \item Kubernetes integration for node discovery
- \vspace{1em}
- \item Admin API (v0.7.2)
- \end{itemize}
-\end{frame}
-
-\begin{frame}
- \frametitle{Metrics (Prometheus + Grafana)}
- \begin{center}
- \includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Traces (Jaeger)}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
- \end{center}
-\end{frame}
-
-% ====================== v0.8.0 ===============================
-
-\begin{frame}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{November 2022 - Garage v0.8.0}
- Focus on \underline{performance}
- \vspace{2em}
- \begin{itemize}
- \item \textbf{Alternative metadata DB engines} (LMDB, Sqlite)
- \vspace{1em}
- \item \textbf{Performance improvements:} block streaming, various optimizations...
- \vspace{1em}
- \item Bucket quotas (max size, max \#objects)
- \vspace{1em}
- \item Quality of life improvements, observability, etc.
- \end{itemize}
-\end{frame}
-
-\begin{frame}
- \frametitle{About metadata DB engines}
- \textbf{Issues with Sled:}
- \vspace{1em}
- \begin{itemize}
- \item Huge files on disk
- \vspace{.5em}
- \item Unpredictable performance, especially on HDD
- \vspace{.5em}
- \item API limitations
- \vspace{.5em}
- \item Not actively maintained
- \end{itemize}
-
- \vspace{2em}
- \textbf{LMDB:} very stable, good performance, file size is reasonable\\
- \textbf{Sqlite} also available as a second choice
-
- \vspace{1em}
- Sled will be removed in Garage v1.0
-\end{frame}
-
-\begin{frame}
- \frametitle{DB engine performance comparison}
- \begin{center}
- \includegraphics[width=.6\linewidth]{../assets/perf/db_engine.png}
- \end{center}
- NB: Sqlite was slow due to synchronous mode, now configurable
-\end{frame}
-
-\begin{frame}
- \frametitle{Block streaming}
- \begin{center}
- \only<1>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-1.png}}
- \only<2>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-2.png}}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{TTFB benchmark}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/perf/ttfb.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Throughput benchmark}
- \begin{center}
- \includegraphics[width=.7\linewidth]{../assets/perf/io-0.7-0.8-minio.png}
- \end{center}
-\end{frame}
-
-% ====================== v0.9.0 ===============================
-
-\begin{frame}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{October 2023 - Garage v0.9.0}
- Focus on \underline{streamlining \& usability}
- \vspace{2em}
- \begin{itemize}
- \item Support multiple HDDs per node
- \vspace{1em}
- \item S3 compatibility:
- \vspace{1em}
- \begin{itemize}
- \item support basic lifecycle configurations
- \vspace{.5em}
- \item allow for multipart upload part retries
- \end{itemize}
- \vspace{1em}
- \item LMDB by default, deprecation of Sled
- \vspace{1em}
- \item New layout computation algorithm
- \end{itemize}
-\end{frame}
-
-
-\begin{frame}
- \frametitle{Layout computation}
- \begin{overprint}
- \onslide<1>
- \begin{center}
- \includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
- \end{center}
- \onslide<2>
- \begin{center}
- \includegraphics[width=.7\linewidth]{../assets/map.png}
- \end{center}
- \end{overprint}
- \vspace{1em}
- Garage stores replicas on different zones when possible
-\end{frame}
-
-\begin{frame}
- \frametitle{What a "layout" is}
- \textbf{A layout is a precomputed index table:}
- \vspace{1em}
-
- {\footnotesize
- \begin{center}
- \begin{tabular}{|l|l|l|l|}
- \hline
- \textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
- \hline
- \hline
- Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
- \hline
- Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
- \hline
- Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
- \hline
- \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
- \hline
- Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
- \hline
- \end{tabular}
- \end{center}
- }
-
- \vspace{2em}
- \visible<2->{
- The index table is built centrally using an optimal algorithm,\\
- then propagated to all nodes
- }
-
- \vspace{1em}
- \visible<3->{
- \footnotesize
- Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
- }
-\end{frame}
-
-
-
-% ====================== v1.0.0 ===============================
-
-\begin{frame}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{April 2024 - Garage v1.0.0}
- Focus on \underline{consistency, security \& stability}
- \vspace{2em}
- \begin{itemize}
- \item Fix consistency issues when reshuffling data (Jepsen testing)
- \vspace{1em}
- \item \textbf{Security audit} by Radically Open Security
- \vspace{1em}
- \item Misc. S3 features (SSE-C, checksums, ...) and compatibility fixes
- \end{itemize}
-\end{frame}
-
-% ====================== v2.0.0 ===============================
-
-\begin{frame}
- \begin{center}
- \includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
- \end{center}
-\end{frame}
-
-\begin{frame}
- \frametitle{Garage v2.0.0}
- Focus on \underline{}
- \vspace{2em}
- \begin{itemize}
- \item TODO
- \end{itemize}
-\end{frame}
-
-
-\begin{frame}
- \frametitle{Currently funding...}
-
- \textit{...}
-\end{frame}
-
-\begin{frame}
- \frametitle{We run community surveys}
- \begin{center}
- \includegraphics[width=.6\linewidth]{../assets/survey_requested_features.png}
- \end{center}
-\end{frame}
-
-% ======================================== END
-% ======================================== END
-% ======================================== END
-
-\begin{frame}
- \frametitle{Where to find us}
- \begin{center}
- \includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
- \vspace{-1em}
- \url{https://garagehq.deuxfleurs.fr/}\\
- \url{mailto:garagehq@deuxfleurs.fr}\\
- \texttt{\#garage:deuxfleurs.fr} on Matrix
-
- \vspace{1.5em}
- \includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
- \includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
- \end{center}
-\end{frame}
-
-\end{document}
-
-%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :
diff --git a/doc/talks/assets/armael.jpg b/doc/talks/assets/armael.jpg
deleted file mode 100644
index 54b97662..00000000
Binary files a/doc/talks/assets/armael.jpg and /dev/null differ
diff --git a/doc/talks/assets/cluster_kind.png b/doc/talks/assets/cluster_kind.png
deleted file mode 100644
index 80f8f4b5..00000000
Binary files a/doc/talks/assets/cluster_kind.png and /dev/null differ
diff --git a/doc/talks/assets/cluster_size.png b/doc/talks/assets/cluster_size.png
deleted file mode 100644
index b4b0f5ce..00000000
Binary files a/doc/talks/assets/cluster_size.png and /dev/null differ
diff --git a/doc/talks/assets/community.png b/doc/talks/assets/community.png
deleted file mode 100644
index 06c7a1af..00000000
Binary files a/doc/talks/assets/community.png and /dev/null differ
diff --git a/doc/talks/assets/quentin.jpg b/doc/talks/assets/quentin.jpg
deleted file mode 100644
index a68d9d7b..00000000
Binary files a/doc/talks/assets/quentin.jpg and /dev/null differ
diff --git a/doc/talks/assets/support.png b/doc/talks/assets/support.png
deleted file mode 100644
index c20d179b..00000000
Binary files a/doc/talks/assets/support.png and /dev/null differ
diff --git a/doc/talks/assets/tl.drawio.png b/doc/talks/assets/tl.drawio.png
deleted file mode 100644
index c60c310a..00000000
Binary files a/doc/talks/assets/tl.drawio.png and /dev/null differ
diff --git a/flake.lock b/flake.lock
index e265d0c3..211b70e0 100644
--- a/flake.lock
+++ b/flake.lock
@@ -12,17 +12,16 @@
"original": {
"owner": "ipetkov",
"repo": "crane",
- "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
"type": "github"
}
},
"flake-compat": {
"locked": {
- "lastModified": 1761640442,
- "narHash": "sha256-AtrEP6Jmdvrqiv4x2xa5mrtaIp3OEe8uBYCDZDS+hu8=",
+ "lastModified": 1717312683,
+ "narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community",
"repo": "flake-compat",
- "rev": "4a56054d8ffc173222d09dad23adf4ba946c8884",
+ "rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github"
},
"original": {
diff --git a/flake.nix b/flake.nix
index 979fa8eb..01a077c4 100644
--- a/flake.nix
+++ b/flake.nix
@@ -11,8 +11,7 @@
"github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
- # Crane as of 2025-01-24
- inputs.crane.url = "github:ipetkov/crane/6fe74265bbb6d016d663b1091f015e2976c4a527";
+ inputs.crane.url = "github:ipetkov/crane";
inputs.flake-compat.url = "github:nix-community/flake-compat";
inputs.flake-utils.url = "github:numtide/flake-utils";
@@ -90,9 +89,6 @@
cargo-outdated
cargo-machete
nixpkgs-fmt
- openssl
- socat
- killall
];
};
};
diff --git a/nix/build_index.nix b/nix/build_index.nix
index 92931eea..7869566f 100644
--- a/nix/build_index.nix
+++ b/nix/build_index.nix
@@ -167,7 +167,7 @@ let
Sources:
diff --git a/script/dev-bucket.sh b/script/dev-bucket.sh
index 82e73652..708c2c43 100755
--- a/script/dev-bucket.sh
+++ b/script/dev-bucket.sh
@@ -17,19 +17,13 @@ else
fi
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
-if [ "$GARAGE_OLDVER" = "v08" ]; then
+if [ "$GARAGE_08" = "1" ]; then
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
- ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
- SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
-elif [ "$GARAGE_OLDVER" = "v1" ]; then
- KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
- ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
- SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
else
- KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml json-api CreateKey '{"name":"opérateur"}')
- ACCESS_KEY=`echo $KEY_INFO|jq -r .accessKeyId`
- SECRET_KEY=`echo $KEY_INFO|jq -r .secretAccessKey`
+ KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
fi
+ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
+SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
diff --git a/script/dev-cluster.sh b/script/dev-cluster.sh
index 81a37099..998ffdb9 100755
--- a/script/dev-cluster.sh
+++ b/script/dev-cluster.sh
@@ -30,12 +30,6 @@ for count in $(seq 1 3); do
CONF_PATH="/tmp/config.$count.toml"
LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
-if [ "$GARAGE_OLDVER" == "v08" ]; then
- REPLICATION_MODE="replication_mode = \"3\""
-else
- REPLICATION_MODE="replication_factor = 3"
-fi
-
cat > $CONF_PATH <&1|grep -q HEALTHY ; do
sleep 1
done
-if [ "$GARAGE_OLDVER" = "v08" ]; then
+if [ "$GARAGE_08" = "1" ]; then
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
diff --git a/script/dev-env-aws.sh b/script/dev-env-aws.sh
index 808f9cf1..41f1fdde 100644
--- a/script/dev-env-aws.sh
+++ b/script/dev-env-aws.sh
@@ -1,6 +1,7 @@
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
export AWS_DEFAULT_REGION='garage'
+export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
# FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
diff --git a/script/helm/garage/Chart.yaml b/script/helm/garage/Chart.yaml
index 6c93b37f..b3a7b921 100644
--- a/script/helm/garage/Chart.yaml
+++ b/script/helm/garage/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
name: garage
description: S3-compatible object store for small self-hosted geo-distributed deployments
type: application
-version: 0.9.2
-appVersion: "v2.2.0"
+version: 0.7.3
+appVersion: "v1.3.1"
home: https://garagehq.deuxfleurs.fr/
icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
diff --git a/script/helm/garage/README.md b/script/helm/garage/README.md
index 67626030..bdf69ec4 100644
--- a/script/helm/garage/README.md
+++ b/script/helm/garage/README.md
@@ -1,6 +1,6 @@
# garage
-  
+  
S3-compatible object store for small self-hosted geo-distributed deployments
@@ -15,7 +15,6 @@ S3-compatible object store for small self-hosted geo-distributed deployments
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
-| commonLabels | object | `{}` | Extra labels for all resources |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
@@ -23,16 +22,15 @@ S3-compatible object store for small self-hosted geo-distributed deployments
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
-| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size |
+| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
-| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level |
-| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine |
+| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
+| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources |
-| garage.replicationFactor | string | `"3"` | Default to 3 replicas, see the replication_factor section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor |
-| garage.consistencyMode | string | `"consistent"` | Default to read-after-write consistency, see the consistency_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode |
| garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
+| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
diff --git a/script/helm/garage/templates/_helpers.tpl b/script/helm/garage/templates/_helpers.tpl
index 2ffb90c6..037a5f1c 100644
--- a/script/helm/garage/templates/_helpers.tpl
+++ b/script/helm/garage/templates/_helpers.tpl
@@ -27,7 +27,7 @@ If release name contains chart name it will be used as a full name.
Create the name of the rpc secret
*/}}
{{- define "garage.rpcSecretName" -}}
-{{- .Values.garage.existingRpcSecret | default (printf "%s-rpc-secret" (include "garage.fullname" .)) -}}
+{{- printf "%s-rpc-secret" (include "garage.fullname" .) -}}
{{- end }}
{{/*
@@ -47,9 +47,6 @@ helm.sh/chart: {{ include "garage.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- with .Values.commonLabels }}
-{{- toYaml . | nindent 0 }}
-{{- end }}
{{- end }}
{{/*
diff --git a/script/helm/garage/templates/configmap.yaml b/script/helm/garage/templates/configmap.yaml
index 280861e4..ab5b84db 100644
--- a/script/helm/garage/templates/configmap.yaml
+++ b/script/helm/garage/templates/configmap.yaml
@@ -15,8 +15,7 @@ data:
block_size = {{ .Values.garage.blockSize }}
- replication_factor = {{ .Values.garage.replicationFactor }}
- consistency_mode = "{{ .Values.garage.consistencyMode }}"
+ replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
@@ -28,16 +27,8 @@ data:
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
- bootstrap_peers = [
- {{- range $index, $peer := .Values.garage.bootstrapPeers }}
- {{- if $index}}, {{ end }}{{ $peer | quote }}
- {{ end }}
- ]
+ bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
- {{- if .Values.garage.additionalTopLevelConfig }}
- {{ .Values.garage.additionalTopLevelConfig | nindent 4 }}
- {{- end }}
-
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
diff --git a/script/helm/garage/templates/secret.yaml b/script/helm/garage/templates/secret.yaml
index c0c45b93..54749424 100644
--- a/script/helm/garage/templates/secret.yaml
+++ b/script/helm/garage/templates/secret.yaml
@@ -1,4 +1,3 @@
-{{- if not .Values.garage.existingRpcSecret }}
apiVersion: v1
kind: Secret
metadata:
@@ -13,4 +12,3 @@ data:
{{- $prevRpcSecret := $prevSecretData.rpcSecret | default "" | b64dec }}
{{/* Priority is: 1. from values, 2. previous value, 3. generate random */}}
rpcSecret: {{ .Values.garage.rpcSecret | default $prevRpcSecret | default (include "jupyterhub.randHex" 64) | b64enc | quote }}
-{{- end }}
diff --git a/script/helm/garage/templates/workload.yaml b/script/helm/garage/templates/workload.yaml
index 4264253e..d144cb41 100644
--- a/script/helm/garage/templates/workload.yaml
+++ b/script/helm/garage/templates/workload.yaml
@@ -21,7 +21,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
- {{- include "garage.labels" . | nindent 8 }}
+ {{- include "garage.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
diff --git a/script/helm/garage/values.yaml b/script/helm/garage/values.yaml
index ad0dbc53..5e419fe2 100644
--- a/script/helm/garage/values.yaml
+++ b/script/helm/garage/values.yaml
@@ -2,32 +2,23 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-# -- Additional labels to add to all resources created by this chart
-commonLabels: {}
-# app.kubernetes.io/part-of: storage
-# team: platform
-
# Garage configuration. These values go to garage.toml
garage:
# -- Can be changed for better performance on certain systems
- # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine
+ # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
dbEngine: "lmdb"
# -- Defaults is 1MB
# An increase can result in better performance in certain scenarios
- # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size
+ # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
blockSize: "1048576"
- # -- Default to 3 replicas, see the replication_factor section at
- # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor
- replicationFactor: "3"
-
- # -- By default, enable read-after-write consistency guarantees, see the consistency_mode section at
- # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode
- consistencyMode: "consistent"
+ # -- Default to 3 replicas, see the replication_mode section at
+ # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
+ replicationMode: "3"
# -- zstd compression level of stored blocks
- # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level
+ # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
compressionLevel: "1"
# -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
@@ -37,10 +28,6 @@ garage:
rpcBindAddr: "[::]:3901"
# -- If not given, a random secret will be generated and stored in a Secret object
rpcSecret: ""
- # -- If you want to provide an rpcSecret within an existing k8s secret,
- # specify the secret name here, and store the value under the secret key `rpcSecret`
- # the default secret will not be created
- existingRpcSecret: ""
# -- This is not required if you use the integrated kubernetes discovery
bootstrapPeers: []
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
@@ -54,12 +41,6 @@ garage:
rootDomain: ".web.garage.tld"
index: "index.html"
- # -- Additional configuration to append to garage.toml. Use a multi-line string for custom config.
- # Example:
- # additionalTopLevelConfig: |-
- # data_fsync = true
- additionalTopLevelConfig: ""
-
# -- if not empty string, allow using an existing ConfigMap for the garage.toml,
# if set, ignores garage.toml
existingConfigMap: ""
@@ -127,7 +108,6 @@ podSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
- fsGroupChangePolicy: "OnRootMismatch"
runAsNonRoot: true
securityContext:
diff --git a/script/jepsen.garage/src/jepsen/garage/daemon.clj b/script/jepsen.garage/src/jepsen/garage/daemon.clj
index 9267a03a..0ea773fb 100644
--- a/script/jepsen.garage/src/jepsen/garage/daemon.clj
+++ b/script/jepsen.garage/src/jepsen/garage/daemon.clj
@@ -43,7 +43,7 @@
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
"rpc_public_addr = \"" node ":3901\"\n"
"db_engine = \"lmdb\"\n"
- "replication_factor = 3\n"
+ "replication_mode = \"3\"\n"
"data_dir = \"" data-dir "\"\n"
"metadata_dir = \"" meta-dir "\"\n"
"[s3_api]\n"
diff --git a/script/k8s/config.yaml b/script/k8s/config.yaml
index bfefd999..8cf40fc2 100644
--- a/script/k8s/config.yaml
+++ b/script/k8s/config.yaml
@@ -8,7 +8,7 @@ data:
metadata_dir = "/tmp/meta"
data_dir = "/tmp/data"
- replication_factor = 3
+ replication_mode = "3"
rpc_bind_addr = "[::]:3901"
rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec"
diff --git a/script/telemetry/grafana-garage-dashboard-prometheus.json b/script/telemetry/grafana-garage-dashboard-prometheus.json
index 1e127f8a..28ef1ec0 100644
--- a/script/telemetry/grafana-garage-dashboard-prometheus.json
+++ b/script/telemetry/grafana-garage-dashboard-prometheus.json
@@ -694,7 +694,32 @@
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "10.83.2.3:3903"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
diff --git a/script/test-smoke.sh b/script/test-smoke.sh
index eee206ba..acf56a90 100755
--- a/script/test-smoke.sh
+++ b/script/test-smoke.sh
@@ -112,23 +112,6 @@ if [ -z "$SKIP_S3CMD" ]; then
done
fi
-# BOTO3
-if [ -z "$SKIP_BOTO3" ]; then
- echo "🛠️ Testing with boto3 for STREAMING-UNSIGNED-PAYLOAD-TRAILER"
- source ${SCRIPT_FOLDER}/dev-env-aws.sh
- AWS_ENDPOINT_URL=https://localhost:4443 python <> /tmp/garage.log 2>&1 &
sleep 3
echo "🛠️ Retrieving data from old cluster"
-rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit \
- --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
+rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then
echo "TEST FAILURE: directories are different"
@@ -93,23 +68,6 @@ if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' '
fi
rm -r /tmp/test_dotgit
-if [ "$DO_SSEC_TEST" = "1" ]; then
- rclone copy garage:eprouvette/test-ssec /tmp/test_ssec_out \
- --s3-sse-customer-algorithm AES256 \
- --s3-sse-customer-key-base64 "$SSEC_KEY" \
- --stats=1s --stats-log-level=NOTICE --stats-one-line
- if ! diff "/tmp/test_ssec_out/test-upgrade.sh" "${SCRIPT_FOLDER}/test-upgrade.sh"; then
- echo "SSEC-FAILURE (small file)"
- exit 1
- fi
- if ! diff "/tmp/test_ssec_out/randfile-for-upgrade" "/tmp/randfile-for-upgrade"; then
- echo "SSEC-FAILURE (big file)"
- exit 1
- fi
- rm -r /tmp/test_ssec_out
- rm /tmp/randfile-for-upgrade
-fi
-
echo "🏁 Teardown"
rm -rf /tmp/garage-{data,meta}-*
rm -rf /tmp/config.*.toml
diff --git a/shell.nix b/shell.nix
index dee340c4..c3dedca8 100644
--- a/shell.nix
+++ b/shell.nix
@@ -26,8 +26,6 @@ in
s3cmd
minio-client
rclone
- (python313.withPackages (ps: [ ps.boto3 ]))
-
socat
psmisc
which
@@ -39,7 +37,6 @@ in
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
function to_s3 {
- AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
@@ -96,7 +93,6 @@ in
nix-build nix/build_index.nix
- AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
@@ -104,7 +100,6 @@ in
result/share/_releases.json \
s3://garagehq.deuxfleurs.fr/
- AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
diff --git a/src/api/admin/Cargo.toml b/src/api/admin/Cargo.toml
index 8736059a..656c6825 100644
--- a/src/api/admin/Cargo.toml
+++ b/src/api/admin/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_api_admin"
-version = "2.2.0"
+version = "1.3.1"
authors = ["Alex Auvolat "]
edition = "2018"
license = "AGPL-3.0"
@@ -14,9 +14,7 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-format_table.workspace = true
garage_model.workspace = true
-garage_block.workspace = true
garage_table.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
@@ -24,11 +22,8 @@ garage_api_common.workspace = true
argon2.workspace = true
async-trait.workspace = true
-bytesize.workspace = true
-chrono.workspace = true
thiserror.workspace = true
hex.workspace = true
-paste.workspace = true
tracing.workspace = true
futures.workspace = true
@@ -39,7 +34,6 @@ url.workspace = true
serde.workspace = true
serde_json.workspace = true
-utoipa.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
@@ -47,4 +41,3 @@ prometheus = { workspace = true, optional = true }
[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]
-k2v = [ "garage_model/k2v" ]
diff --git a/src/api/admin/admin_token.rs b/src/api/admin/admin_token.rs
deleted file mode 100644
index 0f9c66d2..00000000
--- a/src/api/admin/admin_token.rs
+++ /dev/null
@@ -1,292 +0,0 @@
-use std::sync::Arc;
-
-use chrono::{DateTime, Utc};
-
-use garage_table::*;
-use garage_util::time::now_msec;
-
-use garage_model::admin_token_table::*;
-use garage_model::garage::Garage;
-
-use crate::api::*;
-use crate::error::*;
-use crate::{Admin, RequestHandler};
-
-impl RequestHandler for ListAdminTokensRequest {
- type Response = ListAdminTokensResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let now = now_msec();
-
- let mut res = garage
- .admin_token_table
- .get_range(
- &EmptyKey,
- None,
- Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
- 10000,
- EnumerationOrder::Forward,
- )
- .await?
- .iter()
- .map(|t| admin_token_info_results(t, now))
- .collect::>();
-
- if garage.config.admin.metrics_token.is_some() {
- res.insert(
- 0,
- GetAdminTokenInfoResponse {
- id: None,
- created: None,
- name: "metrics_token (from daemon configuration)".into(),
- expiration: None,
- expired: false,
- scope: vec!["Metrics".into()],
- },
- );
- }
-
- if garage.config.admin.admin_token.is_some() {
- res.insert(
- 0,
- GetAdminTokenInfoResponse {
- id: None,
- created: None,
- name: "admin_token (from daemon configuration)".into(),
- expiration: None,
- expired: false,
- scope: vec!["*".into()],
- },
- );
- }
-
- Ok(ListAdminTokensResponse(res))
- }
-}
-
-impl RequestHandler for GetAdminTokenInfoRequest {
- type Response = GetAdminTokenInfoResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let token = match (self.id, self.search) {
- (Some(id), None) => get_existing_admin_token(garage, &id).await?,
- (None, Some(search)) => {
- let candidates = garage
- .admin_token_table
- .get_range(
- &EmptyKey,
- None,
- Some(KeyFilter::MatchesAndNotDeleted(search.to_string())),
- 10,
- EnumerationOrder::Forward,
- )
- .await?
- .into_iter()
- .collect::>();
- if candidates.len() != 1 {
- return Err(Error::bad_request(format!(
- "{} matching admin tokens",
- candidates.len()
- )));
- }
- candidates.into_iter().next().unwrap()
- }
- _ => {
- return Err(Error::bad_request(
- "Either id or search must be provided (but not both)",
- ));
- }
- };
-
- Ok(admin_token_info_results(&token, now_msec()))
- }
-}
-
-impl RequestHandler for CreateAdminTokenRequest {
- type Response = CreateAdminTokenResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let (mut token, secret) = if self.0.name.is_some() {
- AdminApiToken::new("")
- } else {
- AdminApiToken::new(&format!("token_{}", Utc::now().format("%Y%m%d_%H%M")))
- };
-
- apply_token_updates(&mut token, self.0)?;
-
- garage.admin_token_table.insert(&token).await?;
-
- Ok(CreateAdminTokenResponse {
- secret_token: secret,
- info: admin_token_info_results(&token, now_msec()),
- })
- }
-}
-
-impl RequestHandler for UpdateAdminTokenRequest {
- type Response = UpdateAdminTokenResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let mut token = get_existing_admin_token(&garage, &self.id).await?;
-
- apply_token_updates(&mut token, self.body)?;
-
- garage.admin_token_table.insert(&token).await?;
-
- Ok(UpdateAdminTokenResponse(admin_token_info_results(
- &token,
- now_msec(),
- )))
- }
-}
-
-impl RequestHandler for DeleteAdminTokenRequest {
- type Response = DeleteAdminTokenResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let token = get_existing_admin_token(&garage, &self.id).await?;
-
- garage
- .admin_token_table
- .insert(&AdminApiToken::delete(token.prefix))
- .await?;
-
- Ok(DeleteAdminTokenResponse)
- }
-}
-
-impl RequestHandler for GetCurrentAdminTokenInfoRequest {
- type Response = GetCurrentAdminTokenInfoResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let now = now_msec();
-
- if garage
- .config
- .admin
- .metrics_token
- .as_ref()
- .is_some_and(|s| s == &self.admin_token)
- {
- return Ok(GetCurrentAdminTokenInfoResponse(
- GetAdminTokenInfoResponse {
- id: None,
- created: None,
- name: "metrics_token (from daemon configuration)".into(),
- expiration: None,
- expired: false,
- scope: vec!["Metrics".into()],
- },
- ));
- }
-
- if garage
- .config
- .admin
- .admin_token
- .as_ref()
- .is_some_and(|s| s == &self.admin_token)
- {
- return Ok(GetCurrentAdminTokenInfoResponse(
- GetAdminTokenInfoResponse {
- id: None,
- created: None,
- name: "admin_token (from daemon configuration)".into(),
- expiration: None,
- expired: false,
- scope: vec!["*".into()],
- },
- ));
- }
-
- let (prefix, _) = self.admin_token.split_once('.').unwrap();
- let token = get_existing_admin_token(&garage, &prefix.to_string()).await?;
-
- Ok(GetCurrentAdminTokenInfoResponse(admin_token_info_results(
- &token, now,
- )))
- }
-}
-
-// ---- helpers ----
-
-fn admin_token_info_results(token: &AdminApiToken, now: u64) -> GetAdminTokenInfoResponse {
- let params = token.params().unwrap();
-
- GetAdminTokenInfoResponse {
- id: Some(token.prefix.clone()),
- created: Some(
- DateTime::from_timestamp_millis(params.created as i64)
- .expect("invalid timestamp stored in db"),
- ),
- name: params.name.get().to_string(),
- expiration: params.expiration.get().map(|x| {
- DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
- }),
- expired: params.is_expired(now),
- scope: params.scope.get().0.clone(),
- }
-}
-
-async fn get_existing_admin_token(garage: &Garage, id: &String) -> Result {
- garage
- .admin_token_table
- .get(&EmptyKey, id)
- .await?
- .filter(|k| !k.state.is_deleted())
- .ok_or_else(|| Error::NoSuchAdminToken(id.to_string()))
-}
-
-fn apply_token_updates(
- token: &mut AdminApiToken,
- updates: UpdateAdminTokenRequestBody,
-) -> Result<(), Error> {
- if updates.never_expires && updates.expiration.is_some() {
- return Err(Error::bad_request(
- "cannot specify `expiration` and `never_expires`",
- ));
- }
-
- let params = token.params_mut().unwrap();
-
- if let Some(name) = updates.name {
- params.name.update(name);
- }
- if let Some(expiration) = updates.expiration {
- params
- .expiration
- .update(Some(expiration.timestamp_millis() as u64));
- }
- if updates.never_expires {
- params.expiration.update(None);
- }
- if let Some(scope) = updates.scope {
- params.scope.update(AdminApiTokenScope(scope));
- }
-
- Ok(())
-}
diff --git a/src/api/admin/api.rs b/src/api/admin/api.rs
deleted file mode 100644
index da25b6a2..00000000
--- a/src/api/admin/api.rs
+++ /dev/null
@@ -1,1352 +0,0 @@
-use std::collections::HashMap;
-use std::convert::TryFrom;
-use std::net::SocketAddr;
-use std::sync::Arc;
-
-use chrono::{DateTime, Utc};
-use paste::paste;
-use serde::{Deserialize, Serialize};
-use utoipa::{IntoParams, ToSchema};
-
-use garage_rpc::*;
-
-use garage_model::garage::Garage;
-
-use garage_api_common::{common_error::CommonError, helpers::is_default};
-
-use crate::api_server::{find_matching_nodes, AdminRpc, AdminRpcResponse};
-use crate::error::Error;
-use crate::macros::*;
-use crate::{Admin, RequestHandler};
-
-// This generates the following:
-//
-// - An enum AdminApiRequest that contains a variant for all endpoints
-//
-// - An enum AdminApiResponse that contains a variant for all non-special endpoints.
-// This enum is serialized in api_server.rs, without the enum tag,
-// which gives directly the JSON response corresponding to the API call.
-// This enum does not implement Deserialize as its meaning can be ambiguous.
-//
-// - An enum TaggedAdminApiResponse that contains the same variants, but
-// serializes as a tagged enum. This allows it to be transmitted through
-// Garage RPC and deserialized correctly upon receival.
-// Conversion from untagged to tagged can be done using the `.tagged()` method.
-//
-// - AdminApiRequest::name() that returns the name of the endpoint
-//
-// - impl EndpointHandler for AdminApiHandler, that uses the impl EndpointHandler
-// of each request type below for non-special endpoints
-admin_endpoints![
- // Special endpoints of the Admin API
- @special Options,
- @special CheckDomain,
- @special Health,
- @special Metrics,
-
- // Cluster operations
- GetClusterStatus,
- GetClusterHealth,
- GetClusterStatistics,
- ConnectClusterNodes,
-
- // Admin tokens operations
- ListAdminTokens,
- GetAdminTokenInfo,
- CreateAdminToken,
- UpdateAdminToken,
- DeleteAdminToken,
- GetCurrentAdminTokenInfo,
-
- // Layout operations
- GetClusterLayout,
- GetClusterLayoutHistory,
- UpdateClusterLayout,
- PreviewClusterLayoutChanges,
- ApplyClusterLayout,
- RevertClusterLayout,
- ClusterLayoutSkipDeadNodes,
-
- // Access key operations
- ListKeys,
- GetKeyInfo,
- CreateKey,
- ImportKey,
- UpdateKey,
- DeleteKey,
-
- // Bucket operations
- ListBuckets,
- GetBucketInfo,
- CreateBucket,
- UpdateBucket,
- DeleteBucket,
- CleanupIncompleteUploads,
- InspectObject,
-
- // Operations on permissions for keys on buckets
- AllowBucketKey,
- DenyBucketKey,
-
- // Operations on bucket aliases
- AddBucketAlias,
- RemoveBucketAlias,
-
- // Node operations
- GetNodeInfo,
- GetNodeStatistics,
- CreateMetadataSnapshot,
- LaunchRepairOperation,
-
- // Worker operations
- ListWorkers,
- GetWorkerInfo,
- GetWorkerVariable,
- SetWorkerVariable,
-
- // Block operations
- ListBlockErrors,
- GetBlockInfo,
- RetryBlockResync,
- PurgeBlocks,
-];
-
-local_admin_endpoints![
- // Node operations
- GetNodeInfo,
- GetNodeStatistics,
- CreateMetadataSnapshot,
- LaunchRepairOperation,
- // Background workers
- ListWorkers,
- GetWorkerInfo,
- GetWorkerVariable,
- SetWorkerVariable,
- // Block operations
- ListBlockErrors,
- GetBlockInfo,
- RetryBlockResync,
- PurgeBlocks,
-];
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct MultiRequest {
- pub node: String,
- pub body: RB,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct MultiResponse {
- /// Map of node id to response returned by this node, for nodes that were able to
- /// successfully complete the API call
- pub success: HashMap,
- /// Map of node id to error message, for nodes that were unable to complete the API
- /// call
- pub error: HashMap,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct MultiRequestQueryParams {
- /// Node ID to query, or `*` for all nodes, or `self` for the node responding to the request
- pub node: String,
-}
-
-// **********************************************
-// Special endpoints
-//
-// These endpoints don't have associated *Response structs
-// because they directly produce an http::Response
-// **********************************************
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct OptionsRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct CheckDomainRequest {
- /// The domain name to check for
- pub domain: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct HealthRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct MetricsRequest;
-
-// **********************************************
-// Cluster operations
-// **********************************************
-
-// ---- GetClusterStatus ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GetClusterStatusRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetClusterStatusResponse {
- /// Current version number of the cluster layout
- pub layout_version: u64,
- /// List of nodes that are either currently connected, part of the
- /// current cluster layout, or part of an older cluster layout that
- /// is still active in the cluster (being drained).
- pub nodes: Vec,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeResp {
- /// Full-length node identifier
- pub id: String,
- /// Garage version
- pub garage_version: Option,
- /// Socket address used by other nodes to connect to this node for RPC
- #[schema(value_type = Option)]
- pub addr: Option,
- /// Hostname of the node
- pub hostname: Option,
- /// Whether this node is connected in the cluster
- pub is_up: bool,
- /// For disconnected nodes, the number of seconds since last contact,
- /// or `null` if no contact was established since Garage restarted.
- pub last_seen_secs_ago: Option,
- /// Role assigned to this node in the current cluster layout
- pub role: Option,
- /// Whether this node is part of an older layout version and is draining data.
- pub draining: bool,
- /// Total and available space on the disk partition(s) containing the data
- /// directory(ies)
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub data_partition: Option,
- /// Total and available space on the disk partition containing the
- /// metadata directory
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub metadata_partition: Option,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeAssignedRole {
- /// Zone name assigned by the cluster administrator
- pub zone: String,
- /// List of tags assigned by the cluster administrator
- pub tags: Vec,
- /// Capacity (in bytes) assigned by the cluster administrator,
- /// absent for gateway nodes
- pub capacity: Option,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct FreeSpaceResp {
- /// Number of bytes available
- pub available: u64,
- /// Total number of bytes
- pub total: u64,
-}
-
-// ---- GetClusterHealth ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GetClusterHealthRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetClusterHealthResponse {
- /// One of `healthy`, `degraded` or `unavailable`:
- /// - `healthy`: Garage node is connected to all storage nodes
- /// - `degraded`: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
- /// - `unavailable`: a quorum of write nodes is not available for some partitions
- pub status: String,
- /// the number of nodes this Garage node has had a TCP connection to since the daemon started
- pub known_nodes: usize,
- /// the nubmer of nodes this Garage node currently has an open connection to
- pub connected_nodes: usize,
- /// the number of storage nodes currently registered in the cluster layout
- pub storage_nodes: usize,
- /// the number of storage nodes to which a connection is currently open
- pub storage_nodes_up: usize,
- /// the total number of partitions of the data (currently always 256)
- pub partitions: usize,
- /// the number of partitions for which a quorum of write nodes is available
- pub partitions_quorum: usize,
- /// the number of partitions for which we are connected to all storage nodes responsible of storing it
- pub partitions_all_ok: usize,
-}
-
-// ---- GetClusterStatistics ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct GetClusterStatisticsRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct GetClusterStatisticsResponse {
- pub freeform: String,
-}
-
-// ---- ConnectClusterNodes ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ConnectClusterNodesRequest(pub Vec);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ConnectClusterNodesResponse(pub Vec);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ConnectNodeResponse {
- /// `true` if Garage managed to connect to this node
- pub success: bool,
- /// An error message if Garage did not manage to connect to this node
- pub error: Option,
-}
-
-// **********************************************
-// Admin token operations
-// **********************************************
-
-// ---- ListAdminTokens ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ListAdminTokensRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ListAdminTokensResponse(pub Vec);
-
-// ---- GetAdminTokenInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-#[serde(rename_all = "camelCase")]
-pub struct GetAdminTokenInfoRequest {
- /// Admin API token ID
- pub id: Option,
- /// Partial token ID or name to search for
- pub search: Option,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetAdminTokenInfoResponse {
- /// Identifier of the admin token (which is also a prefix of the full bearer token)
- pub id: Option,
- /// Creation date
- pub created: Option>,
- /// Name of the admin API token
- pub name: String,
- /// Expiration time and date, formatted according to RFC 3339
- pub expiration: Option>,
- /// Whether this admin token is expired already
- pub expired: bool,
- /// Scope of the admin API token, a list of admin endpoint names (such as
- /// `GetClusterStatus`, etc), or the special value `*` to allow all
- /// admin endpoints
- pub scope: Vec,
-}
-
-// ---- CreateAdminToken ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct CreateAdminTokenRequest(pub UpdateAdminTokenRequestBody);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CreateAdminTokenResponse {
- /// The secret bearer token. **CAUTION:** This token will be shown only
- /// ONCE, so this value MUST be remembered somewhere, or the token
- /// will be unusable.
- pub secret_token: String,
- #[serde(flatten)]
- pub info: GetAdminTokenInfoResponse,
-}
-
-// ---- UpdateAdminToken ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct UpdateAdminTokenRequest {
- /// Admin API token ID
- pub id: String,
- #[param(ignore = true)]
- pub body: UpdateAdminTokenRequestBody,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateAdminTokenRequestBody {
- /// Name of the admin API token
- pub name: Option,
- /// Expiration time and date, formatted according to RFC 3339
- pub expiration: Option>,
- /// Set the admin token to never expire
- #[serde(default)]
- pub never_expires: bool,
- /// Scope of the admin API token, a list of admin endpoint names (such as
- /// `GetClusterStatus`, etc), or the special value `*` to allow all
- /// admin endpoints. **WARNING:** Granting a scope of `CreateAdminToken` or
- /// `UpdateAdminToken` trivially allows for privilege escalation, and is thus
- /// functionnally equivalent to granting a scope of `*`.
- pub scope: Option>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateAdminTokenResponse(pub GetAdminTokenInfoResponse);
-
-// ---- DeleteAdminToken ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct DeleteAdminTokenRequest {
- /// Admin API token ID
- pub id: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct DeleteAdminTokenResponse;
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GetCurrentAdminTokenInfoRequest {
- pub admin_token: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetCurrentAdminTokenInfoResponse(pub GetAdminTokenInfoResponse);
-
-// **********************************************
-// Layout operations
-// **********************************************
-
-// ---- GetClusterLayout ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GetClusterLayoutRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetClusterLayoutResponse {
- /// The current version number of the cluster layout
- pub version: u64,
- /// List of nodes that currently have a role in the cluster layout
- pub roles: Vec,
- /// Layout parameters used when the current layout was computed
- pub parameters: LayoutParameters,
- /// The size, in bytes, of one Garage partition (= a shard)
- pub partition_size: u64,
- /// List of nodes that will have a new role or whose role will be
- /// removed in the next version of the cluster layout
- pub staged_role_changes: Vec,
- /// Layout parameters to use when computing the next version of
- /// the cluster layout
- pub staged_parameters: Option,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LayoutNodeRole {
- /// Identifier of the node
- pub id: String,
- /// Zone name assigned by the cluster administrator
- pub zone: String,
- /// List of tags assigned by the cluster administrator
- pub tags: Vec,
- /// Capacity (in bytes) assigned by the cluster administrator,
- /// absent for gateway nodes
- pub capacity: Option,
- /// Number of partitions stored on this node
- /// (a result of the layout computation)
- pub stored_partitions: Option,
- /// Capacity (in bytes) that is actually usable on this node in the current
- /// layout, which is equal to `stored_partitions` × `partition_size`
- pub usable_capacity: Option,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeRoleChange {
- /// ID of the node for which this change applies
- pub id: String,
- #[serde(flatten)]
- pub action: NodeRoleChangeEnum,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(untagged)]
-pub enum NodeRoleChangeEnum {
- #[serde(rename_all = "camelCase")]
- Remove {
- /// Set `remove` to `true` to remove the node from the layout
- remove: bool,
- },
- #[serde(rename_all = "camelCase")]
- Update(NodeAssignedRole),
-}
-
-#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LayoutParameters {
- /// Minimum number of zones in which a data partition must be replicated
- pub zone_redundancy: ZoneRedundancy,
-}
-
-#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub enum ZoneRedundancy {
- /// Partitions must be replicated in at least this number of
- /// distinct zones.
- AtLeast(usize),
- /// Partitions must be replicated in as many zones as possible:
- /// as many zones as there are replicas, if there are enough distinct
- /// zones, or at least one in each zone otherwise.
- Maximum,
-}
-
-// ---- GetClusterLayoutHistory ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GetClusterLayoutHistoryRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetClusterLayoutHistoryResponse {
- /// The current version number of the cluster layout
- pub current_version: u64,
- /// All nodes in the cluster are aware of layout versions up to
- /// this version number (at least)
- pub min_ack: u64,
- /// Layout version history
- pub versions: Vec<ClusterLayoutVersion>,
- /// Detailed update trackers for nodes (see
- /// `https://garagehq.deuxfleurs.fr/blog/2023-12-preserving-read-after-write-consistency/`)
- pub update_trackers: Option<HashMap<String, NodeUpdateTrackers>>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ClusterLayoutVersion {
- /// Version number of this layout version
- pub version: u64,
- /// Status of this layout version
- pub status: ClusterLayoutVersionStatus,
- /// Number of nodes with an assigned storage capacity in this layout version
- pub storage_nodes: u64,
- /// Number of nodes with a gateway role in this layout version
- pub gateway_nodes: u64,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub enum ClusterLayoutVersionStatus {
- /// This is the most up-to-date layout version
- Current,
- /// This version is still active in the cluster because metadata
- /// is being rebalanced or migrated from old nodes
- Draining,
- /// This version is no longer active in the cluster for metadata
- /// reads and writes. Note that there is still the possibility
- /// that data blocks are being migrated away from nodes in this
- /// layout version.
- Historical,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeUpdateTrackers {
- pub ack: u64,
- pub sync: u64,
- pub sync_ack: u64,
-}
-
-// ---- UpdateClusterLayout ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateClusterLayoutRequest {
- /// New node roles to assign or remove in the cluster layout
- #[serde(default)]
- pub roles: Vec<NodeRoleChange>,
- /// New layout computation parameters to use
- #[serde(default)]
- pub parameters: Option<LayoutParameters>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse);
-
-// ---- PreviewClusterLayoutChanges ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct PreviewClusterLayoutChangesRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(untagged)]
-pub enum PreviewClusterLayoutChangesResponse {
- #[serde(rename_all = "camelCase")]
- Error {
- /// Error message indicating that the layout could not be computed
- /// with the provided configuration
- error: String,
- },
- #[serde(rename_all = "camelCase")]
- Success {
- /// Plain-text information about the layout computation
- /// (do not try to parse this)
- message: Vec<String>,
- /// Details about the new cluster layout
- new_layout: GetClusterLayoutResponse,
- },
-}
-
-// ---- ApplyClusterLayout ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ApplyClusterLayoutRequest {
- /// As a safety measure, the new version number of the layout must
- /// be specified here
- pub version: u64,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ApplyClusterLayoutResponse {
- /// Plain-text information about the layout computation
- /// (do not try to parse this)
- pub message: Vec<String>,
- /// Details about the new cluster layout
- pub layout: GetClusterLayoutResponse,
-}
-
-// ---- RevertClusterLayout ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct RevertClusterLayoutRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct RevertClusterLayoutResponse(pub GetClusterLayoutResponse);
-
-// ---- ClusterLayoutSkipDeadNodes ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ClusterLayoutSkipDeadNodesRequest {
- /// Version number of the layout to assume is currently up-to-date.
- /// This will generally be the current layout version.
- pub version: u64,
- /// Allow the skip even if a quorum of nodes could not be found for
- /// the data among the remaining nodes
- pub allow_missing_data: bool,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ClusterLayoutSkipDeadNodesResponse {
- /// Nodes for which the ACK update tracker has been updated to `version`
- pub ack_updated: Vec<String>,
- /// If `allow_missing_data` is set,
- /// nodes for which the SYNC update tracker has been updated to `version`
- pub sync_updated: Vec<String>,
-}
-
-// **********************************************
-// Access key operations
-// **********************************************
-
-// ---- ListKeys ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ListKeysRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ListKeysResponse(pub Vec<ListKeysResponseItem>);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ListKeysResponseItem {
- pub id: String,
- pub name: String,
- pub created: Option<DateTime<Utc>>,
- pub expiration: Option<DateTime<Utc>>,
- pub expired: bool,
-}
-
-// ---- GetKeyInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-#[serde(rename_all = "camelCase")]
-pub struct GetKeyInfoRequest {
- /// Access key ID
- pub id: Option<String>,
- /// Partial key ID or name to search for
- pub search: Option<String>,
- /// Whether to return the secret access key
- #[serde(default)]
- pub show_secret_key: bool,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetKeyInfoResponse {
- pub access_key_id: String,
- pub created: Option<DateTime<Utc>>,
- pub name: String,
- pub expiration: Option<DateTime<Utc>>,
- pub expired: bool,
- #[serde(default, skip_serializing_if = "is_default")]
- pub secret_access_key: Option<String>,
- pub permissions: KeyPerm,
- pub buckets: Vec<KeyInfoBucketResponse>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct KeyPerm {
- #[serde(default)]
- pub create_bucket: bool,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct KeyInfoBucketResponse {
- pub id: String,
- pub global_aliases: Vec<String>,
- pub local_aliases: Vec<String>,
- pub permissions: ApiBucketKeyPerm,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ApiBucketKeyPerm {
- #[serde(default)]
- pub read: bool,
- #[serde(default)]
- pub write: bool,
- #[serde(default)]
- pub owner: bool,
-}
-
-// ---- CreateKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CreateKeyRequest(pub UpdateKeyRequestBody);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct CreateKeyResponse(pub GetKeyInfoResponse);
-
-// ---- ImportKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ImportKeyRequest {
- pub access_key_id: String,
- pub secret_access_key: String,
- pub name: Option<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ImportKeyResponse(pub GetKeyInfoResponse);
-
-// ---- UpdateKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct UpdateKeyRequest {
- /// Access key ID
- pub id: String,
- #[param(ignore = true)]
- pub body: UpdateKeyRequestBody,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateKeyResponse(pub GetKeyInfoResponse);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateKeyRequestBody {
- /// Name of the API key
- pub name: Option<String>,
- /// Expiration time and date, formatted according to RFC 3339
- pub expiration: Option<DateTime<Utc>>,
- /// Set the access key to never expire
- #[serde(default)]
- pub never_expires: bool,
- /// Permissions to allow for the key
- pub allow: Option<KeyPerm>,
- /// Permissions to deny for the key
- pub deny: Option<KeyPerm>,
-}
-
-// ---- DeleteKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct DeleteKeyRequest {
- /// Access key ID
- pub id: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct DeleteKeyResponse;
-
-// **********************************************
-// Bucket operations
-// **********************************************
-
-// ---- ListBuckets ----
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ListBucketsRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct ListBucketsResponse(pub Vec<ListBucketsResponseItem>);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ListBucketsResponseItem {
- pub id: String,
- pub created: DateTime<Utc>,
- pub global_aliases: Vec<String>,
- pub local_aliases: Vec<BucketLocalAlias>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct BucketLocalAlias {
- pub access_key_id: String,
- pub alias: String,
-}
-
-// ---- GetBucketInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-#[serde(rename_all = "camelCase")]
-pub struct GetBucketInfoRequest {
- /// Exact bucket ID to look up
- pub id: Option<String>,
- /// Global alias of bucket to look up
- pub global_alias: Option<String>,
- /// Partial ID or alias to search for
- pub search: Option<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetBucketInfoResponse {
- /// Identifier of the bucket
- pub id: String,
- /// Bucket creation date
- pub created: DateTime<Utc>,
- /// List of global aliases for this bucket
- pub global_aliases: Vec<String>,
- /// Whether website access is enabled for this bucket
- pub website_access: bool,
- #[serde(default)]
- /// Website configuration for this bucket
- pub website_config: Option<GetBucketInfoWebsiteResponse>,
- /// List of access keys that have permissions granted on this bucket
- pub keys: Vec<GetBucketInfoKey>,
- /// Number of objects in this bucket
- pub objects: i64,
- /// Total number of bytes used by objects in this bucket
- pub bytes: i64,
- /// Number of unfinished uploads in this bucket
- pub unfinished_uploads: i64,
- /// Number of unfinished multipart uploads in this bucket
- pub unfinished_multipart_uploads: i64,
- /// Number of parts in unfinished multipart uploads in this bucket
- pub unfinished_multipart_upload_parts: i64,
- /// Total number of bytes used by unfinished multipart uploads in this bucket
- pub unfinished_multipart_upload_bytes: i64,
- /// Quotas that apply to this bucket
- pub quotas: ApiBucketQuotas,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetBucketInfoWebsiteResponse {
- pub index_document: String,
- pub error_document: Option<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct GetBucketInfoKey {
- pub access_key_id: String,
- pub name: String,
- pub permissions: ApiBucketKeyPerm,
- pub bucket_local_aliases: Vec<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct ApiBucketQuotas {
- pub max_size: Option<u64>,
- pub max_objects: Option<u64>,
-}
-
-// ---- CreateBucket ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CreateBucketRequest {
- pub global_alias: Option<String>,
- pub local_alias: Option<CreateBucketLocalAlias>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct CreateBucketResponse(pub GetBucketInfoResponse);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CreateBucketLocalAlias {
- pub access_key_id: String,
- pub alias: String,
- #[serde(default)]
- pub allow: ApiBucketKeyPerm,
-}
-
-// ---- UpdateBucket ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct UpdateBucketRequest {
- /// ID of the bucket to update
- pub id: String,
- #[param(ignore = true)]
- pub body: UpdateBucketRequestBody,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateBucketResponse(pub GetBucketInfoResponse);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateBucketRequestBody {
- pub website_access: Option<UpdateBucketWebsiteAccess>,
- pub quotas: Option<ApiBucketQuotas>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateBucketWebsiteAccess {
- pub enabled: bool,
- pub index_document: Option<String>,
- pub error_document: Option<String>,
-}
-
-// ---- DeleteBucket ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-pub struct DeleteBucketRequest {
- /// ID of the bucket to delete
- pub id: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct DeleteBucketResponse;
-
-// ---- CleanupIncompleteUploads ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CleanupIncompleteUploadsRequest {
- pub bucket_id: String,
- pub older_than_secs: u64,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct CleanupIncompleteUploadsResponse {
- pub uploads_deleted: u64,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
-#[into_params(parameter_in = Query)]
-#[serde(rename_all = "camelCase")]
-pub struct InspectObjectRequest {
- pub bucket_id: String,
- pub key: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct InspectObjectResponse {
- /// ID of the bucket containing the inspected object
- pub bucket_id: String,
- /// Key of the inspected object
- pub key: String,
- /// List of versions currently stored for this object
- pub versions: Vec<InspectObjectVersion>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, Default)]
-#[serde(rename_all = "camelCase")]
-pub struct InspectObjectVersion {
- /// Version ID
- pub uuid: String,
- /// Creation timestamp of this object version
- pub timestamp: DateTime<Utc>,
- /// Whether this object version was created with SSE-C encryption
- pub encrypted: bool,
- /// Whether this object version is still uploading
- pub uploading: bool,
- /// Whether this is an aborted upload
- pub aborted: bool,
- /// Whether this version is a delete marker (a tombstone indicating that a previous version of
- /// the object has been deleted)
- pub delete_marker: bool,
- /// Whether the object's data is stored inline (for small objects)
- pub inline: bool,
- /// Size of the object, in bytes
- pub size: Option<u64>,
- /// Etag of this object version
- pub etag: Option<String>,
- /// Metadata (HTTP headers) associated with this object version
- #[serde(default, skip_serializing_if = "Vec::is_empty")]
- pub headers: Vec<(String, String)>,
- /// List of data blocks for this object version
- #[serde(default, skip_serializing_if = "Vec::is_empty")]
- pub blocks: Vec<InspectObjectBlock>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct InspectObjectBlock {
- /// Part number of the part containing this block, for multipart uploads
- pub part_number: u64,
- /// Offset of this block within the part
- pub offset: u64,
- /// Hash (blake2 sum) of the block's data
- pub hash: String,
- /// Length of the blocks's data
- pub size: u64,
-}
-
-// **********************************************
-// Operations on permissions for keys on buckets
-// **********************************************
-
-// ---- AllowBucketKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct AllowBucketKeyResponse(pub GetBucketInfoResponse);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct BucketKeyPermChangeRequest {
- pub bucket_id: String,
- pub access_key_id: String,
- pub permissions: ApiBucketKeyPerm,
-}
-
-// ---- DenyBucketKey ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct DenyBucketKeyResponse(pub GetBucketInfoResponse);
-
-// **********************************************
-// Operations on bucket aliases
-// **********************************************
-
-// ---- AddBucketAlias ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct AddBucketAliasRequest {
- pub bucket_id: String,
- #[serde(flatten)]
- pub alias: BucketAliasEnum,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct AddBucketAliasResponse(pub GetBucketInfoResponse);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(untagged)]
-pub enum BucketAliasEnum {
- #[serde(rename_all = "camelCase")]
- Global { global_alias: String },
- #[serde(rename_all = "camelCase")]
- Local {
- local_alias: String,
- access_key_id: String,
- },
-}
-
-// ---- RemoveBucketAlias ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct RemoveBucketAliasRequest {
- pub bucket_id: String,
- #[serde(flatten)]
- pub alias: BucketAliasEnum,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct RemoveBucketAliasResponse(pub GetBucketInfoResponse);
-
-// **********************************************
-// Node operations
-// **********************************************
-
-// ---- GetNodeInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct LocalGetNodeInfoRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalGetNodeInfoResponse {
- pub node_id: String,
- pub garage_version: String,
- pub garage_features: Option<Vec<String>>,
- pub rust_version: String,
- pub db_engine: String,
-}
-
-// ---- GetNodeStatistics ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct LocalGetNodeStatisticsRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalGetNodeStatisticsResponse {
- pub freeform: String,
-}
-
-// ---- CreateMetadataSnapshot ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct LocalCreateMetadataSnapshotRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalCreateMetadataSnapshotResponse;
-
-// ---- LaunchRepairOperation ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalLaunchRepairOperationRequest {
- pub repair_type: RepairType,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub enum RepairType {
- Tables,
- Blocks,
- Versions,
- MultipartUploads,
- BlockRefs,
- BlockRc,
- Rebalance,
- Scrub(ScrubCommand),
- Aliases,
- ClearResyncQueue,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub enum ScrubCommand {
- Start,
- Pause,
- Resume,
- Cancel,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalLaunchRepairOperationResponse;
-
-// **********************************************
-// Worker operations
-// **********************************************
-
-// ---- ListWorkers ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalListWorkersRequest {
- #[serde(default)]
- pub busy_only: bool,
- #[serde(default)]
- pub error_only: bool,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalListWorkersResponse(pub Vec<WorkerInfoResp>);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct WorkerInfoResp {
- pub id: u64,
- pub name: String,
- pub state: WorkerStateResp,
- pub errors: u64,
- pub consecutive_errors: u64,
- pub last_error: Option<WorkerLastError>,
- pub tranquility: Option<u32>,
- pub progress: Option<String>,
- pub queue_length: Option<u64>,
- pub persistent_errors: Option<u64>,
- pub freeform: Vec<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub enum WorkerStateResp {
- Busy,
- #[serde(rename_all = "camelCase")]
- Throttled {
- duration_secs: f32,
- },
- Idle,
- Done,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct WorkerLastError {
- pub message: String,
- pub secs_ago: u64,
-}
-
-// ---- GetWorkerInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalGetWorkerInfoRequest {
- pub id: u64,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalGetWorkerInfoResponse(pub WorkerInfoResp);
-
-// ---- GetWorkerVariable ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalGetWorkerVariableRequest {
- pub variable: Option<String>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalGetWorkerVariableResponse(pub HashMap<String, String>);
-
-// ---- SetWorkerVariable ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalSetWorkerVariableRequest {
- pub variable: String,
- pub value: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalSetWorkerVariableResponse {
- pub variable: String,
- pub value: String,
-}
-
-// **********************************************
-// Block operations
-// **********************************************
-
-// ---- ListBlockErrors ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct LocalListBlockErrorsRequest;
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct LocalListBlockErrorsResponse(pub Vec<BlockError>);
-
-#[derive(Serialize, Deserialize, Clone, Debug, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct BlockError {
- pub block_hash: String,
- pub refcount: u64,
- pub error_count: u64,
- pub last_try_secs_ago: u64,
- pub next_try_in_secs: u64,
-}
-
-// ---- GetBlockInfo ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalGetBlockInfoRequest {
- pub block_hash: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalGetBlockInfoResponse {
- pub block_hash: String,
- pub refcount: u64,
- pub versions: Vec<BlockVersion>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct BlockVersion {
- pub version_id: String,
- pub ref_deleted: bool,
- pub version_deleted: bool,
- pub garbage_collected: bool,
- pub backlink: Option<BlockVersionBacklink>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub enum BlockVersionBacklink {
- #[serde(rename_all = "camelCase")]
- Object { bucket_id: String, key: String },
- #[serde(rename_all = "camelCase")]
- Upload {
- upload_id: String,
- upload_deleted: bool,
- upload_garbage_collected: bool,
- bucket_id: Option<String>,
- key: Option<String>,
- },
-}
-
-// ---- RetryBlockResync ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(untagged)]
-pub enum LocalRetryBlockResyncRequest {
- #[serde(rename_all = "camelCase")]
- All { all: bool },
- #[serde(rename_all = "camelCase")]
- Blocks { block_hashes: Vec<String> },
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalRetryBlockResyncResponse {
- pub count: u64,
-}
-
-// ---- PurgeBlocks ----
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalPurgeBlocksRequest(pub Vec<String>);
-
-#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct LocalPurgeBlocksResponse {
- pub blocks_purged: u64,
- pub objects_deleted: u64,
- pub uploads_deleted: u64,
- pub versions_deleted: u64,
- pub block_refs_purged: u64,
-}
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 19a88024..6f0c474f 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -1,237 +1,333 @@
-use std::borrow::Cow;
+use std::collections::HashMap;
use std::sync::Arc;
-use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
-use hyper::{body::Incoming as IncomingBody, Request, Response};
-use serde::{Deserialize, Serialize};
+use argon2::password_hash::PasswordHash;
+
+use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use tokio::sync::watch;
use opentelemetry::trace::SpanRef;
#[cfg(feature = "metrics")]
use opentelemetry_prometheus::PrometheusExporter;
+#[cfg(feature = "metrics")]
+use prometheus::{Encoder, TextEncoder};
use garage_model::garage::Garage;
-use garage_rpc::{Endpoint as RpcEndpoint, *};
-use garage_table::EmptyKey;
-use garage_util::background::BackgroundRunner;
-use garage_util::data::Uuid;
+use garage_rpc::system::ClusterHealthStatus;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;
-use garage_util::time::now_msec;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
-use crate::api::*;
+use crate::bucket::*;
+use crate::cluster::*;
use crate::error::*;
+use crate::key::*;
use crate::router_v0;
-use crate::router_v1;
-use crate::Authorization;
-use crate::RequestHandler;
-
-// ---- FOR RPC ----
-
-pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc";
-
-#[derive(Debug, Serialize, Deserialize)]
-pub enum AdminRpc {
- Proxy(AdminApiRequest),
- Internal(LocalAdminApiRequest),
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub enum AdminRpcResponse {
- ProxyApiOkResponse(TaggedAdminApiResponse),
- InternalApiOkResponse(LocalAdminApiResponse),
- ApiErrorResponse {
- http_code: u16,
- error_code: String,
- message: String,
- },
-}
-
-impl Rpc for AdminRpc {
- type Response = Result;
-}
-
-impl EndpointHandler for AdminApiServer {
- async fn handle(
- self: &Arc,
- message: &AdminRpc,
- _from: NodeID,
- ) -> Result {
- match message {
- AdminRpc::Proxy(req) => {
- info!("Proxied admin API request: {}", req.name());
- let res = req.clone().handle(&self.garage, &self).await;
- match res {
- Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())),
- Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
- http_code: e.http_status_code().as_u16(),
- error_code: e.code().to_string(),
- message: e.to_string(),
- }),
- }
- }
- AdminRpc::Internal(req) => {
- info!("Internal admin API request: {}", req.name());
- let res = req.clone().handle(&self.garage, &self).await;
- match res {
- Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)),
- Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
- http_code: e.http_status_code().as_u16(),
- error_code: e.code().to_string(),
- message: e.to_string(),
- }),
- }
- }
- }
- }
-}
-
-// ---- FOR HTTP ----
+use crate::router_v1::{Authorization, Endpoint};
pub type ResBody = BoxBody<Error>;
pub struct AdminApiServer {
garage: Arc<Garage>,
#[cfg(feature = "metrics")]
- pub(crate) exporter: PrometheusExporter,
+ exporter: PrometheusExporter,
metrics_token: Option<String>,
- metrics_require_token: bool,
admin_token: Option<String>,
- pub(crate) background: Arc,
- pub(crate) endpoint: Arc>,
-}
-
-pub enum HttpEndpoint {
- Old(router_v1::Endpoint),
- New(String),
}
impl AdminApiServer {
pub fn new(
garage: Arc,
- background: Arc,
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
- ) -> Arc {
+ ) -> Self {
let cfg = &garage.config.admin;
let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
- let metrics_require_token = cfg.metrics_require_token;
-
- let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
- let admin = Arc::new(Self {
+ Self {
garage,
#[cfg(feature = "metrics")]
exporter,
metrics_token,
- metrics_require_token,
admin_token,
- background,
- endpoint,
- });
- admin.endpoint.set_handler(admin.clone());
- admin
+ }
}
pub async fn run(
- self: Arc,
+ self,
bind_addr: UnixOrTCPSocketAddress,
must_exit: watch::Receiver,
) -> Result<(), GarageError> {
let region = self.garage.config.s3_api.s3_region.clone();
- ApiServer::new(region, ArcAdminApiServer(self))
+ ApiServer::new(region, self)
.run_server(bind_addr, Some(0o220), must_exit)
.await
}
- async fn handle_http_api(
+ fn handle_options(&self, _req: &Request) -> Result, Error> {
+ Ok(Response::builder()
+ .status(StatusCode::NO_CONTENT)
+ .header(ALLOW, "OPTIONS, GET, POST")
+ .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
+ .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
+ .body(empty_body())?)
+ }
+
+ async fn handle_check_domain(
&self,
req: Request,
- endpoint: HttpEndpoint,
) -> Result, Error> {
- let auth_header = req.headers().get(AUTHORIZATION).cloned();
+ let query_params: HashMap = req
+ .uri()
+ .query()
+ .map(|v| {
+ url::form_urlencoded::parse(v.as_bytes())
+ .into_owned()
+ .collect()
+ })
+ .unwrap_or_else(HashMap::new);
- let request = match endpoint {
- HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
- HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?,
+ let has_domain_key = query_params.contains_key("domain");
+
+ if !has_domain_key {
+ return Err(Error::bad_request("No domain query string found"));
+ }
+
+ let domain = query_params
+ .get("domain")
+ .ok_or_internal_error("Could not parse domain query string")?;
+
+ if self.check_domain(domain).await? {
+ Ok(Response::builder()
+ .status(StatusCode::OK)
+ .body(string_body(format!(
+ "Domain '{domain}' is managed by Garage"
+ )))?)
+ } else {
+ Err(Error::bad_request(format!(
+ "Domain '{domain}' is not managed by Garage"
+ )))
+ }
+ }
+
+ async fn check_domain(&self, domain: &str) -> Result {
+ // Resolve bucket from domain name, inferring if the website must be activated for the
+ // domain to be valid.
+ let (bucket_name, must_check_website) = if let Some(bname) = self
+ .garage
+ .config
+ .s3_api
+ .root_domain
+ .as_ref()
+ .and_then(|rd| host_to_bucket(domain, rd))
+ {
+ (bname.to_string(), false)
+ } else if let Some(bname) = self
+ .garage
+ .config
+ .s3_web
+ .as_ref()
+ .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
+ {
+ (bname.to_string(), true)
+ } else {
+ (domain.to_string(), true)
};
- let (global_token_hash, token_required) = match request.authorization_type() {
- Authorization::None => (None, false),
- Authorization::MetricsToken => (
- self.metrics_token.as_deref(),
- self.metrics_token.is_some() || self.metrics_require_token,
+ let bucket_id = match self
+ .garage
+ .bucket_helper()
+ .resolve_global_bucket_name(&bucket_name)
+ .await?
+ {
+ Some(bucket_id) => bucket_id,
+ None => return Ok(false),
+ };
+
+ if !must_check_website {
+ return Ok(true);
+ }
+
+ let bucket = self
+ .garage
+ .bucket_helper()
+ .get_existing_bucket(bucket_id)
+ .await?;
+
+ let bucket_state = bucket.state.as_option().unwrap();
+ let bucket_website_config = bucket_state.website_config.get();
+
+ match bucket_website_config {
+ Some(_v) => Ok(true),
+ None => Ok(false),
+ }
+ }
+
+ fn handle_health(&self) -> Result, Error> {
+ let health = self.garage.system.health();
+
+ let (status, status_str) = match health.status {
+ ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
+ ClusterHealthStatus::Degraded => (
+ StatusCode::OK,
+ "Garage is operational but some storage nodes are unavailable",
+ ),
+ ClusterHealthStatus::Unavailable => (
+ StatusCode::SERVICE_UNAVAILABLE,
+ "Quorum is not available for some/all partitions, reads and writes will fail",
),
- Authorization::AdminToken => (self.admin_token.as_deref(), true),
};
+ let status_str = format!(
+ "{}\nConsult the full health check API endpoint at /v1/health for more details\n",
+ status_str
+ );
- if token_required {
- verify_authorization(&self.garage, global_token_hash, auth_header, request.name())?;
- }
+ Ok(Response::builder()
+ .status(status)
+ .header(http::header::CONTENT_TYPE, "text/plain")
+ .body(string_body(status_str))?)
+ }
- match request {
- AdminApiRequest::Options(req) => req.handle(&self.garage, &self).await,
- AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, &self).await,
- AdminApiRequest::Health(req) => req.handle(&self.garage, &self).await,
- AdminApiRequest::Metrics(req) => req.handle(&self.garage, &self).await,
- req => {
- let res = req.handle(&self.garage, &self).await?;
- let mut res = json_ok_response(&res)?;
- res.headers_mut()
- .insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
- Ok(res)
- }
+ fn handle_metrics(&self) -> Result, Error> {
+ #[cfg(feature = "metrics")]
+ {
+ use opentelemetry::trace::Tracer;
+
+ let mut buffer = vec![];
+ let encoder = TextEncoder::new();
+
+ let tracer = opentelemetry::global::tracer("garage");
+ let metric_families = tracer.in_span("admin/gather_metrics", |_| {
+ self.exporter.registry().gather()
+ });
+
+ encoder
+ .encode(&metric_families, &mut buffer)
+ .ok_or_internal_error("Could not serialize metrics")?;
+
+ Ok(Response::builder()
+ .status(StatusCode::OK)
+ .header(http::header::CONTENT_TYPE, encoder.format_type())
+ .body(bytes_body(buffer.into()))?)
}
+ #[cfg(not(feature = "metrics"))]
+ Err(Error::bad_request(
+ "Garage was built without the metrics feature".to_string(),
+ ))
}
}
-struct ArcAdminApiServer(Arc);
-
-impl ApiHandler for ArcAdminApiServer {
+impl ApiHandler for AdminApiServer {
const API_NAME: &'static str = "admin";
const API_NAME_DISPLAY: &'static str = "Admin";
- type Endpoint = HttpEndpoint;
+ type Endpoint = Endpoint;
type Error = Error;
- fn parse_endpoint(&self, req: &Request) -> Result {
+ fn parse_endpoint(&self, req: &Request) -> Result {
if req.uri().path().starts_with("/v0/") {
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
- let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
- Ok(HttpEndpoint::Old(endpoint_v1))
- } else if req.uri().path().starts_with("/v1/") {
- let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
- Ok(HttpEndpoint::Old(endpoint_v1))
+ Endpoint::from_v0(endpoint_v0)
} else {
- Ok(HttpEndpoint::New(req.uri().path().to_string()))
+ Endpoint::from_request(req)
}
}
async fn handle(
&self,
req: Request,
- endpoint: HttpEndpoint,
+ endpoint: Endpoint,
) -> Result, Error> {
- self.0.handle_http_api(req, endpoint).await
- }
+ let required_auth_hash =
+ match endpoint.authorization_type() {
+ Authorization::None => None,
+ Authorization::MetricsToken => self.metrics_token.as_deref(),
+ Authorization::AdminToken => match self.admin_token.as_deref() {
+ None => return Err(Error::forbidden(
+ "Admin token isn't configured, admin API access is disabled for security.",
+ )),
+ Some(t) => Some(t),
+ },
+ };
- fn key_id_from_request(&self, req: &Request) -> Option {
- let auth_header = req.headers().get(AUTHORIZATION)?;
- let token = parse_authorization(auth_header).ok()?;
- let key_id = token.split_once('.')?.0;
- Some(key_id.to_string())
+ if let Some(password_hash) = required_auth_hash {
+ match req.headers().get("Authorization") {
+ None => return Err(Error::forbidden("Authorization token must be provided")),
+ Some(authorization) => {
+ verify_bearer_token(&authorization, password_hash)?;
+ }
+ }
+ }
+
+ match endpoint {
+ Endpoint::Options => self.handle_options(&req),
+ Endpoint::CheckDomain => self.handle_check_domain(req).await,
+ Endpoint::Health => self.handle_health(),
+ Endpoint::Metrics => self.handle_metrics(),
+ Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
+ Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
+ Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
+ // Layout
+ Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
+ Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
+ Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
+ Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
+ // Keys
+ Endpoint::ListKeys => handle_list_keys(&self.garage).await,
+ Endpoint::GetKeyInfo {
+ id,
+ search,
+ show_secret_key,
+ } => {
+ let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
+ handle_get_key_info(&self.garage, id, search, show_secret_key).await
+ }
+ Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
+ Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
+ Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
+ Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
+ // Buckets
+ Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
+ Endpoint::GetBucketInfo { id, global_alias } => {
+ handle_get_bucket_info(&self.garage, id, global_alias).await
+ }
+ Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
+ Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
+ Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
+ // Bucket-key permissions
+ Endpoint::BucketAllowKey => {
+ handle_bucket_change_key_perm(&self.garage, req, true).await
+ }
+ Endpoint::BucketDenyKey => {
+ handle_bucket_change_key_perm(&self.garage, req, false).await
+ }
+ // Bucket aliasing
+ Endpoint::GlobalAliasBucket { id, alias } => {
+ handle_global_alias_bucket(&self.garage, id, alias).await
+ }
+ Endpoint::GlobalUnaliasBucket { id, alias } => {
+ handle_global_unalias_bucket(&self.garage, id, alias).await
+ }
+ Endpoint::LocalAliasBucket {
+ id,
+ access_key_id,
+ alias,
+ } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
+ Endpoint::LocalUnaliasBucket {
+ id,
+ access_key_id,
+ alias,
+ } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
+ }
}
}
-impl ApiEndpoint for HttpEndpoint {
- fn name(&self) -> Cow<'static, str> {
- match self {
- Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
- Self::New(path) => Cow::Owned(path.clone()),
- }
+impl ApiEndpoint for Endpoint {
+ fn name(&self) -> &'static str {
+ Endpoint::name(self)
}
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
@@ -251,91 +347,20 @@ fn hash_bearer_token(token: &str) -> String {
.to_string()
}
-fn parse_authorization(auth_header: &hyper::http::HeaderValue) -> Result<&str, Error> {
- let token = auth_header
+fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
+ use argon2::{password_hash::PasswordVerifier, Argon2};
+
+ let parsed_hash = PasswordHash::new(&password_hash).unwrap();
+
+ token
.to_str()?
.strip_prefix("Bearer ")
- .ok_or_else(|| Error::forbidden("Invalid Authorization header"))?
- .trim();
- Ok(token)
-}
-
-fn verify_authorization(
- garage: &Garage,
- global_token_hash: Option<&str>,
- auth_header: Option,
- endpoint_name: &str,
-) -> Result<(), Error> {
- use argon2::{password_hash::PasswordHash, password_hash::PasswordVerifier, Argon2};
-
- let invalid_msg = "Invalid bearer token";
-
- let token = match &auth_header {
- None => {
- return Err(Error::forbidden(
- "Bearer token must be provided in Authorization header",
- ))
- }
- Some(authorization) => parse_authorization(authorization)?,
- };
-
- let token_hash_string = if let Some((prefix, _)) = token.split_once('.') {
- garage
- .admin_token_table
- .get_local(&EmptyKey, &prefix.to_string())?
- .and_then(|k| k.state.into_option())
- .filter(|p| !p.is_expired(now_msec()))
- // GetCurrentAdminTokenInfo endpoint must be accessible even if it is not in the token scopes
- .filter(|p| p.has_scope(endpoint_name) || endpoint_name == "GetCurrentAdminTokenInfo")
- .ok_or_else(|| Error::forbidden(invalid_msg))?
- .token_hash
- } else {
- global_token_hash
- .ok_or_else(|| Error::forbidden(invalid_msg))?
- .to_string()
- };
-
- let token_hash =
- PasswordHash::new(&token_hash_string).ok_or_internal_error("Could not parse token hash")?;
-
- Argon2::default()
- .verify_password(token.as_bytes(), &token_hash)
- .map_err(|_| Error::forbidden(invalid_msg))?;
+ .and_then(|token| {
+ Argon2::default()
+ .verify_password(token.trim().as_bytes(), &parsed_hash)
+ .ok()
+ })
+ .ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
Ok(())
}
-
-pub(crate) fn find_matching_nodes(garage: &Garage, spec: &str) -> Result, Error> {
- if spec == "self" {
- Ok(vec![garage.system.id])
- } else {
- // Collect all nodes currently up and/or in cluster layout
- let mut res = vec![];
- if let Ok(all_nodes) = garage.system.cluster_layout().all_nodes() {
- res = all_nodes.to_vec();
- }
- for node in garage.system.get_known_nodes() {
- if node.is_up && !res.contains(&node.id) {
- res.push(node.id);
- }
- }
-
- if spec == "*" {
- // match all nodes
- Ok(res)
- } else {
- // filter nodes that match spec
- res.retain(|node| hex::encode(node).starts_with(spec));
- if res.is_empty() {
- Err(Error::bad_request(format!("No nodes matching {}", spec)))
- } else if res.len() > 1 {
- Err(Error::bad_request(format!(
- "Multiple nodes matching {}: {:?}",
- spec, res
- )))
- } else {
- Ok(res)
- }
- }
- }
-}
diff --git a/src/api/admin/block.rs b/src/api/admin/block.rs
deleted file mode 100644
index 586f8554..00000000
--- a/src/api/admin/block.rs
+++ /dev/null
@@ -1,284 +0,0 @@
-use std::sync::Arc;
-
-use garage_util::data::*;
-use garage_util::error::Error as GarageError;
-use garage_util::time::now_msec;
-
-use garage_table::EmptyKey;
-
-use garage_model::garage::Garage;
-use garage_model::s3::object_table::*;
-use garage_model::s3::version_table::*;
-
-use garage_api_common::common_error::CommonErrorDerivative;
-
-use crate::api::*;
-use crate::error::*;
-use crate::{Admin, RequestHandler};
-
-impl RequestHandler for LocalListBlockErrorsRequest {
- type Response = LocalListBlockErrorsResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let errors = garage.block_manager.list_resync_errors()?;
- let now = now_msec();
- let errors = errors
- .into_iter()
- .map(|e| BlockError {
- block_hash: hex::encode(&e.hash),
- refcount: e.refcount,
- error_count: e.error_count,
- last_try_secs_ago: now.saturating_sub(e.last_try) / 1000,
- next_try_in_secs: e.next_try.saturating_sub(now) / 1000,
- })
- .collect();
- Ok(LocalListBlockErrorsResponse(errors))
- }
-}
-
-impl RequestHandler for LocalGetBlockInfoRequest {
- type Response = LocalGetBlockInfoResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let hash = find_block_hash_by_prefix(garage, &self.block_hash)?;
- let refcount = garage.block_manager.get_block_rc(&hash)?;
- let block_refs = garage
- .block_ref_table
- .get_range(&hash, None, None, 10000, Default::default())
- .await?;
- let mut versions = vec![];
- for br in block_refs {
- if let Some(v) = garage.version_table.get(&br.version, &EmptyKey).await? {
- let bl = match &v.backlink {
- VersionBacklink::MultipartUpload { upload_id } => {
- if let Some(u) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
- BlockVersionBacklink::Upload {
- upload_id: hex::encode(&upload_id),
- upload_deleted: u.deleted.get(),
- upload_garbage_collected: false,
- bucket_id: Some(hex::encode(&u.bucket_id)),
- key: Some(u.key.to_string()),
- }
- } else {
- BlockVersionBacklink::Upload {
- upload_id: hex::encode(&upload_id),
- upload_deleted: true,
- upload_garbage_collected: true,
- bucket_id: None,
- key: None,
- }
- }
- }
- VersionBacklink::Object { bucket_id, key } => BlockVersionBacklink::Object {
- bucket_id: hex::encode(&bucket_id),
- key: key.to_string(),
- },
- };
- versions.push(BlockVersion {
- version_id: hex::encode(&br.version),
- ref_deleted: br.deleted.get(),
- version_deleted: v.deleted.get(),
- garbage_collected: false,
- backlink: Some(bl),
- });
- } else {
- versions.push(BlockVersion {
- version_id: hex::encode(&br.version),
- ref_deleted: br.deleted.get(),
- version_deleted: true,
- garbage_collected: true,
- backlink: None,
- });
- }
- }
- Ok(LocalGetBlockInfoResponse {
- block_hash: hex::encode(&hash),
- refcount,
- versions,
- })
- }
-}
-
-impl RequestHandler for LocalRetryBlockResyncRequest {
- type Response = LocalRetryBlockResyncResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- match self {
- Self::All { all: true } => {
- let blocks = garage.block_manager.list_resync_errors()?;
- for b in blocks.iter() {
- garage.block_manager.resync.clear_backoff(&b.hash)?;
- }
- Ok(LocalRetryBlockResyncResponse {
- count: blocks.len() as u64,
- })
- }
- Self::All { all: false } => Err(Error::bad_request("nonsense")),
- Self::Blocks { block_hashes } => {
- for hash in block_hashes.iter() {
- let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
- let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
- garage.block_manager.resync.clear_backoff(&hash)?;
- }
- Ok(LocalRetryBlockResyncResponse {
- count: block_hashes.len() as u64,
- })
- }
- }
- }
-}
-
-impl RequestHandler for LocalPurgeBlocksRequest {
- type Response = LocalPurgeBlocksResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let mut obj_dels = 0;
- let mut mpu_dels = 0;
- let mut ver_dels = 0;
- let mut br_dels = 0;
-
- for hash in self.0.iter() {
- let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
- let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
- let block_refs = garage
- .block_ref_table
- .get_range(&hash, None, None, 10000, Default::default())
- .await?;
-
- for br in block_refs {
- if let Some(version) = garage.version_table.get(&br.version, &EmptyKey).await? {
- handle_block_purge_version_backlink(
- garage,
- &version,
- &mut obj_dels,
- &mut mpu_dels,
- )
- .await?;
-
- if !version.deleted.get() {
- let deleted_version = Version::new(version.uuid, version.backlink, true);
- garage.version_table.insert(&deleted_version).await?;
- ver_dels += 1;
- }
- }
- if !br.deleted.get() {
- let mut br = br;
- br.deleted.set();
- garage.block_ref_table.insert(&br).await?;
- br_dels += 1;
- }
- }
- }
-
- Ok(LocalPurgeBlocksResponse {
- blocks_purged: self.0.len() as u64,
- block_refs_purged: br_dels,
- versions_deleted: ver_dels,
- objects_deleted: obj_dels,
- uploads_deleted: mpu_dels,
- })
- }
-}
-
-fn find_block_hash_by_prefix(garage: &Arc, prefix: &str) -> Result {
- if prefix.len() < 4 {
- return Err(Error::bad_request(
- "Please specify at least 4 characters of the block hash",
- ));
- }
-
- let prefix_bin = hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?;
-
- let iter = garage
- .block_ref_table
- .data
- .store
- .range(&prefix_bin[..]..)
- .map_err(GarageError::from)?;
- let mut found = None;
- for item in iter {
- let (k, _v) = item.map_err(GarageError::from)?;
- let hash = Hash::try_from(&k[..32]).unwrap();
- if &hash.as_slice()[..prefix_bin.len()] != prefix_bin {
- break;
- }
- if hex::encode(hash.as_slice()).starts_with(prefix) {
- match &found {
- Some(x) if *x == hash => (),
- Some(_) => {
- return Err(Error::bad_request(format!(
- "Several blocks match prefix `{}`",
- prefix
- )));
- }
- None => {
- found = Some(hash);
- }
- }
- }
- }
-
- found.ok_or_else(|| Error::NoSuchBlock(prefix.to_string()))
-}
-
-async fn handle_block_purge_version_backlink(
- garage: &Arc,
- version: &Version,
- obj_dels: &mut u64,
- mpu_dels: &mut u64,
-) -> Result<(), Error> {
- let (bucket_id, key, ov_id) = match &version.backlink {
- VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
- VersionBacklink::MultipartUpload { upload_id } => {
- if let Some(mut mpu) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
- if !mpu.deleted.get() {
- mpu.parts.clear();
- mpu.deleted.set();
- garage.mpu_table.insert(&mpu).await?;
- *mpu_dels += 1;
- }
- (mpu.bucket_id, mpu.key.clone(), *upload_id)
- } else {
- return Ok(());
- }
- }
- };
-
- if let Some(object) = garage.object_table.get(&bucket_id, &key).await? {
- let ov = object.versions().iter().rev().find(|v| v.is_complete());
- if let Some(ov) = ov {
- if ov.uuid == ov_id {
- let del_uuid = gen_uuid();
- let deleted_object = Object::new(
- bucket_id,
- key,
- vec![ObjectVersion {
- uuid: del_uuid,
- timestamp: ov.timestamp + 1,
- state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
- }],
- );
- garage.object_table.insert(&deleted_object).await?;
- *obj_dels += 1;
- }
- }
- }
-
- Ok(())
-}
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index 77541e4f..207693b6 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::sync::Arc;
-use std::time::Duration;
-use chrono::DateTime;
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
+use serde::{Deserialize, Serialize};
use garage_util::crdt::*;
use garage_util::data::*;
@@ -18,603 +18,102 @@ use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
use garage_api_common::common_error::CommonError;
+use garage_api_common::helpers::*;
-use crate::api::*;
+use crate::api_server::ResBody;
use crate::error::*;
-use crate::{Admin, RequestHandler};
+use crate::key::ApiBucketKeyPerm;
-impl RequestHandler for ListBucketsRequest {
- type Response = ListBucketsResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let buckets = garage
- .bucket_table
- .get_range(
- &EmptyKey,
- None,
- Some(DeletedFilter::NotDeleted),
- 10000,
- EnumerationOrder::Forward,
- )
- .await?;
-
- let res = buckets
- .into_iter()
- .map(|b| {
- let state = b.state.as_option().unwrap();
- ListBucketsResponseItem {
- id: hex::encode(b.id),
- created: DateTime::from_timestamp_millis(state.creation_date as i64)
- .expect("invalid timestamp stored in db"),
- global_aliases: state
- .aliases
- .items()
- .iter()
- .filter(|(_, _, a)| *a)
- .map(|(n, _, _)| n.to_string())
- .collect::>(),
- local_aliases: state
- .local_aliases
- .items()
- .iter()
- .filter(|(_, _, a)| *a)
- .map(|((k, n), _, _)| BucketLocalAlias {
- access_key_id: k.to_string(),
- alias: n.to_string(),
- })
- .collect::>(),
- }
- })
- .collect::>();
-
- Ok(ListBucketsResponse(res))
- }
-}
-
-impl RequestHandler for GetBucketInfoRequest {
- type Response = GetBucketInfoResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let bucket_id = match (self.id, self.global_alias, self.search) {
- (Some(id), None, None) => parse_bucket_id(&id)?,
- (None, Some(ga), None) => garage
- .bucket_alias_table
- .get(&EmptyKey, &ga)
- .await?
- .and_then(|x| *x.state.get())
- .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
- (None, None, Some(search)) => {
- let helper = garage.bucket_helper();
- if let Some(bucket) = helper.resolve_global_bucket(&search).await? {
- bucket.id
- } else {
- let hexdec = if search.len() >= 2 {
- search
- .get(..search.len() & !1)
- .and_then(|x| hex::decode(x).ok())
- } else {
- None
- };
- let hex = hexdec
- .ok_or_else(|| Error::Common(CommonError::NoSuchBucket(search.clone())))?;
-
- let mut start = [0u8; 32];
- start
- .as_mut_slice()
- .get_mut(..hex.len())
- .ok_or_bad_request("invalid length")?
- .copy_from_slice(&hex);
- let mut candidates = garage
- .bucket_table
- .get_range(
- &EmptyKey,
- Some(start.into()),
- Some(DeletedFilter::NotDeleted),
- 10,
- EnumerationOrder::Forward,
- )
- .await?
- .into_iter()
- .collect::>();
- candidates.retain(|x| hex::encode(x.id).starts_with(&search));
- if candidates.is_empty() {
- return Err(Error::Common(CommonError::NoSuchBucket(search.clone())));
- } else if candidates.len() == 1 {
- candidates.into_iter().next().unwrap().id
- } else {
- return Err(Error::bad_request(format!(
- "Several matching buckets: {}",
- search
- )));
- }
- }
- }
- _ => {
- return Err(Error::bad_request(
- "Either id, globalAlias or search must be provided (but not several of them)",
- ));
- }
- };
-
- bucket_info_results(garage, bucket_id).await
- }
-}
-
-impl RequestHandler for CreateBucketRequest {
- type Response = CreateBucketResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let helper = garage.locked_helper().await;
-
- if let Some(ga) = &self.global_alias {
- if !is_valid_bucket_name(ga, garage.config.allow_punycode) {
- return Err(Error::bad_request(format!(
- "{}: {}",
- ga, INVALID_BUCKET_NAME_MESSAGE
- )));
- }
-
- if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
- if alias.state.get().is_some() {
- return Err(CommonError::BucketAlreadyExists.into());
- }
- }
- }
-
- if let Some(la) = &self.local_alias {
- if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) {
- return Err(Error::bad_request(format!(
- "{}: {}",
- la.alias, INVALID_BUCKET_NAME_MESSAGE
- )));
- }
-
- let key = helper.key().get_existing_key(&la.access_key_id).await?;
- let state = key.state.as_option().unwrap();
- if matches!(state.local_aliases.get(&la.alias), Some(_)) {
- return Err(Error::bad_request("Local alias already exists"));
- }
- }
-
- let bucket = Bucket::new();
- garage.bucket_table.insert(&bucket).await?;
-
- if let Some(ga) = &self.global_alias {
- helper.set_global_bucket_alias(bucket.id, ga).await?;
- }
-
- if let Some(la) = &self.local_alias {
- helper
- .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
- .await?;
-
- if la.allow.read || la.allow.write || la.allow.owner {
- helper
- .set_bucket_key_permissions(
- bucket.id,
- &la.access_key_id,
- BucketKeyPerm {
- timestamp: now_msec(),
- allow_read: la.allow.read,
- allow_write: la.allow.write,
- allow_owner: la.allow.owner,
- },
- )
- .await?;
- }
- }
-
- Ok(CreateBucketResponse(
- bucket_info_results(garage, bucket.id).await?,
- ))
- }
-}
-
-impl RequestHandler for DeleteBucketRequest {
- type Response = DeleteBucketResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let helper = garage.locked_helper().await;
-
- let bucket_id = parse_bucket_id(&self.id)?;
-
- let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
- let state = bucket.state.as_option().unwrap();
-
- // Check bucket is empty
- if !helper.bucket().is_bucket_empty(bucket_id).await? {
- return Err(CommonError::BucketNotEmpty.into());
- }
-
- // --- done checking, now commit ---
- // 1. delete authorization from keys that had access
- for (key_id, perm) in bucket.authorized_keys() {
- if perm.is_any() {
- helper
- .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
- .await?;
- }
- }
- // 2. delete all local aliases
- for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
- if *active {
- helper
- .purge_local_bucket_alias(bucket.id, key_id, alias)
- .await?;
- }
- }
- // 3. delete all global aliases
- for (alias, _, active) in state.aliases.items().iter() {
- if *active {
- helper.purge_global_bucket_alias(bucket.id, alias).await?;
- }
- }
-
- // 4. delete bucket
- bucket.state = Deletable::delete();
- garage.bucket_table.insert(&bucket).await?;
-
- Ok(DeleteBucketResponse)
- }
-}
-
-impl RequestHandler for UpdateBucketRequest {
- type Response = UpdateBucketResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let bucket_id = parse_bucket_id(&self.id)?;
-
- let mut bucket = garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
-
- let state = bucket.state.as_option_mut().unwrap();
-
- if let Some(wa) = self.body.website_access {
- if wa.enabled {
- let (redirect_all, routing_rules) = match state.website_config.get() {
- Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
- None => (None, Vec::new()),
- };
- state.website_config.update(Some(WebsiteConfig {
- index_document: wa.index_document.ok_or_bad_request(
- "Please specify indexDocument when enabling website access.",
- )?,
- error_document: wa.error_document,
- redirect_all,
- routing_rules,
- }));
- } else {
- if wa.index_document.is_some() || wa.error_document.is_some() {
- return Err(Error::bad_request(
- "Cannot specify indexDocument or errorDocument when disabling website access.",
- ));
- }
- state.website_config.update(None);
- }
- }
-
- if let Some(q) = self.body.quotas {
- state.quotas.update(BucketQuotas {
- max_size: q.max_size,
- max_objects: q.max_objects,
- });
- }
-
- garage.bucket_table.insert(&bucket).await?;
-
- Ok(UpdateBucketResponse(
- bucket_info_results(garage, bucket.id).await?,
- ))
- }
-}
-
-impl RequestHandler for CleanupIncompleteUploadsRequest {
- type Response = CleanupIncompleteUploadsResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let duration = Duration::from_secs(self.older_than_secs);
-
- let bucket_id = parse_bucket_id(&self.bucket_id)?;
-
- let count = garage
- .bucket_helper()
- .cleanup_incomplete_uploads(&bucket_id, duration)
- .await?;
-
- Ok(CleanupIncompleteUploadsResponse {
- uploads_deleted: count as u64,
- })
- }
-}
-
-impl RequestHandler for InspectObjectRequest {
- type Response = InspectObjectResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let bucket_id = parse_bucket_id(&self.bucket_id)?;
-
- let object = garage
- .object_table
- .get(&bucket_id, &self.key)
- .await?
- .ok_or_else(|| Error::NoSuchKey)?;
-
- let mut versions = vec![];
- for obj_ver in object.versions().iter() {
- let ver = garage.version_table.get(&obj_ver.uuid, &EmptyKey).await?;
- let blocks = ver
- .map(|v| {
- v.blocks
- .items()
- .iter()
- .map(|(vk, vb)| InspectObjectBlock {
- part_number: vk.part_number,
- offset: vk.offset,
- hash: hex::encode(&vb.hash),
- size: vb.size,
- })
- .collect::>()
- })
- .unwrap_or_default();
- let uuid = hex::encode(&obj_ver.uuid);
- let timestamp = DateTime::from_timestamp_millis(obj_ver.timestamp as i64)
- .expect("invalid timestamp in db");
- match &obj_ver.state {
- ObjectVersionState::Uploading { encryption, .. } => {
- versions.push(InspectObjectVersion {
- uuid,
- timestamp,
- encrypted: !matches!(encryption, ObjectVersionEncryption::Plaintext { .. }),
- uploading: true,
- headers: match encryption {
- ObjectVersionEncryption::Plaintext { inner } => inner.headers.clone(),
- _ => vec![],
- },
- blocks,
- ..Default::default()
- });
- }
- ObjectVersionState::Complete(data) => match data {
- ObjectVersionData::DeleteMarker => {
- versions.push(InspectObjectVersion {
- uuid,
- timestamp,
- delete_marker: true,
- ..Default::default()
- });
- }
- ObjectVersionData::Inline(meta, _) => {
- versions.push(InspectObjectVersion {
- uuid,
- timestamp,
- inline: true,
- size: Some(meta.size),
- etag: Some(meta.etag.clone()),
- encrypted: !matches!(
- meta.encryption,
- ObjectVersionEncryption::Plaintext { .. }
- ),
- headers: match &meta.encryption {
- ObjectVersionEncryption::Plaintext { inner } => {
- inner.headers.clone()
- }
- _ => vec![],
- },
- ..Default::default()
- });
- }
- ObjectVersionData::FirstBlock(meta, _) => {
- versions.push(InspectObjectVersion {
- uuid,
- timestamp,
- size: Some(meta.size),
- etag: Some(meta.etag.clone()),
- encrypted: !matches!(
- meta.encryption,
- ObjectVersionEncryption::Plaintext { .. }
- ),
- headers: match &meta.encryption {
- ObjectVersionEncryption::Plaintext { inner } => {
- inner.headers.clone()
- }
- _ => vec![],
- },
- blocks,
- ..Default::default()
- });
- }
- },
- ObjectVersionState::Aborted => {
- versions.push(InspectObjectVersion {
- uuid,
- timestamp,
- aborted: true,
- blocks,
- ..Default::default()
- });
- }
- }
- }
-
- Ok(InspectObjectResponse {
- bucket_id: hex::encode(&object.bucket_id),
- key: object.key,
- versions,
- })
- }
-}
-
-// ---- BUCKET/KEY PERMISSIONS ----
-
-impl RequestHandler for AllowBucketKeyRequest {
- type Response = AllowBucketKeyResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let res = handle_bucket_change_key_perm(garage, self.0, true).await?;
- Ok(AllowBucketKeyResponse(res))
- }
-}
-
-impl RequestHandler for DenyBucketKeyRequest {
- type Response = DenyBucketKeyResponse;
-
- async fn handle(
- self,
- garage: &Arc,
- _admin: &Admin,
- ) -> Result {
- let res = handle_bucket_change_key_perm(garage, self.0, false).await?;
- Ok(DenyBucketKeyResponse(res))
- }
-}
-
-pub async fn handle_bucket_change_key_perm(
- garage: &Arc<Garage>,
- req: BucketKeyPermChangeRequest,
- new_perm_flag: bool,
- ) -> Result<GetBucketInfoResponse, Error> {
- let helper = garage.locked_helper().await;
-
- let bucket_id = parse_bucket_id(&req.bucket_id)?;
-
- let bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
- let state = bucket.state.as_option().unwrap();
-
- let key = helper.key().get_existing_key(&req.access_key_id).await?;
-
- let mut perm = state
- .authorized_keys
- .get(&key.key_id)
- .cloned()
- .unwrap_or(BucketKeyPerm::NO_PERMISSIONS);
-
- if req.permissions.read {
- perm.allow_read = new_perm_flag;
- }
- if req.permissions.write {
- perm.allow_write = new_perm_flag;
- }
- if req.permissions.owner {
- perm.allow_owner = new_perm_flag;
- }
-
- helper
- .set_bucket_key_permissions(bucket.id, &key.key_id, perm)
+pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+ let buckets = garage
+ .bucket_table
+ .get_range(
+ &EmptyKey,
+ None,
+ Some(DeletedFilter::NotDeleted),
+ 10000,
+ EnumerationOrder::Forward,
+ )
.await?;
- bucket_info_results(garage, bucket.id).await
+ let res = buckets
+ .into_iter()
+ .map(|b| {
+ let state = b.state.as_option().unwrap();
+ ListBucketResultItem {
+ id: hex::encode(b.id),
+ global_aliases: state
+ .aliases
+ .items()
+ .iter()
+ .filter(|(_, _, a)| *a)
+ .map(|(n, _, _)| n.to_string())
+ .collect::<Vec<_>>(),
+ local_aliases: state
+ .local_aliases
+ .items()
+ .iter()
+ .filter(|(_, _, a)| *a)
+ .map(|((k, n), _, _)| BucketLocalAlias {
+ access_key_id: k.to_string(),
+ alias: n.to_string(),
+ })
+ .collect::<Vec<_>>(),
+ }
+ })
+ .collect::<Vec<_>>();
+
+ Ok(json_ok_response(&res)?)
}
-// ---- BUCKET ALIASES ----
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct ListBucketResultItem {
+ id: String,
+ global_aliases: Vec<String>,
+ local_aliases: Vec<BucketLocalAlias>,
+}
-impl RequestHandler for AddBucketAliasRequest {
- type Response = AddBucketAliasResponse;
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct BucketLocalAlias {
+ access_key_id: String,
+ alias: String,
+}
- async fn handle(
- self,
- garage: &Arc<Garage>,
- _admin: &Admin,
- ) -> Result<AddBucketAliasResponse, Error> {
- let bucket_id = parse_bucket_id(&self.bucket_id)?;
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct ApiBucketQuotas {
+ max_size: Option<u64>,
+ max_objects: Option<u64>,
+}
- let helper = garage.locked_helper().await;
-
- match self.alias {
- BucketAliasEnum::Global { global_alias } => {
- helper
- .set_global_bucket_alias(bucket_id, &global_alias)
- .await?
- }
- BucketAliasEnum::Local {
- local_alias,
- access_key_id,
- } => {
- helper
- .set_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
- .await?
- }
+pub async fn handle_get_bucket_info(
+ garage: &Arc<Garage>,
+ id: Option<String>,
+ global_alias: Option<String>,
+) -> Result<Response<ResBody>, Error> {
+ let bucket_id = match (id, global_alias) {
+ (Some(id), None) => parse_bucket_id(&id)?,
+ (None, Some(ga)) => garage
+ .bucket_helper()
+ .resolve_global_bucket_name(&ga)
+ .await?
+ .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
+ _ => {
+ return Err(Error::bad_request(
+ "Either id or globalAlias must be provided (but not both)",
+ ));
}
+ };
- Ok(AddBucketAliasResponse(
- bucket_info_results(garage, bucket_id).await?,
- ))
- }
+ bucket_info_results(garage, bucket_id).await
}
-impl RequestHandler for RemoveBucketAliasRequest {
- type Response = RemoveBucketAliasResponse;
-
- async fn handle(
- self,
- garage: &Arc<Garage>,
- _admin: &Admin,
- ) -> Result<RemoveBucketAliasResponse, Error> {
- let bucket_id = parse_bucket_id(&self.bucket_id)?;
-
- let helper = garage.locked_helper().await;
-
- match self.alias {
- BucketAliasEnum::Global { global_alias } => {
- helper
- .unset_global_bucket_alias(bucket_id, &global_alias)
- .await?
- }
- BucketAliasEnum::Local {
- local_alias,
- access_key_id,
- } => {
- helper
- .unset_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
- .await?
- }
- }
-
- Ok(RemoveBucketAliasResponse(
- bucket_info_results(garage, bucket_id).await?,
- ))
- }
-}
-
-// ---- HELPER ----
-
async fn bucket_info_results(
garage: &Arc