Compare commits
340 commits
main-v1...1686a/tran
@ -16,6 +16,16 @@ steps:
|
|||
commands:
|
||||
- nix-build -j4 --attr flakePackages.fmt
|
||||
|
||||
- name: check typos
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-shell --attr ci --run typos
|
||||
|
||||
- name: check lints with clippy
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-build -j4 --attr flakePackages.clippy
|
||||
|
||||
- name: build
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
|
|
|
|||
|
|
@ -38,7 +38,15 @@ steps:
|
|||
- matrix:
|
||||
ARCH: i386
|
||||
|
||||
- name: upgrade tests
|
||||
- name: upgrade tests from v1.0.0
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-shell --attr ci --run "./script/test-upgrade.sh v1.0.0 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
|
||||
when:
|
||||
- matrix:
|
||||
ARCH: amd64
|
||||
|
||||
- name: upgrade tests from v0.8.4
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
|
||||
|
|
|
|||
136 Cargo.lock (generated)
|
|
@ -301,9 +301,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "aws-sdk-s3"
|
||||
version = "1.120.0"
|
||||
version = "1.102.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06673901e961f20fa8d7da907da48f7ad6c1b383e3726c22bd418900f015abe1"
|
||||
checksum = "75ddb925e840f49446aa6338b67abdbec04b4ebf923b7da038ec4c35afb916cd"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
|
|
@ -313,7 +313,6 @@ dependencies = [
|
|||
"aws-smithy-eventstream",
|
||||
"aws-smithy-http",
|
||||
"aws-smithy-json",
|
||||
"aws-smithy-observability",
|
||||
"aws-smithy-runtime",
|
||||
"aws-smithy-runtime-api",
|
||||
"aws-smithy-types",
|
||||
|
|
@ -370,9 +369,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "aws-smithy-checksums"
|
||||
version = "0.63.13"
|
||||
version = "0.63.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "23374b9170cbbcc6f5df8dc5ebb9b6c5c28a3c8f599f0e8b8b10eb6f4a5c6e74"
|
||||
checksum = "9054b4cc5eda331cde3096b1576dec45365c5cbbca61d1fffa5f236e251dfce7"
|
||||
dependencies = [
|
||||
"aws-smithy-http",
|
||||
"aws-smithy-types",
|
||||
|
|
@ -865,42 +864,16 @@ dependencies = [
|
|||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc"
|
||||
version = "3.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
|
||||
dependencies = [
|
||||
"crc-catalog",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc-catalog"
|
||||
version = "2.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
|
||||
|
||||
[[package]]
|
||||
name = "crc-fast"
|
||||
version = "1.9.0"
|
||||
version = "1.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d"
|
||||
checksum = "e75b2483e97a5a7da73ac68a05b629f9c53cff58d8ed1c77866079e18b00dba5"
|
||||
dependencies = [
|
||||
"crc",
|
||||
"digest",
|
||||
"rustversion",
|
||||
"spin 0.10.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32c"
|
||||
version = "0.6.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47"
|
||||
dependencies = [
|
||||
"rustc_version",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.5.0"
|
||||
|
|
@ -1201,12 +1174,6 @@ version = "0.1.5"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
|
||||
|
||||
[[package]]
|
||||
name = "foldhash"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
|
||||
|
||||
[[package]]
|
||||
name = "form_urlencoded"
|
||||
version = "1.2.2"
|
||||
|
|
@ -1311,7 +1278,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"assert-json-diff",
|
||||
"async-trait",
|
||||
|
|
@ -1322,7 +1289,7 @@ dependencies = [
|
|||
"bytes",
|
||||
"bytesize",
|
||||
"chrono",
|
||||
"crc32fast",
|
||||
"crc-fast",
|
||||
"format_table",
|
||||
"futures",
|
||||
"garage_api_admin",
|
||||
|
|
@ -1351,7 +1318,6 @@ dependencies = [
|
|||
"opentelemetry-otlp",
|
||||
"opentelemetry-prometheus",
|
||||
"parse_duration",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha1",
|
||||
"sha2",
|
||||
|
|
@ -1363,16 +1329,21 @@ dependencies = [
|
|||
"tracing",
|
||||
"tracing-journald",
|
||||
"tracing-subscriber",
|
||||
"utoipa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_api_admin"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"argon2",
|
||||
"async-trait",
|
||||
"bytesize",
|
||||
"chrono",
|
||||
"format_table",
|
||||
"futures",
|
||||
"garage_api_common",
|
||||
"garage_block",
|
||||
"garage_model",
|
||||
"garage_rpc",
|
||||
"garage_table",
|
||||
|
|
@ -1382,6 +1353,7 @@ dependencies = [
|
|||
"hyper 1.8.1",
|
||||
"opentelemetry",
|
||||
"opentelemetry-prometheus",
|
||||
"paste",
|
||||
"prometheus",
|
||||
"serde",
|
||||
"serde_json",
|
||||
|
|
@ -1389,17 +1361,17 @@ dependencies = [
|
|||
"tokio",
|
||||
"tracing",
|
||||
"url",
|
||||
"utoipa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "garage_api_common"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"base64 0.21.7",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"crc32c",
|
||||
"crc32fast",
|
||||
"crc-fast",
|
||||
"crypto-common",
|
||||
"futures",
|
||||
"garage_model",
|
||||
|
|
@ -1427,7 +1399,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_api_k2v"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"base64 0.21.7",
|
||||
"futures",
|
||||
|
|
@ -1450,15 +1422,14 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_api_s3"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"aes-gcm",
|
||||
"async-compression",
|
||||
"base64 0.21.7",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"crc32c",
|
||||
"crc32fast",
|
||||
"crc-fast",
|
||||
"form_urlencoded",
|
||||
"futures",
|
||||
"garage_api_common",
|
||||
|
|
@ -1469,6 +1440,7 @@ dependencies = [
|
|||
"garage_table",
|
||||
"garage_util",
|
||||
"hex",
|
||||
"hmac",
|
||||
"http 1.4.0",
|
||||
"http-body-util",
|
||||
"http-range",
|
||||
|
|
@ -1495,7 +1467,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_block"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-compression",
|
||||
|
|
@ -1506,7 +1478,6 @@ dependencies = [
|
|||
"garage_db",
|
||||
"garage_net",
|
||||
"garage_rpc",
|
||||
"garage_table",
|
||||
"garage_util",
|
||||
"hex",
|
||||
"opentelemetry",
|
||||
|
|
@ -1520,7 +1491,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_db"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"fjall",
|
||||
"heed",
|
||||
|
|
@ -1535,8 +1506,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_model"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"argon2",
|
||||
"async-trait",
|
||||
"base64 0.21.7",
|
||||
"blake2",
|
||||
|
|
@ -1562,7 +1534,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_net"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"bytes",
|
||||
|
|
@ -1587,7 +1559,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_rpc"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
|
|
@ -1619,7 +1591,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_table"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
|
|
@ -1640,7 +1612,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_util"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
|
|
@ -1672,7 +1644,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "garage_web"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
dependencies = [
|
||||
"garage_api_common",
|
||||
"garage_api_s3",
|
||||
|
|
@ -1834,7 +1806,9 @@ version = "0.15.5"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
|
||||
dependencies = [
|
||||
"foldhash 0.1.5",
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -1842,11 +1816,6 @@ name = "hashbrown"
|
|||
version = "0.16.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
|
||||
dependencies = [
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash 0.2.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashlink"
|
||||
|
|
@ -2304,6 +2273,8 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
|
|||
dependencies = [
|
||||
"equivalent",
|
||||
"hashbrown 0.16.1",
|
||||
"serde",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -2682,11 +2653,11 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
|
|||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.16.3"
|
||||
version = "0.12.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593"
|
||||
checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
|
||||
dependencies = [
|
||||
"hashbrown 0.16.1",
|
||||
"hashbrown 0.15.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -3108,6 +3079,12 @@ dependencies = [
|
|||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
version = "1.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
|
||||
|
||||
[[package]]
|
||||
name = "path-absolutize"
|
||||
version = "3.1.1"
|
||||
|
|
@ -4832,6 +4809,29 @@ version = "0.2.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
||||
|
||||
[[package]]
|
||||
name = "utoipa"
|
||||
version = "5.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993"
|
||||
dependencies = [
|
||||
"indexmap 2.13.0",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"utoipa-gen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "utoipa-gen"
|
||||
version = "5.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6d79d08d92ab8af4c5e8a6da20c47ae3f61a0f1dabc1997cdf2d082b757ca08b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.4.1"
|
||||
|
|
|
|||
51 Cargo.toml
|
|
@ -24,22 +24,22 @@ default-members = ["src/garage"]
|
|||
|
||||
# Internal Garage crates
|
||||
format_table = { version = "0.1.1", path = "src/format-table" }
|
||||
garage_api_common = { version = "1.3.1", path = "src/api/common" }
|
||||
garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
|
||||
garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
|
||||
garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
|
||||
garage_block = { version = "1.3.1", path = "src/block" }
|
||||
garage_db = { version = "1.3.1", path = "src/db", default-features = false }
|
||||
garage_model = { version = "1.3.1", path = "src/model", default-features = false }
|
||||
garage_net = { version = "1.3.1", path = "src/net" }
|
||||
garage_rpc = { version = "1.3.1", path = "src/rpc" }
|
||||
garage_table = { version = "1.3.1", path = "src/table" }
|
||||
garage_util = { version = "1.3.1", path = "src/util" }
|
||||
garage_web = { version = "1.3.1", path = "src/web" }
|
||||
garage_api_common = { version = "2.2.0", path = "src/api/common" }
|
||||
garage_api_admin = { version = "2.2.0", path = "src/api/admin" }
|
||||
garage_api_s3 = { version = "2.2.0", path = "src/api/s3" }
|
||||
garage_api_k2v = { version = "2.2.0", path = "src/api/k2v" }
|
||||
garage_block = { version = "2.2.0", path = "src/block" }
|
||||
garage_db = { version = "2.2.0", path = "src/db", default-features = false }
|
||||
garage_model = { version = "2.2.0", path = "src/model", default-features = false }
|
||||
garage_net = { version = "2.2.0", path = "src/net" }
|
||||
garage_rpc = { version = "2.2.0", path = "src/rpc" }
|
||||
garage_table = { version = "2.2.0", path = "src/table" }
|
||||
garage_util = { version = "2.2.0", path = "src/util" }
|
||||
garage_web = { version = "2.2.0", path = "src/web" }
|
||||
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
|
||||
|
||||
# External crates from crates.io
|
||||
arc-swap = "1.0"
|
||||
arc-swap = "1.1"
|
||||
argon2 = "0.5"
|
||||
async-trait = "0.1.7"
|
||||
backtrace = "0.3"
|
||||
|
|
@ -48,9 +48,8 @@ blake2 = "0.10"
|
|||
bytes = "1.0"
|
||||
bytesize = "1.1"
|
||||
cfg-if = "1.0"
|
||||
chrono = "0.4"
|
||||
crc32fast = "1.4"
|
||||
crc32c = "0.6"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
crc-fast = "1.6"
|
||||
crypto-common = "0.1"
|
||||
gethostname = "0.4"
|
||||
git-version = "0.3.4"
|
||||
|
|
@ -66,6 +65,7 @@ nix = { version = "0.29", default-features = false, features = ["fs"] }
|
|||
nom = "7.1"
|
||||
parking_lot = "0.12"
|
||||
parse_duration = "2.1"
|
||||
paste = "1.0"
|
||||
pin-project = "1.0.12"
|
||||
pnet_datalink = "0.34"
|
||||
rand = "0.8"
|
||||
|
|
@ -95,12 +95,13 @@ fjall = "2.4"
|
|||
async-compression = { version = "0.4", features = ["tokio", "zstd"] }
|
||||
zstd = { version = "0.13", default-features = false }
|
||||
|
||||
quick-xml = { version = "0.26", features = [ "serialize" ] }
|
||||
quick-xml = { version = "0.26", features = ["serialize"] }
|
||||
rmp-serde = "1.1.2"
|
||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||
serde_bytes = "0.11"
|
||||
serde_json = "1.0"
|
||||
toml = { version = "0.8", default-features = false, features = ["parse"] }
|
||||
utoipa = { version = "5.3.1", features = ["chrono"] }
|
||||
|
||||
# newer version requires rust edition 2021
|
||||
k8s-openapi = { version = "0.21", features = ["v1_24"] }
|
||||
|
|
@ -114,7 +115,7 @@ httpdate = "1.0"
|
|||
http-range = "0.1"
|
||||
http-body-util = "0.1"
|
||||
hyper = { version = "1.0", default-features = false }
|
||||
hyper-util = { version = "0.1", features = [ "full" ] }
|
||||
hyper-util = { version = "0.1", features = ["full"] }
|
||||
multer = "3.0"
|
||||
percent-encoding = "2.2"
|
||||
roxmltree = "0.19"
|
||||
|
|
@ -122,11 +123,11 @@ url = "2.3"
|
|||
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
|
||||
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
|
||||
tokio-util = { version = "0.7", features = ["compat", "io"] }
|
||||
tokio-stream = { version = "0.1", features = ["net"] }
|
||||
|
||||
opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
|
||||
opentelemetry = { version = "0.17", features = ["rt-tokio", "metrics", "trace"] }
|
||||
opentelemetry-prometheus = "0.10"
|
||||
opentelemetry-otlp = "0.10"
|
||||
opentelemetry-contrib = "0.9"
|
||||
|
|
@ -146,8 +147,12 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
|
|||
aws-sdk-config = { version = "1.62", default-features = false }
|
||||
aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }
|
||||
|
||||
[profile.dev]
|
||||
#lto = "thin" # disabled for now, adds 2-4 min to each CI build
|
||||
lto = "off"
|
||||
|
||||
[profile.release]
|
||||
lto = "thin"
|
||||
codegen-units = 16
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 3
|
||||
strip = "debuginfo"
|
||||
strip = true
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage Adminstration API v0</title>
|
||||
<title>Garage administration API v0</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
|
|
|||
|
|
@ -3,10 +3,10 @@ info:
|
|||
version: v0.8.0
|
||||
title: Garage Administration API v0+garage-v0.8.0
|
||||
description: |
|
||||
Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks.
|
||||
|
||||
*Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionnaly, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!*
|
||||
paths:
|
||||
Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks.
|
||||
|
||||
*Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionally, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!*
|
||||
paths:
|
||||
/status:
|
||||
get:
|
||||
tags:
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage Adminstration API v0</title>
|
||||
<title>Garage administration API v1</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
|
|
|||
|
|
@ -3,10 +3,10 @@ info:
|
|||
version: v0.9.0
|
||||
title: Garage Administration API v0+garage-v0.9.0
|
||||
description: |
|
||||
Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks.
|
||||
|
||||
*Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionnaly, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!*
|
||||
paths:
|
||||
Administrate your Garage cluster programmatically, including status, layout, keys, buckets, and maintenance tasks.
|
||||
|
||||
*Disclaimer: The API is not stable yet, hence its v0 tag. The API can change at any time, and changes can include breaking backward compatibility. Read the changelog and upgrade your scripts before upgrading. Additionally, this specification is very early stage and can contain bugs, especially on error return codes/types that are not tested yet. Do not expect a well finished and polished product!*
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
tags:
|
||||
|
|
@ -440,7 +440,7 @@ paths:
|
|||
- "false"
|
||||
example: "true"
|
||||
required: false
|
||||
description: "Wether or not the secret key should be returned in the response"
|
||||
description: "Whether or not the secret key should be returned in the response"
|
||||
responses:
|
||||
'500':
|
||||
description: "The server can not handle your request. Check your connectivity with the rest of the cluster."
|
||||
|
|
|
|||
24 doc/api/garage-admin-v2.html (new file)
|
|
@ -0,0 +1,24 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage administration API v2</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link href="./css/redoc.css" rel="stylesheet">
|
||||
|
||||
<!--
|
||||
Redoc doesn't change outer page styles
|
||||
-->
|
||||
<style>
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<redoc spec-url='./garage-admin-v2.json'></redoc>
|
||||
<script src="./redoc.standalone.js"> </script>
|
||||
</body>
|
||||
</html>
|
||||
4430 doc/api/garage-admin-v2.json (new file)
|
|
@ -51,4 +51,4 @@ We are currently building this SDK for [Python](@/documentation/build/python.md#
|
|||
|
||||
More information:
|
||||
- [In the reference manual](@/documentation/reference-manual/admin-api.md)
|
||||
- [Full specifiction](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html)
|
||||
- [Full specification](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html)
|
||||
|
|
|
|||
|
|
@ -5,13 +5,13 @@ weight = 99
|
|||
|
||||
## S3
|
||||
|
||||
If you are developping a new application, you may want to use Garage to store your user's media.
|
||||
If you are developing a new application, you may want to use Garage to store your user's media.
|
||||
|
||||
The S3 API that Garage uses is a standard REST API, so as long as you can make HTTP requests,
|
||||
you can query it. You can check the [S3 REST API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Operations_Amazon_Simple_Storage_Service.html) from Amazon to learn more.
|
||||
|
||||
Developping your own wrapper around the REST API is time consuming and complicated.
|
||||
Instead, there are some libraries already avalaible.
|
||||
Developing your own wrapper around the REST API is time consuming and complicated.
|
||||
Instead, there are some libraries already available.
|
||||
|
||||
Some of them are maintained by Amazon, some by Minio, others by the community.
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ To configure S3-compatible software to interact with Garage,
|
|||
you will need the following parameters:
|
||||
|
||||
- An **API endpoint**: this corresponds to the HTTP or HTTPS address
|
||||
used to contact the Garage server. When runing Garage locally this will usually
|
||||
used to contact the Garage server. When running Garage locally this will usually
|
||||
be `http://127.0.0.1:3900`. In a real-world setting, you would usually have a reverse-proxy
|
||||
that adds TLS support and makes your Garage server available under a public hostname
|
||||
such as `https://garage.example.com`.
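For instance, here is a minimal sketch of pointing the `mc` client (used later in this documentation) at such an endpoint; the access key ID and secret are placeholders for credentials created beforehand with `garage key create`:

```bash
# Register the Garage endpoint as an alias in minio-client (mc).
# <ACCESS_KEY_ID> and <SECRET_KEY> are hypothetical placeholders.
mc alias set garage http://127.0.0.1:3900 <ACCESS_KEY_ID> <SECRET_KEY>

# List the buckets accessible with this key.
mc ls garage/
```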
|
||||
|
|
|
|||
|
|
@ -12,8 +12,9 @@ In this section, we cover the following web applications:
|
|||
| [Mastodon](#mastodon) | ✅ | Natively supported |
|
||||
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
|
||||
| [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
|
||||
| [Pixelfed](#pixelfed) | ✅ | Natively supported |
|
||||
| [Pleroma](#pleroma) | ❓ | Not yet tested |
|
||||
| [Ente](#ente) | ✅ | Natively supported |
|
||||
| [Pixelfed](#pixelfed) | ❓ | Natively supported |
|
||||
| [Pleroma](#pleroma) | ✅ | Natively supported |
|
||||
| [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
|
||||
| [Funkwhale](#funkwhale) | ❓ | Not yet tested |
|
||||
| [Misskey](#misskey) | ❓ | Not yet tested |
|
||||
|
|
@ -53,7 +54,7 @@ garage bucket allow nextcloud --read --write --key nextcloud-key
|
|||
|
||||
Now edit your Nextcloud configuration file to enable object storage.
|
||||
On my installation, the configuration file is located at the following path: `/var/www/nextcloud/config/config.php`.
|
||||
We will add a new root key to the `$CONFIG` dictionnary named `objectstore`:
|
||||
We will add a new root key to the `$CONFIG` dictionary named `objectstore`:
|
||||
|
||||
```php
|
||||
<?php
|
||||
|
|
@ -412,7 +413,7 @@ mc mirror --newer-than "3h" ./public/system/ garage/mastodon-data
|
|||
|
||||
## Matrix
|
||||
|
||||
Matrix is a chat communication protocol. Its main stable server implementation, [Synapse](https://matrix-org.github.io/synapse/latest/), provides a module to store media on a S3 backend. Additionally, a server independent media store supporting S3 has been developped by the community, it has been made possible thanks to how the matrix API has been designed and will work with implementations like Conduit, Dendrite, etc.
|
||||
Matrix is a chat communication protocol. Its main stable server implementation, [Synapse](https://matrix-org.github.io/synapse/latest/), provides a module to store media on a S3 backend. Additionally, a server independent media store supporting S3 has been developed by the community, it has been made possible thanks to how the matrix API has been designed and will work with implementations like Conduit, Dendrite, etc.
|
||||
|
||||
### synapse-s3-storage-provider (synapse only)
|
||||
|
||||
|
|
@ -449,7 +450,7 @@ media_storage_providers:
|
|||
|
||||
Note that uploaded media will also be stored locally and this behavior cannot be deactivated; it is even required for
some operations like resizing images.
|
||||
In fact, your local filesysem is considered as a cache but without any automated way to garbage collect it.
|
||||
In fact, your local filesystem is considered as a cache but without any automated way to garbage collect it.
|
||||
|
||||
We can build our garbage collector with `s3_media_upload`, a tool provided with the module.
|
||||
If you installed the module with the command provided before, you should be able to bring it in your path:
|
||||
|
|
@ -567,13 +568,186 @@ The module can then be configured with:
|
|||
Other configuration options can be found in the
|
||||
[configuration YAML file](https://github.com/processone/ejabberd-contrib/blob/master/mod_s3_upload/conf/mod_s3_upload.yml).
|
||||
|
||||
|
||||
## Ente
|
||||
|
||||
Ente is an alternative to Google Photos and Apple Photos. It [can be self-hosted](https://help.ente.io/self-hosting/) and works fine with Garage as of May 2024.
|
||||
As a first step we need to create a bucket and a key for Ente:
|
||||
|
||||
```bash
|
||||
garage bucket create ente
|
||||
garage key create ente-key
|
||||
# For the CORS setup to work, the key needs to be --owner as well, at least temporarily.
|
||||
garage bucket allow ente --read --write --owner --key ente-key
|
||||
```
|
||||
|
||||
We also need to set up some CORS rules to allow the Ente frontend to access the bucket:
|
||||
|
||||
```bash
|
||||
export CORS='{"CORSRules":[{"AllowedHeaders":["*"],"AllowedMethods":["GET", "PUT", "POST", "DELETE"],"AllowedOrigins":["*"], "ExposeHeaders":["ETag"]}]}'
|
||||
aws s3api put-bucket-cors --bucket ente --cors-configuration $CORS
|
||||
```
|
||||
|
||||
Now we need to configure ente-server to use our bucket. This is explained [in the Ente S3 documentation](https://help.ente.io/self-hosting/guides/external-s3).
|
||||
Prepare a configuration file for ente's backend as `museum.yaml`:
|
||||
|
||||
```yaml
|
||||
credentials-file: /credentials.yaml
|
||||
apps:
|
||||
public-albums: https://albums.example.tld # If you want to use the share album feature
|
||||
internal:
|
||||
hardcoded-ott:
|
||||
local-domain-suffix: "@example.com" # Your domain
|
||||
local-domain-value: 123456 # Custom One-Time Password since we are not sending mail by default
|
||||
key:
|
||||
# WARNING -- You MUST CHANGE the values below
|
||||
# Someone has made an image that can do it for you : https://github.com/EdyTheCow/ente-selfhost/blob/main/images/ente-server-tools/Dockerfile
|
||||
# Simply build it yourself or run docker run --rm ghcr.io/edythecow/ente-server-tools go run tools/gen-random-keys/main.go
|
||||
encryption: yvmG/RnzKrbCb9L3mgsmoxXr9H7i2Z4qlbT0mL3ln4w= # CHANGE THIS VALUE
|
||||
hash: KXYiG07wC7GIgvCSdg+WmyWdXDAn6XKYJtp/wkEU7x573+byBRAYtpTP0wwvi8i/4l37uicX1dVTUzwH3sLZyw== # CHANGE THIS VALUE
|
||||
jwt:
|
||||
secret: i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8= # CHANGE THIS VALUE
|
||||
```
|
||||
|
||||
The full configuration file can be found [here](https://github.com/ente-io/ente/blob/main/server/configurations/local.yaml).
Then prepare a credentials file as `credentials.yaml`:
|
||||
|
||||
```yaml
|
||||
db:
|
||||
host: postgres
|
||||
port: 5432
|
||||
name: <ente_db_name>
|
||||
user: <pguser>
|
||||
password: <pgpass>
|
||||
|
||||
s3:
|
||||
# Override the primary and secondary hot storage. The commented out values
|
||||
# are the defaults.
|
||||
#
|
||||
hot_storage:
|
||||
primary: b2-eu-cen
|
||||
# secondary: wasabi-eu-central-2-v3
|
||||
|
||||
# If true, enable some workarounds to allow us to use a local minio instance
|
||||
# for object storage.
|
||||
#
|
||||
# 1. Disable SSL.
|
||||
# 2. Use "path" style S3 URLs (see `use_path_style_urls` below).
|
||||
# 3. Directly download the file during replication instead of going via the
|
||||
# Cloudflare worker.
|
||||
# 4. Do not specify storage classes when uploading objects (since minio does
|
||||
# not support them, specifically it doesn't support GLACIER).
|
||||
are_local_buckets: true
|
||||
|
||||
# To use "path" style S3 URLs instead of DNS-based bucket access
|
||||
# default to true if you set "are_local_buckets: true"
|
||||
# use_path_style_urls: true
|
||||
|
||||
b2-eu-cen: # Don't change this key, it is hardcoded
|
||||
key: <keyID>
|
||||
secret: <keySecret>
|
||||
endpoint: garage:3900 # publicly accessible endpoint of your garage instance
|
||||
region: garage
|
||||
bucket: <yourbucketName>
|
||||
use_path_style: true
|
||||
# you can specify secondary locations, names are hardcoded as well
|
||||
# wasabi-eu-central-2-v3:
|
||||
# scw-eu-fr-v3:
|
||||
|
||||
# and you can also specify a bucket to be used for embeddings, preview etc..
|
||||
# default to the first bucket
|
||||
# derived-storage: wasabi-eu-central-2-derived
|
||||
```
|
||||
|
||||
Finally, you can run it with Docker:
|
||||
|
||||
```bash
|
||||
docker run -d --name ente-server --restart unless-stopped -v /path/to/museum.yaml:/museum.yaml -v /path/to/credentials.yaml:/credentials.yaml -p 8080:8080 ghcr.io/ente-io/ente-server
|
||||
```
|
||||
|
||||
For more information on deployment, you can check the [Ente documentation](https://help.ente.io/self-hosting/).
|
||||
|
||||
## Pixelfed
|
||||
|
||||
[Pixelfed Technical Documentation > Configuration](https://docs.pixelfed.org/technical-documentation/env.html#filesystem)
|
||||
|
||||
## Pleroma
|
||||
|
||||
[Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
|
||||
### Creating your bucket
|
||||
|
||||
This is the usual Garage setup:
|
||||
|
||||
```bash
|
||||
garage key new --name pleroma-key
|
||||
garage bucket create pleroma
|
||||
garage bucket allow pleroma --read --write --owner --key pleroma-key
|
||||
```
|
||||
|
||||
We also need to expose these buckets publicly to serve their content to users:
|
||||
|
||||
```bash
|
||||
garage bucket website --allow pleroma
|
||||
```
|
||||
|
||||
Note the Key ID and Secret Key.
|
||||
|
||||
### Configure Pleroma
|
||||
|
||||
Update your Pleroma configuration in `/etc/pleroma/config.exs` as follows.
|
||||
|
||||
```
|
||||
config :pleroma, Pleroma.Upload,
|
||||
uploader: Pleroma.Uploaders.S3,
|
||||
base_url: "https://pleroma.garage.example.tld"
|
||||
|
||||
config :ex_aws, :s3,
|
||||
access_key_id: "GW...",
|
||||
secret_access_key: "XXX",
|
||||
region: "garage",
|
||||
host: "api.garage.example.tld"
|
||||
```
|
||||
|
||||
And restart Pleroma.
|
||||
|
||||
You can find more information in [Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
|
||||
|
||||
### Migrating your data
|
||||
|
||||
Pleroma has an internal migration tool, but it can encounter a fatal error such as:
|
||||
|
||||
```
|
||||
** (EXIT from #PID<0.98.0>) an exception was raised:
|
||||
** (File.Error) could not stream "/var/lib/pleroma/uploads/09/f8": illegal operation on a directory
|
||||
(elixir 1.17.3) lib/file/stream.ex:100: anonymous fn/3 in Enumerable.File.Stream.reduce/3
|
||||
(elixir 1.17.3) lib/stream.ex:1675: anonymous fn/5 in Stream.resource/3
|
||||
(elixir 1.17.3) lib/stream.ex:1891: Enumerable.Stream.do_each/4
|
||||
(elixir 1.17.3) lib/task/supervised.ex:370: Task.Supervised.stream_reduce/7
|
||||
(elixir 1.17.3) lib/enum.ex:4423: Enum.map/2
|
||||
(ex_aws_s3 2.5.8) lib/ex_aws/s3/upload.ex:141: ExAws.Operation.ExAws.S3.Upload.perform/2
|
||||
(pleroma 2.10.0) lib/pleroma/uploaders/s3.ex:60: Pleroma.Uploaders.S3.put_file/1
|
||||
(pleroma 2.10.0) lib/pleroma/uploaders/uploader.ex:49: Pleroma.Uploaders.Uploader.put_file/2
|
||||
```
|
||||
|
||||
So, use [your best tool](https://garagehq.deuxfleurs.fr/documentation/connect/cli/) to sync `/var/lib/pleroma/uploads/` to your S3 bucket.
|
||||
|
||||
Then, to work around this problem (just in case), run this command:
|
||||
|
||||
```bash
|
||||
while true
|
||||
do
|
||||
rm -vr $(./bin/pleroma_ctl uploads migrate_local S3 2>&1 | grep "could not stream" | awk -F '"' '{print $2}')
|
||||
sleep 5
|
||||
done
|
||||
```
|
||||
|
||||
If you have many files, stop this command from time to time and run the command below (interactive) to delete local
files after upload. Then restart the loop.
|
||||
|
||||
```bash
|
||||
./bin/pleroma_ctl uploads migrate_local S3 --delete
|
||||
```
|
||||
|
||||
And *voilà*
|
||||
|
||||
## Lemmy
|
||||
|
||||
|
|
|
|||
|
|
@ -207,3 +207,13 @@ $ plakar at @garageS3 ls
|
|||
```
|
||||
|
||||
More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/
|
||||
|
||||
## Synology HyperBackup
|
||||
|
||||
HyperBackup can be configured to upload backups to Garage using a custom S3 destination. However, the HyperBackup client hardcodes the `us-east-1` region, which is a critical input to the v4 signature process. If Garage is not set to `us-east-1`, HyperBackup will recognize available buckets, but fail during the final setup stage.

In `garage.toml`:
|
||||
```toml
|
||||
[s3_api]
|
||||
s3_region = "us-east-1"
|
||||
```
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ Some commands:
|
|||
# list buckets
|
||||
mc ls garage/
|
||||
|
||||
# list objets in a bucket
|
||||
# list objects in a bucket
|
||||
mc ls garage/my_files
|
||||
|
||||
# copy from your filesystem to garage
|
||||
|
|
@ -149,6 +149,15 @@ rclone help
|
|||
This will tremendously accelerate operations such as `rclone sync` or `rclone ncdu` by reducing the number
|
||||
of ListObjects calls that are made.
|
||||
|
||||
**Garage behind Cloudflare proxy:** when running Garage behind the Cloudflare proxy, you might see a `Response: error 403 Forbidden, Forbidden: Invalid signature` error in your Garage logs or an `AccessDenied: Forbidden: Invalid signature` error in the rclone logs. Try adding the `--s3-sign-accept-encoding=false` flag to your rclone command and see if the issue is resolved.
|
||||
|
||||
```bash
|
||||
# this throws an error
|
||||
rclone lsd garage:
|
||||
|
||||
# this should work
|
||||
rclone lsd --s3-sign-accept-encoding=false garage:
|
||||
```
|
||||
|
||||
## `s3cmd`
|
||||
|
||||
|
|
@ -209,7 +218,7 @@ Within Cyberduck, a
|
|||
available within the `Preferences -> Profiles` section. This can be enabled and
then connections to Garage may be configured.
|
||||
|
||||
### Instuctions for the CLI
|
||||
### Instructions for the CLI
|
||||
|
||||
To configure duck (Cyberduck's CLI tool), start by creating its folder hierarchy:
|
||||
|
||||
|
|
@ -314,4 +323,3 @@ ls
|
|||
```
|
||||
|
||||
And through the web interface at http://[::1]:8080/web/client
|
||||
|
||||
|
|
|
|||
|
|
@ -201,11 +201,9 @@ on the binary cache, the client will download the result from the cache instead
|
|||
|
||||
### Channels
|
||||
|
||||
Channels additionnaly serve Nix definitions, ie. a `.nix` file referencing
|
||||
Channels additionally serve Nix definitions, ie. a `.nix` file referencing
|
||||
all the derivations you want to serve.
|
||||
|
||||
## Gitlab
|
||||
|
||||
*External link:* [Gitlab Documentation > Object storage](https://docs.gitlab.com/ee/administration/object_storage.html)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -8,12 +8,12 @@ have published Ansible roles. We list them and compare them below.
|
|||
|
||||
## Comparison of Ansible roles
|
||||
|
||||
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
|
||||
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster2309 ansible-role-garage](#eddster2309-ansible-role-garage) |
|
||||
|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
|
||||
| **Runtime** | Systemd | Docker | Systemd |
|
||||
| **Target OS** | Any Linux | Any Linux | Any Linux |
|
||||
| **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
|
||||
| **Additional software** | None | Traefik | Ngnix and Keepalived (optional) |
|
||||
| **Additional software** | None | Traefik | Nginx and Keepalived (optional) |
|
||||
| **Automatic node connection** | ❌ | ✅ | ✅ |
|
||||
| **Layout management** | ❌ | ✅ | ✅ |
|
||||
| **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ |
|
||||
|
|
|
|||
|
|
@ -29,6 +29,10 @@ it's stable).
|
|||
|
||||
Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).
|
||||
|
||||
```bash
|
||||
pacman -S garage
|
||||
```
|
||||
|
||||
## FreeBSD
|
||||
|
||||
```bash
|
||||
|
|
@ -40,3 +44,9 @@ pkg install garage
|
|||
```bash
|
||||
nix-shell -p garage
|
||||
```
|
||||
|
||||
## conda-forge
|
||||
|
||||
```bash
|
||||
pixi global install garage
|
||||
```
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ by adding encryption at different levels.
|
|||
|
||||
We would be very curious to know your needs and thoughts about ideas such as
|
||||
encryption practices and things like key management, as we want Garage to be a
|
||||
serious base platform for the developpment of secure, encrypted applications.
|
||||
serious base platform for the development of secure, encrypted applications.
|
||||
Do not hesitate to come talk to us if you have any thoughts or questions on the
|
||||
subject.
|
||||
|
||||
|
|
@ -59,7 +59,7 @@ For standard S3 API requests, Garage does not encrypt data at rest by itself.
|
|||
For the most generic at rest encryption of data, we recommend setting up your
|
||||
storage partitions on encrypted LUKS devices.
|
||||
|
||||
If you are developping your own client software that makes use of S3 storage,
|
||||
If you are developing your own client software that makes use of S3 storage,
|
||||
we recommend implementing data encryption directly on the client side and never
|
||||
transmitting plaintext data to Garage. This makes it easy to use an external
|
||||
untrusted storage provider if necessary.
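As a minimal illustration of this approach (assuming the `gpg` and `aws` CLI tools, an endpoint at `https://garage.example.com`, and a hypothetical bucket named `my-bucket`):

```bash
# Encrypt locally, then upload only the ciphertext: Garage never sees the plaintext.
gpg --symmetric --cipher-algo AES256 --output backup.tar.gpg backup.tar
aws --endpoint-url https://garage.example.com s3 cp backup.tar.gpg s3://my-bucket/
```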
|
||||
|
|
@ -108,14 +108,14 @@ Protects against the following threats:
|
|||
|
||||
- Stolen HDD
|
||||
|
||||
Crucially, does not protect againt malicious sysadmins or remote attackers that
|
||||
Crucially, does not protect against malicious sysadmins or remote attackers that
|
||||
might gain access to your servers.
|
||||
|
||||
Methods include full-disk encryption with tools such as LUKS.
|
||||
|
||||
## Encrypting data on the client side
|
||||
|
||||
Protects againt the following threats:
|
||||
Protects against the following threats:
|
||||
|
||||
- An honest-but-curious administrator
|
||||
- A malicious administrator that tries to corrupt your data
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ There are three methods to expose buckets as website:
|
|||
|
||||
1. using the PutBucketWebsite S3 API call, which is allowed for access keys that have the owner permission bit set
|
||||
|
||||
2. from the Garage CLI, by an adminstrator of the cluster
|
||||
2. from the Garage CLI, by an administrator of the cluster
|
||||
|
||||
3. using the Garage administration API
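As a sketch of the second method, assuming a hypothetical bucket named `my-bucket` that already exists:

```bash
# Allow the bucket to be served as a static website (run by a cluster administrator).
garage bucket website --allow my-bucket
```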
|
||||
|
||||
|
|
|
|||
|
|
@ -20,12 +20,12 @@ sudo apt-get update
|
|||
sudo apt-get install build-essential
|
||||
```
|
||||
|
||||
## Building from source from the Gitea repository
|
||||
## Building from source from the Forgejo repository
|
||||
|
||||
The primary location for Garage's source code is the
|
||||
[Gitea repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
|
||||
[Forgejo repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
|
||||
which contains all of the released versions as well as the code
|
||||
for the developpement of the next version.
|
||||
for the development of the next version.
|
||||
|
||||
Clone the repository and enter it as follows:
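A minimal sketch, using the repository URL mentioned above:

```bash
# Clone the Garage source from the Forgejo repository and enter it
# (the directory name "garage" is assumed here).
git clone https://git.deuxfleurs.fr/Deuxfleurs/garage.git
cd garage
```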
|
||||
|
||||
|
|
@ -41,7 +41,7 @@ git tag # List available tags
|
|||
git checkout v0.8.0 # Change v0.8.0 with the version you wish to build
|
||||
```
|
||||
|
||||
Otherwise you will be building a developpement build from the `main` branch
|
||||
Otherwise you will be building a development build from the `main` branch
|
||||
that includes all of the changes to be released in the next version.
|
||||
Be careful that such a build might be unstable or contain bugs,
|
||||
and could be incompatible with nodes that run stable versions of Garage.
|
||||
|
|
@ -85,11 +85,14 @@ The following feature flags are available in v0.8.0:
|
|||
| Feature flag | Enabled | Description |
|
||||
| ------------ | ------- | ----------- |
|
||||
| `bundled-libs` | *by default* | Use bundled version of sqlite3, zstd, lmdb and libsodium |
|
||||
| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium<br>if available (exclusive with `bundled-libs`, build using<br>`cargo build --no-default-features --features system-libs`) |
|
||||
| `consul-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Consul API |
|
||||
| `fjall` | experimental | Enable using Fjall to store Garage's metadata |
|
||||
| `journald` | optional | Enable logging to systemd-journald with<br>`GARAGE_LOG_TO_JOURNALD=true` environment variable set |
|
||||
| `k2v` | optional | Enable the experimental K2V API (if used, all nodes on your<br>Garage cluster must have it enabled as well) |
|
||||
| `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
|
||||
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
||||
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
||||
| `syslog` | optional | Enable logging to Syslog |
|
||||
| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
|
||||
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
||||
| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
|
||||
| `syslog` | optional | Enable logging to Syslog with<br>`GARAGE_LOG_TO_SYSLOG=true` environment variable set |
|
||||
| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium<br>if available (exclusive with `bundled-libs`, build using<br>`cargo build --no-default-features --features system-libs`) |
|
||||
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ Or deploy with custom values:
|
|||
helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
|
||||
```
|
||||
|
||||
If you want to manage the CustomRessourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart:
|
||||
If you want to manage the CustomResourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart:
|
||||
|
||||
```bash
|
||||
kubectl apply -k ../k8s/crd
|
||||
|
|
@ -47,12 +47,12 @@ All possible configuration values can be found with:
|
|||
helm show values ./garage
|
||||
```
|
||||
|
||||
This is an example `values.overrride.yaml` for deploying in a microk8s cluster with a https s3 api ingress route:
|
||||
This is an example `values.override.yaml` for deploying in a microk8s cluster with a https s3 api ingress route:
|
||||
|
||||
```yaml
|
||||
garage:
|
||||
# Use only 2 replicas per object
|
||||
replicationMode: "2"
|
||||
replicationFactor: 2
|
||||
|
||||
# Start 4 instances (StatefulSets) of garage
|
||||
deployment:
|
||||
|
|
|
|||
|
|
@ -96,14 +96,14 @@ to store 2 TB of data in total.
|
|||
## Get a Docker image
|
||||
|
||||
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||
We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
|
||||
For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
|
||||
We encourage you to use a fixed tag (eg. `v2.2.0`) and not the `latest` tag.
|
||||
For this example, we will use the latest published version at the time of the writing which is `v2.2.0` but it's up to you
|
||||
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
sudo docker pull dxflrs/garage:v1.3.0
|
||||
sudo docker pull dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
## Deploying and configuring Garage
|
||||
|
|
@ -171,7 +171,7 @@ docker run \
|
|||
-v /etc/garage.toml:/etc/garage.toml \
|
||||
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
||||
-v /var/lib/garage/data:/var/lib/garage/data \
|
||||
dxflrs/garage:v1.3.0
|
||||
dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
With this command line, Garage should be started automatically at each boot.
|
||||
|
|
@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
|
|||
version: "3"
|
||||
services:
|
||||
garage:
|
||||
image: dxflrs/garage:v1.3.0
|
||||
image: dxflrs/garage:v2.2.0
|
||||
network_mode: "host"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ The main reason to add a reverse proxy in front of Garage is to provide TLS to y
|
|||
|
||||
In production you will likely need your certificates signed by a certificate authority.
|
||||
The most automated way is to use a provider supporting the [ACME protocol](https://datatracker.ietf.org/doc/html/rfc8555)
|
||||
such as [Let's Encrypt](https://letsencrypt.org/), [ZeroSSL](https://zerossl.com/) or [Buypass Go SSL](https://www.buypass.com/ssl/products/acme).
|
||||
such as [Let's Encrypt](https://letsencrypt.org/) or [ZeroSSL](https://zerossl.com/).
|
||||
|
||||
If you are only testing Garage, you can generate a self-signed certificate to follow the documentation:
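A minimal sketch with `openssl` (the hostname `garage.example.com` is only an example):

```bash
# Generate a self-signed certificate and key, valid for one year.
openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
  -subj "/CN=garage.example.com" \
  -keyout garage.key -out garage.crt
```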
|
||||
|
||||
|
|
@ -97,7 +97,7 @@ server {
|
|||
location / {
|
||||
proxy_pass http://s3_backend;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header Host $http_host;
|
||||
# Disable buffering to a temporary file.
|
||||
proxy_max_temp_file_size 0;
|
||||
}
|
||||
|
|
@ -272,7 +272,7 @@ Add the following configuration section [to compress response](https://doc.traef
|
|||
|
||||
### Add caching response
|
||||
|
||||
Traefik's caching middleware is only available on [entreprise version](https://doc.traefik.io/traefik-enterprise/middlewares/http-cache/), however the freely-available [Souin plugin](https://github.com/darkweak/souin#tr%C3%A6fik-container) can also do the job. (section to be completed)
|
||||
Traefik's caching middleware is only available on [enterprise version](https://doc.traefik.io/traefik-enterprise/middlewares/http-cache/), however the freely-available [Souin plugin](https://github.com/darkweak/souin#tr%C3%A6fik-container) can also do the job. (section to be completed)
|
||||
|
||||
### Complete example
|
||||
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ WantedBy=multi-user.target
|
|||
id is dynamically allocated by systemd (set with `DynamicUser=true`). It cannot
|
||||
access (read or write) home folders (`/home`, `/root` and `/run/user`), the
|
||||
rest of the filesystem can only be read but not written, only the path seen as
|
||||
`/var/lib/garage` is writable as seen by the service. Additionnaly, the process
|
||||
`/var/lib/garage` is writable as seen by the service. Additionally, the process
|
||||
can not gain new privileges over time.
|
||||
|
||||
For this to work correctly, your `garage.toml` must be set with
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ perspective. It will allow you to understand if Garage is a good fit for

you, how to better use it, how to contribute to it, what Garage can
and cannot do, etc.

- **[Goals and use cases](@/documentation/design/goals.md):** This page explains why Garage was conceived and what practical use cases it targets.

- **[Related work](@/documentation/design/related-work.md):** This page presents the theoretical background on which Garage is built, and describes other software storage solutions and why they didn't work for us.

@ -31,5 +31,3 @@ We love to talk and hear about Garage, that's why we keep a log here:
|
|||
- [(en, 2021-04-28) Distributed object storage is centralised](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/b1f60579a13d3c5eba7f74b1775c84639ea9b51a/doc/talks/2021-04-28_spirals-team/talk.pdf)
|
||||
|
||||
- [(fr, 2020-12-02) Garage : jouer dans la cour des grands quand on est un hébergeur associatif](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/b1f60579a13d3c5eba7f74b1775c84639ea9b51a/doc/talks/2020-12-02_wide-team/talk.pdf)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -15,14 +15,14 @@ The more a user request will require intra-cluster requests to complete, the mor

This is especially true for sequential requests: requests that must wait for the result of another request before they can be sent.
We designed Garage without consensus algorithms (e.g. Paxos or Raft) to minimize the number of sequential and parallel requests.

This series of benchmarks quantifies the impact of this design choice.

### On a simple simulated network

We start with a controlled environment: all the instances run on the same (powerful enough) machine.

To control the network latency, we simulate the network with [mknet](https://git.deuxfleurs.fr/trinity-1686a/mknet) (a tool we developed, based on `tc` and the linux network stack).
To measure S3 endpoint latency, we use our own tool [s3lat](https://git.deuxfleurs.fr/quentin/s3lat/) to observe only the intra-cluster latency and not some contention on the nodes (CPU, RAM, disk I/O, network bandwidth, etc.).
Compared to other benchmark tools, S3Lat sends only one (small) request at a time and measures its latency.
We selected 5 standard endpoints that are often in the critical path: ListBuckets, ListObjects, GetObject, PutObject and RemoveObject.

@ -32,7 +32,7 @@ In this first benchmark, we consider 5 instances that are located in a different

Compared to garage, minio latency drastically increases on 3 endpoints: GetObject, PutObject, RemoveObject.

We suppose that these requests on minio make transactions over Raft, involving 4 sequential requests: 1) sending the message to the leader, 2) having the leader dispatch it to the other nodes, 3) waiting for the confirmation of followers and finally 4) committing it. With our current configuration, one Raft transaction will take around 400 ms. GetObject seems to correlate to 1 transaction while PutObject and RemoveObject seem to correlate to 2 or 3. Reviewing minio code would be required to confirm this hypothesis.

Conversely, garage uses an architecture similar to DynamoDB and never requires global cluster coordination to answer a request.
Instead, garage can always contact the right node in charge of the requested data, and can answer with as little as one request in the case of GetObject and PutObject. We also observed that Garage latency, while often lower than minio's, is more dispersed: garage is still in beta and has not received any performance optimization yet.

@ -50,7 +50,7 @@ We plot a similar graph as before:
|
|||
|
||||
This new graph is very similar to the one before, neither minio or garage seems to benefit from this new topology, but they also do not suffer from it.
|
||||
|
||||
Considering garage, this is expected: nodes in the same DC are put in the same zone, and then data are spread on different zones for data resiliency and availaibility.
|
||||
Considering garage, this is expected: nodes in the same DC are put in the same zone, and then data are spread on different zones for data resiliency and availability.
|
||||
Then, in the default mode, requesting data requires to query at least 2 zones to be sure that we have the most up to date information.
|
||||
These requests will involve at least one inter-DC communication.
|
||||
In other words, we prioritize data availability and synchronization over raw performances.
|
||||
|
|
|
|||
|
|
@ -59,11 +59,13 @@ Garage themselves for the following tasks:
|
|||
|
||||
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
|
||||
|
||||
- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
|
||||
- As a PowerDNS authoritative zone backend through [Lightning Stream](https://doc.powerdns.com/lightningstream/latest/index.html) and [LMDB](https://doc.powerdns.com/authoritative/backends/lmdb.html)
|
||||
|
||||
- As a Mastodon media storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
|
||||
|
||||
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
|
||||
|
||||
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
|
||||
|
||||
Triplebit's Garage cluster is a multi-site cluster currently composed of
|
||||
10 nodes in 3 physical locations.
|
||||
15 storage nodes in 3 physical locations.
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ delete a tombstone, the following condition has to be met:
|
|||
|
||||
- All nodes responsible for storing this entry are aware of the existence of
|
||||
the tombstone, i.e. they cannot hold another version of the entry that is
|
||||
superseeded by the tombstone. This ensures that deleting the tombstone is
|
||||
superseded by the tombstone. This ensures that deleting the tombstone is
|
||||
safe and that no deleted value will come back in the system.
|
||||
|
||||
Garage uses atomic database operations (such as compare-and-swap and
|
||||
|
|
@ -141,4 +141,3 @@ rebalance of data, this would have led to the disk utilization to explode
|
|||
during the rebalancing, only to shrink again after 24 hours. The 10-minute
|
||||
delay is a compromise that gives good security while not having this problem of
|
||||
disk space explosion on rebalance.
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ However, Amazon S3 source code is not open but alternatives were proposed.
|
|||
We identified Minio, Pithos, Swift and Ceph.
|
||||
Minio/Ceph enforces a total order, so properties similar to a (relaxed) filesystem.
|
||||
Swift and Pithos are probably the most similar to AWS S3 with their consistent hashing ring.
|
||||
However Pithos is not maintained anymore. More precisely the company that published Pithos version 1 has developped a second version 2 but has not open sourced it.
|
||||
However Pithos is not maintained anymore. More precisely the company that published Pithos version 1 has developed a second version 2 but has not open sourced it.
|
||||
Some tests conducted by the [ACIDES project](https://acides.org/) have shown that Openstack Swift consumes way more resources (CPU+RAM) that we can afford. Furthermore, people developing Swift have not designed their software for geo-distribution.
|
||||
|
||||
There were many attempts in research too. I am only thinking to [LBFS](https://pdos.csail.mit.edu/papers/lbfs:sosp01/lbfs.pdf) that was used as a basis for Seafile. But none of them have been effectively implemented yet.
|
||||
|
|
@ -63,7 +63,7 @@ Due to its industry oriented design, Ceph is also far from being *Simple* to ope

In a certain way, Ceph and MinIO are closer to each other than they are to Garage or OpenStack Swift.

**[Pithos](https://github.com/exoscale/pithos):**
Pithos has been abandoned and should probably not be used; in the following we explain why we did not pick its design.
Pithos worked as an S3 proxy in front of Cassandra (and worked with Scylla DB too).
According to its designers, storing data in Cassandra showed its limitations, justifying the project's abandonment.
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.

@ -23,7 +23,7 @@ This logic is defined in `nix/build_index.nix`.
|
|||
For each commit, we first pass the code to a formatter (rustfmt) and a linter (clippy).
|
||||
Then we try to build it in debug mode and run both unit tests and our integration tests.
|
||||
|
||||
Additionnaly, when releasing, our integration tests are run on the release build for amd64 and i686.
|
||||
Additionally, when releasing, our integration tests are run on the release build for amd64 and i686.
|
||||
|
||||
## Generated Artifacts
|
||||
|
||||
|
|
@ -32,7 +32,7 @@ We generate the following binary artifacts for now:
|
|||
- **os**: linux
|
||||
- **format**: static binary, docker container
|
||||
|
||||
Additionnaly we also build two web pages and one JSON document:
|
||||
Additionally we also build two web pages and one JSON document:
|
||||
- the documentation (this website)
|
||||
- [the release page](https://garagehq.deuxfleurs.fr/_releases.html)
|
||||
- [the release list in JSON format](https://garagehq.deuxfleurs.fr/_releases.json)
|
||||
|
|
@ -67,7 +67,7 @@ nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/
|
|||
The previous command will only send the built package and not its dependencies.
|
||||
In the case of our CI pipeline, we want to cache all intermediate build steps
|
||||
as well. This can be done using this quite involved command (here as an example
|
||||
for the `pkgs.amd64.relase` package):
|
||||
for the `pkgs.amd64.release` package):
|
||||
|
||||
```bash
|
||||
nix copy -j8 \
|
||||
|
|
@ -174,5 +174,3 @@ drone sign --save Deuxfleurs/garage
|
|||
```
|
||||
|
||||
Looking at the file, you will see that most of the commands are `nix-shell` and `nix-build` commands with various parameters.
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ You may pause an ongoing scrub using `garage repair scrub pause`, but note that

the scrub will resume automatically 24 hours later as Garage will not let your
cluster run without a regular scrub. If the scrub procedure is too intensive
for your servers and is slowing down your workload, the recommended solution
is to increase the "scrub tranquility" using `garage worker set scrub-tranquility`.
A higher tranquility value will make Garage take longer pauses between two block
verifications. Of course, scrubbing the entire data store will also take longer.
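
For example (a sketch; the tranquility value is an assumption, tune it to your hardware):

```bash
garage worker set scrub-tranquility 4
```
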
|
|
@ -242,7 +242,7 @@ dc3 Tags Partitions Capacity Usable capacity
|
|||
TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%)
|
||||
```
|
||||
|
||||
As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximatively),
|
||||
As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximately),
|
||||
whereas the node that was already in `dc3` (node3) is used at 75%.
|
||||
|
||||
This can be explained by the following:
|
||||
|
|
@ -260,7 +260,7 @@ This can be explained by the following:
|
|||
data can be removed to be moved to node1.
|
||||
|
||||
- Garage will move data in equal proportions from all possible sources, in this
|
||||
case it means that it will tranfer 25% of the entire data set from node3 to
|
||||
case it means that it will transfer 25% of the entire data set from node3 to
|
||||
node1 and another 25% from node4 to node1.
|
||||
|
||||
This explains why node3 ends with 75% utilization (100% from before minus 25%
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ First of all, Garage divides the set of all possible block hashes

in a fixed number of slices (currently 1024), and assigns
to each slice a primary storage location among the specified data directories.
The number of slices having their primary location in each data directory
is proportional to the capacity specified in the config file.

When Garage receives a block to write, it will always write it in the primary
directory of the slice that contains its hash.
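
As an illustration, a multi-directory layout in `garage.toml` might be declared as follows (a sketch; paths and capacities are assumptions):

```toml
data_dir = [
    { path = "/mnt/hdd1", capacity = "2T" },
    { path = "/mnt/hdd2", capacity = "4T" },
]
```
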
@ -161,4 +161,7 @@ your recovery options are as follows:
|
|||
|
||||
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
|
||||
BTRFS to snapshot your metadata partition, refer to their specific
|
||||
documentation on rolling back or copying files from an old snapshot.
|
||||
documentation on rolling back or copying files from an old snapshot.
|
||||
Note that, depending on the properties of the filesystem and of the DB engine,
|
||||
if these snapshots were taken during a write operation to the database, they may
|
||||
also be corrupted and thus unfit for recovery.
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ From a high level perspective, a major upgrade looks like this:
|
|||
10. Enable API access (reverse step 1)
|
||||
11. Monitor your cluster while load comes back, check that all your applications are happy with this new version
|
||||
|
||||
### Major upgarades with minimal downtime
|
||||
### Major upgrades with minimal downtime
|
||||
|
||||
There is only one operation that has to be coordinated cluster-wide: the switch of one version of the internal RPC protocol to the next.
|
||||
This means that an upgrade with very limited downtime can simply be performed from one major version to the next by restarting all nodes
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ docker run \
|
|||
-v /path/to/garage.toml:/etc/garage.toml \
|
||||
-v /path/to/garage/meta:/var/lib/garage/meta \
|
||||
-v /path/to/garage/data:/var/lib/garage/data \
|
||||
dxflrs/garage:v1.3.0
|
||||
dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
|
||||
|
|
|
|||
|
|
@ -6,41 +6,167 @@ weight = 40
|
|||
The Garage administration API is accessible through a dedicated server whose
|
||||
listen address is specified in the `[admin]` section of the configuration
|
||||
file (see [configuration file
|
||||
reference](@/documentation/reference-manual/configuration.md))
|
||||
reference](@/documentation/reference-manual/configuration.md)).
|
||||
|
||||
**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
|
||||
We will bump the version numbers prefixed to each API endpoint each time the syntax
|
||||
or semantics change, meaning that code that relies on these endpoint will break
|
||||
when changes are introduced.
|
||||
|
||||
Versions:
|
||||
- Before Garage 0.7.2 - no admin API
|
||||
- Garage 0.7.2 - admin APIv0
|
||||
- Garage 0.9.0 - admin APIv1, deprecate admin APIv0
|
||||
The current version of the admin API is v2. No breaking changes to the Garage
|
||||
administration API will be published outside of a major release.
|
||||
|
||||
History of previous versions:
|
||||
|
||||
- Before Garage v0.7.2 - no admin API
|
||||
- Garage v0.7.2 - admin API v0
|
||||
- Garage v0.9.0 - admin API v1, deprecate admin API v0
|
||||
- Garage v2.0.0 - admin API v2, deprecate admin API v1
|
||||
|
||||
## Access control
|
||||
|
||||
The admin API uses two different tokens for access control, that are specified in the config file's `[admin]` section:
|
||||
### Using an API token
|
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||
is not set in the config file, the Metrics endpoint can be accessed without
|
||||
access control);
|
||||
|
||||
- `admin_token`: the token for accessing all of the other administration
|
||||
endpoints (if this token is not set in the config file, access to these
|
||||
endpoints is disabled entirely).
|
||||
|
||||
These tokens are used as simple HTTP bearer tokens. In other words, to
|
||||
authenticate access to an admin API endpoint, add the following HTTP header
|
||||
to your request:
|
||||
Administration API tokens are used as simple HTTP bearer tokens. In
other words, to authenticate access to an admin API endpoint, add the following
HTTP header to your request:
|
||||
|
||||
```
|
||||
Authorization: Bearer <token>
|
||||
```
|
||||
|
||||
## Administration API endpoints
|
||||
### User-defined API tokens
|
||||
|
||||
Cluster administrators may dynamically define administration tokens using the CLI commands under `garage admin-token`.
|
||||
Such tokens may be limited in scope, meaning that they may enable access to only a subset of API calls.
|
||||
They may also have an expiration date to limit their use in time.
|
||||
|
||||
Here is an example to create an administration token that is valid for 30 days
|
||||
and gives access to only a subset of API calls, allowing it to create buckets
|
||||
and access keys and give keys permissions on buckets:
|
||||
|
||||
```bash
|
||||
$ garage admin-token create --expires-in 30d \
|
||||
--scope ListBuckets,GetBucketInfo,ListKeys,GetKeyInfo,CreateBucket,CreateKey,AllowBucketKey,DenyBucketKey \
|
||||
my-token
|
||||
This is your secret bearer token, it will not be shown again by Garage:
|
||||
|
||||
8ed1830b10a276ff57061950.kOSIpxWK9zSGbTO9Xadpv3YndSFWma0_snXcYHaORXk
|
||||
|
||||
==== ADMINISTRATION TOKEN INFORMATION ====
|
||||
Token ID: 8ed1830b10a276ff57061950
|
||||
Token name: my-token
|
||||
Created: 2025-06-15 15:12:44.160 +02:00
|
||||
Validity: valid
|
||||
Expiration: 2025-07-15 15:12:44.117 +02:00
|
||||
|
||||
Scope: ListBuckets
|
||||
GetBucketInfo
|
||||
ListKeys
|
||||
GetKeyInfo
|
||||
CreateBucket
|
||||
CreateKey
|
||||
AllowBucketKey
|
||||
DenyBucketKey
|
||||
```
|
||||
|
||||
When running this command, your token will be shown only once and **will never
|
||||
be shown again by Garage**, so make sure to save it directly. The token is
|
||||
hashed internally, and is identified by its prefix (32 hex digits followed by a
|
||||
dot) which is saved in clear.
|
||||
|
||||
When running `garage admin-token list`, you might see something like this:
|
||||
|
||||
```
|
||||
ID Created Name Expiration Scope
|
||||
- - metrics_token (from daemon configuration) never Metrics
|
||||
8ed1830b10a276ff57061950 2025-06-15 my-token 2025-07-15 15:12:44.117 +02:00 ListBuckets, ... (8)
|
||||
```
|
||||
|
||||
### Master API tokens
|
||||
|
||||
The admin API can also use two different master tokens for access control,
|
||||
specified in the config file's `[admin]` section:
|
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint. If this token
|
||||
is not set in the config file, the Metrics endpoint can be accessed without
|
||||
access control.
|
||||
|
||||
- `admin_token`: the token for accessing all of the other administration
|
||||
endpoints. If this token is not set in the config file, access to these
|
||||
endpoints is only possible with a user-defined admin token.
|
||||
|
||||
With the introduction of multiple user-defined admin tokens, the use of master
|
||||
API tokens is now discouraged.
|
||||
|
||||
|
||||
## Using the admin API
|
||||
|
||||
All of the admin API endpoints are described in the OpenAPI specification:
|
||||
|
||||
- APIv2 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.html) - [OpenAPI JSON](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.json)
|
||||
- APIv1 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
|
||||
- APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
|
||||
|
||||
Making a request to the API from the command line can be as simple as running:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v2/GetClusterStatus | jq
|
||||
```
|
||||
|
||||
For more advanced use cases, we recommend using an SDK.
|
||||
[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
|
||||
|
||||
### Making API calls from the `garage` CLI
|
||||
|
||||
Since v2.0.0, the `garage` binary provides a subcommand `garage json-api` that
|
||||
allows you to invoke the API without making an HTTP request. This can be
|
||||
useful for scripting Garage deployments.
|
||||
|
||||
`garage json-api` proxies API calls through Garage's internal RPC protocol,
|
||||
therefore it does not require any form of authentication: RPC connection
|
||||
parameters are discovered automatically to contact the locally-running Garage
|
||||
instance (as when running any other `garage` CLI command).
|
||||
|
||||
For simple calls that take no parameters, usage is as follows:
|
||||
|
||||
```
|
||||
$ garage json-api GetClusterHealth
|
||||
{
|
||||
"connectedNodes": 3,
|
||||
"knownNodes": 3,
|
||||
"partitions": 256,
|
||||
"partitionsAllOk": 256,
|
||||
"partitionsQuorum": 256,
|
||||
"status": "healthy",
|
||||
"storageNodes": 3,
|
||||
"storageNodesOk": 3
|
||||
}
|
||||
```
|
||||
|
||||
If you need to specify a JSON body for your call, you can add it directly after
|
||||
the name of the function you are calling:
|
||||
|
||||
```
|
||||
$ garage json-api CreateAdminToken '{"name": "test"}'
|
||||
```
|
||||
|
||||
Or you can feed it through stdin by adding a `-` as the last command parameter:
|
||||
|
||||
```
|
||||
$ garage json-api CreateAdminToken -
|
||||
{"name": "test"}
|
||||
<EOF>
|
||||
```
|
||||
|
||||
For admin API calls that would have taken query parameters in their HTTP version, these parameters can be passed in the JSON body object:
|
||||
|
||||
```
|
||||
$ garage json-api GetAdminTokenInfo '{"id":"b0e6e0ace2c0b2aca4cdb2de"}'
|
||||
```
|
||||
|
||||
For admin API calls that take both query parameters and a JSON body, combine them in the following fashion:
|
||||
|
||||
```
|
||||
$ garage json-api UpdateAdminToken '{"id":"b0e6e0ace2c0b2aca4cdb2de", "body":{"name":"not a test"}}'
|
||||
```
|
||||
|
||||
## Special administration API endpoints
|
||||
|
||||
### Metrics `GET /metrics`
|
||||
|
||||
|
|
@ -83,7 +209,7 @@ content-length: 102
|
|||
date: Tue, 08 Aug 2023 07:22:38 GMT
|
||||
|
||||
Garage is fully operational
Consult the full health check API endpoint at /v2/GetClusterHealth for more details
```
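
A quick way to probe this from a shell (a sketch, assuming the admin API listens on `localhost:3903` and that the response above comes from the unauthenticated `/health` route):

```bash
curl -i http://localhost:3903/health
```
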
||||
### On-demand TLS `GET /check`
|
||||
|
|
@ -126,23 +252,7 @@ $ curl -so /dev/null -w "%{http_code}" http://localhost:3903/check?domain=exampl
|
|||
200
|
||||
```
|
||||
|
||||
|
||||
**References:**
|
||||
- [Using On-Demand TLS](https://caddyserver.com/docs/automatic-https#using-on-demand-tls)
|
||||
- [Add option for a backend check to approve use of on-demand TLS](https://github.com/caddyserver/caddy/pull/1939)
|
||||
- [Serving tens of thousands of domains over HTTPS with Caddy](https://caddy.community/t/serving-tens-of-thousands-of-domains-over-https-with-caddy/11179)
|
||||
|
||||
### Cluster operations
|
||||
|
||||
These endpoints have a dedicated OpenAPI spec.
|
||||
- APIv1 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
|
||||
- APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
|
||||
|
||||
Requesting the API from the command line can be as simple as running:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v0/status | jq
|
||||
```
|
||||
|
||||
For more advanced use cases, we recommend using a SDK.
|
||||
[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
|
||||
|
|
|
|||
|
|
@ -51,17 +51,20 @@ allow_punycode = false
|
|||
|
||||
[consul_discovery]
|
||||
api = "catalog"
|
||||
consul_http_addr = "http://127.0.0.1:8500"
|
||||
consul_http_addr = "https://127.0.0.1:8500"
|
||||
tls_skip_verify = false
|
||||
service_name = "garage-daemon"
|
||||
|
||||
ca_cert = "/etc/consul/consul-ca.crt"
|
||||
client_cert = "/etc/consul/consul-client.crt"
|
||||
client_key = "/etc/consul/consul-key.crt"
|
||||
|
||||
# for `agent` API mode, unset client_cert and client_key, and optionally enable `token`
|
||||
# token = "abcdef-01234-56789"
|
||||
tls_skip_verify = false
|
||||
|
||||
tags = [ "dns-enabled" ]
|
||||
meta = { dns-acl = "allow trusted" }
|
||||
|
||||
datacenters = ["dc1", "dc2", "dc3"]
|
||||
|
||||
[kubernetes_discovery]
|
||||
namespace = "garage"
|
||||
|
|
@ -82,6 +85,7 @@ add_host_to_metrics = true
|
|||
[admin]
|
||||
api_bind_addr = "0.0.0.0:3903"
|
||||
metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
|
||||
metrics_require_token = true
|
||||
admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
|
||||
trace_sink = "http://localhost:4317"
|
||||
```
|
||||
|
|
@ -97,9 +101,9 @@ The following gives details about each available configuration option.
|
|||
Top-level configuration options, in alphabetical order:
|
||||
[`allow_punycode`](#allow_punycode),
|
||||
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
|
||||
[`block_max_concurrent_reads`](`block_max_concurrent_reads),
|
||||
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
||||
[`block_max_concurrent_reads`](#block_max_concurrent_reads),
|
||||
[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
|
||||
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
||||
[`block_size`](#block_size),
|
||||
[`bootstrap_peers`](#bootstrap_peers),
|
||||
[`compression_level`](#compression_level),
|
||||
|
|
@ -127,12 +131,14 @@ The `[consul_discovery]` section:
|
|||
[`client_cert`](#consul_client_cert_and_key),
|
||||
[`client_key`](#consul_client_cert_and_key),
|
||||
[`consul_http_addr`](#consul_http_addr),
|
||||
[`datacenters`](#consul_datacenters)
|
||||
[`meta`](#consul_tags_and_meta),
|
||||
[`service_name`](#consul_service_name),
|
||||
[`tags`](#consul_tags_and_meta),
|
||||
[`tls_skip_verify`](#consul_tls_skip_verify),
|
||||
[`token`](#consul_token).
|
||||
|
||||
|
||||
The `[kubernetes_discovery]` section:
|
||||
[`namespace`](#kube_namespace),
|
||||
[`service_name`](#kube_service_name),
|
||||
|
|
@ -150,6 +156,7 @@ The `[s3_web]` section:
|
|||
|
||||
The `[admin]` section:
|
||||
[`api_bind_addr`](#admin_api_bind_addr),
|
||||
[`metrics_require_token`](#admin_metrics_require_token),
|
||||
[`metrics_token`/`metrics_token_file`](#admin_metrics_token),
|
||||
[`admin_token`/`admin_token_file`](#admin_token),
|
||||
[`trace_sink`](#admin_trace_sink),
|
||||
|
|
@ -336,7 +343,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
|||
| --------- | ----------------- | ------------- |
|
||||
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||
| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
|
||||
| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`/`v2.1.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
|
||||
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||
|
||||
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||
|
|
@ -345,8 +352,16 @@ old Sled metadata databases to another engine.
|
|||
|
||||
Performance characteristics of the different DB engines are as follows:
|
||||
|
||||
- LMDB: the recommended database engine for high-performance distributed clusters.
|
||||
LMDB works very well, but is known to have the following limitations:
|
||||
- **LMDB:** the recommended database engine for high-performance distributed clusters
|
||||
with `replication_factor` ≥ 2.
|
||||
LMDB works well, but is known to have the following limitations:
|
||||
|
||||
- LMDB is prone to database corruption after an unclean shutdown (e.g. a process kill
|
||||
or a power outage). It is recommended to configure
|
||||
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval) to be
|
||||
able to easily recover from this situation. With `replication_factor` ≥ 2,
|
||||
metadata can also be reconstructed from remote nodes upon corruption
|
||||
(see [Recovering from failures](@/documentation/operations/recovering.md#corrupted_meta)).
|
||||
|
||||
- The data format of LMDB is not portable between architectures, so for
|
||||
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
||||
|
|
@ -356,30 +371,21 @@ LMDB works very well, but is known to have the following limitations:
|
|||
node to very small database sizes due to how LMDB works; it is therefore
|
||||
not recommended.
|
||||
|
||||
- Several users have reported corrupted LMDB database files after an unclean
|
||||
shutdown (e.g. a power outage). This situation can generally be recovered
|
||||
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
||||
other nodes), or if you have saved regular snapshots at the filesystem
|
||||
level.
|
||||
|
||||
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
||||
object keys in S3 and sort keys in K2V that are limted to 479 bytes.
|
||||
object keys in S3 and sort keys in K2V that are limited to 479 bytes.
|
||||
|
||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||
metadata, which does not have the issues listed above for LMDB.
|
||||
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
||||
performance, which was fixed with the addition of `metadata_fsync`.
|
||||
Sqlite is still probably slower than LMDB due to the way we use it,
|
||||
so it is not the best choice for high-performance storage clusters,
|
||||
but it should work fine in many cases.
|
||||
- **Sqlite:** Garage supports Sqlite as an alternative storage backend for
|
||||
metadata, which does not have the issues listed above for LMDB. Sqlite is
|
||||
slower than LMDB, so it is not the best choice for high-performance storage
|
||||
clusters.
|
||||
|
||||
- Fjall: a storage engine based on LSM trees, which theoretically allow for
|
||||
- **Fjall:** a storage engine based on LSM trees, which theoretically allow for
|
||||
higher write throughput than other storage engines that are based on B-trees.
|
||||
Using Fjall could potentially improve Garage's performance significantly in
|
||||
write-heavy workloads. **Support for Fjall is experimental at this point**,
|
||||
we have added it to Garage for evaluation purposes only. **Do not use it for
|
||||
production-critical workloads.**
|
||||
|
||||
we have added it to Garage for evaluation purposes only. **Use it only with
|
||||
test data, and report any issues to our bug tracker. Do not use it for
|
||||
production workloads.**
|
||||
|
||||
It is possible to convert Garage's metadata directory from one format to another
|
||||
using the `garage convert-db` command, which should be used as follows:
|
||||
|
|
@ -390,7 +396,7 @@ garage convert-db -a <input db engine> -i <input db path> \
|
|||
```
|
||||
|
||||
Make sure to specify the full database path as presented in the table above
|
||||
(third colummn), and not just the path to the metadata directory.
|
||||
(third column), and not just the path to the metadata directory.
|
||||
|
||||
#### `metadata_fsync` {#metadata_fsync}
|
||||
|
||||
|
|
@ -432,13 +438,14 @@ This might reduce the risk that a data block is lost in rare
|
|||
situations such as simultaneous node losing power,
|
||||
at the cost of a moderate drop in write performance.
|
||||
|
||||
Similarly to `metatada_fsync`, this is likely not necessary
|
||||
Similarly to `metadata_fsync`, this is likely not necessary
|
||||
if geographical replication is used.
|
||||
|
||||
#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}

If this value is set, Garage will automatically take a snapshot of the metadata
DB file at a regular interval and save it in the metadata directory,
or in [`metadata_snapshots_dir`](#metadata_snapshots_dir) if it is set.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
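
For example (a sketch; the interval itself is an assumption, pick one suited to your write load):

```toml
metadata_auto_snapshot_interval = "6h"
```
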
|
||||
|
|
@ -447,14 +454,19 @@ corrupted, for instance after an unclean shutdown. See [this
|
|||
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
|
||||
Garage keeps only the two most recent snapshots of the metadata DB and deletes
|
||||
older ones automatically.
|
||||
You can also create metadata snapshots manually at any point using the
|
||||
`garage meta snapshot` command.
|
||||
|
||||
Using snapshots created by Garage is the best option to make snapshots of your
|
||||
node's metadata for potential recovery, as they are guaranteed to be clean and
|
||||
consistent, contrarily to filesystem-level snapshots that may be taken while
|
||||
some writes are in-flight and thus might be corrupted.
|
||||
|
||||
Note that taking a metadata snapshot is a relatively intensive operation as the
|
||||
entire data file is copied. A snapshot being taken might have performance
|
||||
impacts on the Garage node while it is running. If the cluster is under heavy
|
||||
write load when a snapshot operation is running, this might also cause the
|
||||
database file to grow in size significantly as pages cannot be recycled easily.
|
||||
For this reason, it might be better to use filesystem-level snapshots instead
|
||||
if possible.
|
||||
|
||||
#### `disable_scrub` {#disable_scrub}
|
||||
|
||||
|
|
@ -542,19 +554,19 @@ awaits for one of the `block_max_concurrent_reads` slots to be available
|
|||
slot, it reads the entire block file to RAM and frees the slot as soon as the
|
||||
block file is finished reading. Only after the slot is released will the
|
||||
block's data start being transferred over the network. If the request fails to
|
||||
acquire a reading slot wihtin 15 seconds, it fails with a timeout error.
|
||||
acquire a reading slot within 15 seconds, it fails with a timeout error.
|
||||
Timeout events can be monitored through the `block_read_semaphore_timeouts`
|
||||
metric in Prometheus: a non-zero number of such events indicates an I/O
|
||||
bottleneck on HDD read speed.
|
||||
|
||||
|
||||
#### `block_max_concurrent_writes_per_request` (since `v1.3.1` / `v2.2.0`) {#block_max_concurrent_writes_per_request}

This parameter is designed to adapt to the concurrent write performance of
different storage media: it sets the maximum number of parallel block writes per PUT request.
Higher values may improve throughput but increase memory usage.

Default value: 3. Recommended values: 10-30 for NVMe, 3-10 for spinning HDD.
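
For instance, on NVMe storage one might set (a sketch; the exact value is an assumption within the recommended range):

```toml
block_max_concurrent_writes_per_request = 20
```
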
#### `lmdb_map_size` {#lmdb_map_size}
|
||||
|
||||
|
|
@ -605,11 +617,11 @@ storing the secret as the `GARAGE_RPC_SECRET_FILE` environment variable.
|
|||
|
||||
#### `rpc_bind_addr` {#rpc_bind_addr}

The address and port on which to bind for inter-cluster communications
(referred to as RPC for remote procedure calls).
The port specified here should be the same one that other nodes will use to contact
the node, even in the case of a NAT: the NAT should be configured to forward the external
port number to the same internal port number. This means that if you have several nodes running
behind a NAT, they should each use a different RPC port number.
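
For example, to listen on all interfaces on the usual Garage RPC port (a sketch; adjust the port to your deployment):

```toml
rpc_bind_addr = "[::]:3901"
```
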
#### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing}
|
||||
|
|
@ -728,6 +740,18 @@ node_prefix "" {
|
|||
}
|
||||
```
|
||||
|
||||
|
||||
#### `datacenters` {#consul_datacenters}
|
||||
|
||||
Optional list of datacenters that allow garage to do service discovery when Consul is configured in WAN federation.
|
||||
|
||||
Example: `datacenters = ["dc1", "dc2", "dc3"]`
|
||||
|
||||
In a WAN configuration, by default the Consul services API only responds with
|
||||
local LAN services. When a list of datacenters is specified using this option,
|
||||
Garage will query the consul server API by datacenter directly, allowing for
|
||||
Garage to discover nodes across the Consul WAN.
|
||||
|
||||
#### `tags` and `meta` {#consul_tags_and_meta}
|
||||
|
||||
Additional list of tags and map of service meta to add during service registration.
|
||||
|
|
@ -760,14 +784,14 @@ manually.
|
|||
#### `api_bind_addr` {#s3_api_bind_addr}
|
||||
|
||||
The IP and port on which to bind for accepting S3 API calls.
|
||||
This endpoint does not suport TLS: a reverse proxy should be used to provide it.
|
||||
This endpoint does not support TLS: a reverse proxy should be used to provide it.
|
||||
|
||||
Alternatively, since `v0.8.5`, a path can be used to create a unix socket with 0222 mode.
|
||||
|
||||
#### `s3_region` {#s3_region}
|
||||
|
||||
Garage will accept S3 API calls that are targetted to the S3 region defined here.
|
||||
API calls targetted to other regions will fail with a AuthorizationHeaderMalformed error
|
||||
Garage will accept S3 API calls that are targeted to the S3 region defined here.
|
||||
API calls targeted to other regions will fail with a AuthorizationHeaderMalformed error
|
||||
message that redirects the client to the correct region.
|
||||
|
||||
#### `root_domain` {#s3_root_domain}
|
||||
|
|
@ -775,7 +799,7 @@ message that redirects the client to the correct region.
|
|||
The optional suffix to access bucket using vhost-style in addition to path-style request.
|
||||
Note path-style requests are always enabled, whether or not vhost-style is configured.
|
||||
Configuring vhost-style S3 required a wildcard DNS entry, and possibly a wildcard TLS certificate,
|
||||
but might be required by softwares not supporting path-style requests.
|
||||
but might be required by software not supporting path-style requests.
|
||||
|
||||
If `root_domain` is `s3.garage.eu`, a bucket called `my-bucket` can be interacted with
|
||||
using the hostname `my-bucket.s3.garage.eu`.
|
||||
|
|
@ -791,7 +815,7 @@ behaviour of this module.
|
|||
|
||||
The IP and port on which to bind for accepting HTTP requests to buckets configured
|
||||
for website access.
|
||||
This endpoint does not suport TLS: a reverse proxy should be used to provide it.
|
||||
This endpoint does not support TLS: a reverse proxy should be used to provide it.
|
||||
|
||||
Alternatively, since `v0.8.5`, a path can be used to create a unix socket with 0222 mode.
|
||||
|
||||
|
|
@ -824,10 +848,34 @@ See [administration API reference](@/documentation/reference-manual/admin-api.md
|
|||
Alternatively, since `v0.8.5`, a path can be used to create a unix socket. Note that for security reasons,
|
||||
the socket will have 0220 mode. Make sure to set user and group permissions accordingly.
|
||||
|
||||
#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
|
||||
|
||||
The token for accessing all administration functions on the admin endpoint,
|
||||
with the exception of the metrics endpoint (see `metrics_token`).
|
||||
|
||||
You can use any random string for this value. We recommend generating a random
|
||||
token with `openssl rand -base64 32`.
|
||||
|
||||
For Garage version earlier than `v2.0`, if this token is not set,
|
||||
access to these endpoints is disabled entirely.
|
||||
|
||||
Since Garage `v2.0`, additional admin API tokens can be defined dynamically
|
||||
in your Garage cluster using administration commands. This new admin token system
|
||||
is more flexible since it allows admin tokens to have an expiration date,
|
||||
and to have a scope restricted to certain admin API functions. If `admin_token`
|
||||
is set, it behaves as an admin token without expiration and with full scope.
|
||||
Otherwise, only admin API tokens defined dynamically can be used.
|
||||
|
||||
`admin_token` was introduced in Garage `v0.7.2`.
|
||||
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
||||
|
||||
`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
|
||||
#### `metrics_token`, `metrics_token_file` or `GARAGE_METRICS_TOKEN`, `GARAGE_METRICS_TOKEN_FILE` (env) {#admin_metrics_token}
|
||||
|
||||
The token for accessing the Metrics endpoint. If this token is not set, the
|
||||
Metrics endpoint can be accessed without access control.
|
||||
The token for accessing the Prometheus metrics endpoint (`/metrics`).
|
||||
If this token is not set, and unless `metrics_require_token` is set to `true`,
|
||||
the metrics endpoint can be accessed without access control.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
|
||||
|
|
@ -836,17 +884,12 @@ You can use any random string for this value. We recommend generating a random t
|
|||
|
||||
`GARAGE_METRICS_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
|
||||
#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
|
||||
#### `metrics_require_token` (since `v2.0.0`) {#admin_metrics_require_token}
|
||||
|
||||
The token for accessing all of the other administration endpoints. If this
|
||||
token is not set, access to these endpoints is disabled entirely.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
|
||||
`admin_token` was introduced in Garage `v0.7.2`.
|
||||
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
||||
|
||||
`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
If this is set to `true`, accessing the metrics endpoint will always require
an access token. Valid tokens include the `metrics_token` if it is set,
and admin API tokens defined dynamically in Garage which have
the `Metrics` endpoint in their scope.
|
||||
|
||||
#### `trace_sink` {#admin_trace_sink}
|
||||
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ to select the replication mode best suited to your use case (hint: in most cases
|
|||
|
||||
### Compression and deduplication
|
||||
|
||||
All data stored in Garage is deduplicated, and optionnally compressed using
|
||||
All data stored in Garage is deduplicated, and optionally compressed using
|
||||
Zstd. Objects uploaded to Garage are chunked in blocks of constant sizes (see
|
||||
[`block_size`](@/documentation/reference-manual/configuration.md#block_size)),
|
||||
and the hashes of individual blocks are used to dispatch them to storage nodes
|
||||
|
|
@ -84,13 +84,13 @@ exposing the same content under different domain names.
|
|||
|
||||
Garage also supports bucket aliases which are local to a single user:
|
||||
this allows different users to have different buckets with the same name, thus avoiding naming collisions.
|
||||
This can be helpfull for instance if you want to write an application that creates per-user buckets with always the same name.
|
||||
This can be helpful for instance if you want to write an application that creates per-user buckets with always the same name.
|
||||
|
||||
This feature is totally invisible to S3 clients and does not break compatibility with AWS.
|
||||
|
||||
### Cluster administration API
|
||||
|
||||
Garage provides a fully-fledged REST API to administer your cluster programmatically.
Functionality included in the admin API includes: setting up and monitoring
cluster nodes, managing access credentials, and managing storage buckets and bucket aliases.
A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md).
|
||||
|
|
@ -100,7 +100,7 @@ A full reference of the administration API is available [here](@/documentation/r
|
|||
Garage makes some internal metrics available in the Prometheus data format,
|
||||
which allows you to build interactive dashboards to visualize the load and internal state of your storage cluster.
|
||||
|
||||
For developers and performance-savvy administrators,
Garage also supports exporting traces of what it does internally in OpenTelemetry format.
This makes it possible to monitor the time spent at various steps of the processing of requests,
in order to detect potential performance bottlenecks.
|
||||
|
|
@ -129,5 +129,5 @@ related to objects stored in an S3 bucket.
|
|||
In the context of our research project, [Aérogramme](https://aerogramme.deuxfleurs.fr),
|
||||
K2V is used to provide metadata and log storage for operations on encrypted e-mail storage.
|
||||
|
||||
Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)
|
||||
Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md)
|
||||
and on how to enable it in Garage [here](@/documentation/reference-manual/k2v.md).
|
||||
|
|
|
|||
|
|
@ -16,10 +16,10 @@ the `k2v` feature flag enabled can be obtained from our download page under
|
|||
with `-k2v` (example: `v0.7.2-k2v`).
|
||||
|
||||
The specification of the K2V API can be found
|
||||
[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/doc/drafts/k2v-spec.md).
|
||||
[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md).
|
||||
This document also includes a high-level overview of K2V's design.
|
||||
|
||||
The K2V API uses AWSv4 signatures for authentification, same as the S3 API.
|
||||
The K2V API uses AWSv4 signatures for authentication, same as the S3 API.
|
||||
The AWS region used for signature calculation is always the same as the one
|
||||
defined for the S3 API in the config file.
|
||||
|
||||
|
|
@ -55,4 +55,3 @@ cargo build --features cli --bin k2v-cli
|
|||
The CLI utility is self-documented, run `k2v-cli --help` to learn how to use
|
||||
it. There is also a short README.md in the `src/k2v-client` folder with some
|
||||
instructions.
|
||||
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ we suppose that OpenIO supports presigned URLs.
|
|||
All endpoints that are missing on Garage will return a 501 Not Implemented.
|
||||
Some `x-amz-` headers are not implemented.
|
||||
|
||||
### Core endoints
|
||||
### Core endpoints
|
||||
|
||||
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||
|
|
@ -135,12 +135,12 @@ If you need this feature, please [share your use case in our dedicated issue](ht
|
|||
**PutBucketLifecycleConfiguration:** The only actions supported are
|
||||
`AbortIncompleteMultipartUpload` and `Expiration` (without the
|
||||
`ExpiredObjectDeleteMarker` field). All other operations are dependent on
|
||||
either bucket versionning or storage classes which Garage currently does not
|
||||
either bucket versioning or storage classes which Garage currently does not
|
||||
implement. The deprecated `Prefix` member directly in the the `Rule`
|
||||
structure/XML tag is not supported, specified prefixes must be inside the
|
||||
`Filter` structure/XML tag.
|
||||
|
||||
**GetBucketVersioning:** Stub implementation which always returns "versionning not enabled", since Garage does not yet support bucket versionning.
|
||||
**GetBucketVersioning:** Stub implementation which always returns "versioning not enabled", since Garage does not yet support bucket versioning.
|
||||
|
||||
### Replication endpoints
|
||||
|
||||
|
|
@ -155,7 +155,7 @@ Please open an issue if you have a use case for replication.
|
|||
*Note: Ceph documentation briefly says that Ceph supports
|
||||
[replication through the S3 API](https://docs.ceph.com/en/latest/radosgw/multisite-sync-policy/#s3-replication-api)
|
||||
but with some limitations.
|
||||
Additionaly, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.*
|
||||
Additionally, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.*
|
||||
|
||||
### Locking objects
|
||||
|
||||
|
|
@ -197,7 +197,7 @@ Please open an issue if you have a use case.
|
|||
|
||||
### Vendor specific endpoints
|
||||
|
||||
<details><summary>Display Amazon specifc endpoints</summary>
|
||||
<details><summary>Display Amazon specific endpoints</summary>
|
||||
|
||||
|
||||
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||
|
|
@ -234,4 +234,3 @@ Please open an issue if you have a use case.
|
|||
| [SelectObjectContent](https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
||||
|
||||
</details>
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ title = "S3 compatibility target"
|
|||
weight = 5
|
||||
+++
|
||||
|
||||
If there is a specific S3 functionnality you have a need for, feel free to open
|
||||
If there is a specific S3 functionality you have a need for, feel free to open
|
||||
a PR to put the corresponding endpoints higher in the list. Please explain
|
||||
your motivations for doing so in the PR message.
|
||||
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ Workflow for DELETE:
|
|||
1. Check write permission (LDAP)
|
||||
2. Get current version (or versions) in object table
|
||||
3. Do the deletion of those versions NOT IN A BACKGROUND JOB THIS TIME
|
||||
4. Return succes to the user if we were able to delete blocks from the blocks table and entries from the object table
|
||||
4. Return success to the user if we were able to delete blocks from the blocks table and entries from the object table
|
||||
|
||||
To delete a version:
|
||||
|
||||
|
|
@ -92,7 +92,7 @@ Known issue: if someone is reading from a version that we want to delete and the
|
|||
- file path = /meta/(first 3 hex digits of hash)/(rest of hash)
|
||||
- map block hash -> set of version UUIDs where it is referenced
|
||||
|
||||
Useful metadata:

- list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
|
||||
|
|
|
|||
|
|
@ -49,12 +49,12 @@ The ring construction that selects `n_token` random positions for each nodes giv
|
|||
is not well-balanced: the space between the tokens varies a lot, and some partitions are thus bigger than others.
|
||||
This problem was demonstrated in the original Dynamo DB paper.
|
||||
|
||||
To solve this, we want to apply a better second method for partitionning our dataset:
|
||||
To solve this, we want to apply a better second method for partitioning our dataset:
|
||||
|
||||
1. fix an initially large number of partitions (say 1024) with evenly-spaced delimiters,
|
||||
|
||||
2. attribute each partition randomly to a node, with a probability
|
||||
proportionnal to its capacity (which `n_tokens` represented in the first
|
||||
proportional to its capacity (which `n_tokens` represented in the first
|
||||
method)
|
||||
|
||||
For now we continue using the multi-DC ring walking described above.
|
||||
|
|
@ -66,7 +66,7 @@ I have studied two ways to do the attribution of partitions to nodes, in a way t
|
|||
|
||||
MagLev provided significantly better balancing, as it guarantees that the exact
|
||||
same number of partitions is attributed to all nodes that have the same
|
||||
capacity (and that this number is proportionnal to the node's capacity, except
|
||||
capacity (and that this number is proportional to the node's capacity, except
|
||||
for large values), however in both cases:
|
||||
|
||||
- the distribution is still bad, because we use the naive multi-DC ring walking
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.3 to 0.4"
|
||||
weight = 20
|
||||
weight = 80
|
||||
+++
|
||||
|
||||
**Migrating from 0.3 to 0.4 is unsupported. This document is only intended to
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.5 to 0.6"
|
||||
weight = 15
|
||||
weight = 75
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.6 if you have an existing 0.5 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.6 to 0.7"
|
||||
weight = 14
|
||||
weight = 74
|
||||
+++
|
||||
**This guide explains how to migrate to 0.7 if you have an existing 0.6 cluster.
|
||||
We don't recommend trying to migrate to 0.7 directly from 0.5 or older.**
|
||||
|
|
@ -19,7 +19,7 @@ The migration steps are as follows:
|
|||
2. Disable API and web access. Garage does not support disabling
|
||||
these endpoints but you can change the port number or stop your reverse
|
||||
proxy for instance.
|
||||
3. Check once again that your cluster is healty. Run again `garage repair --all-nodes --yes tables` which is quick.
|
||||
3. Check once again that your cluster is healthy. Run again `garage repair --all-nodes --yes tables` which is quick.
|
||||
Also check your queues are empty, run `garage stats` to query them.
|
||||
4. Turn off Garage v0.6
|
||||
5. Backup the metadata folder of all your nodes: `cd /var/lib/garage ; tar -acf meta-v0.6.tar.zst meta/`
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.7 to 0.8"
|
||||
weight = 13
|
||||
weight = 73
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.8 if you have an existing 0.7 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.8 to 0.9"
|
||||
weight = 12
|
||||
weight = 72
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.9 to 1.0"
|
||||
weight = 11
|
||||
weight = 71
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
|
||||
|
|
|
|||
70
doc/book/working-documents/migration-2.md
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
+++
|
||||
title = "Migrating from 1.0 to 2.0"
|
||||
weight = 70
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to v2.x if you have an existing v1.x.x cluster.
|
||||
We don't recommend trying to migrate to v2.x directly from v0.9.x or older.**
|
||||
|
||||
This migration procedure has been tested on several clusters without issues.
|
||||
However, it is still a *critical procedure* that can go wrong.
|
||||
**Make sure to back up all your data before attempting it!**
|
||||
|
||||
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
|
||||
|
||||
## Changes introduced in v2.0
|
||||
|
||||
The following are **breaking changes** in Garage v2.0 that require your attention when migrating:
|
||||
|
||||
- The administration API has been completely reworked.
|
||||
Some calls to the `/v1/` endpoints will still work but most will not.
|
||||
New endpoints are prefixed by `/v2/`. **You will need to update all your code that makes use of the admin API.**
|
||||
|
||||
- `replication_mode` is no longer a supported configuration parameter,
|
||||
please use `replication_factor` and `consistency_mode` instead.
|
||||
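  As a minimal, hypothetical sketch (the file path is an assumption, adjust it to your deployment): if your old configuration contained `replication_mode = "3"`, the equivalent replacement is a replication factor of 3 with the default consistent mode:

  ```bash
  # Sketch only: after deleting the old `replication_mode = "3"` line,
  # append the two parameters that replace it.
  cat >> /etc/garage/garage.toml <<'EOF'
  replication_factor = 3
  consistency_mode = "consistent"
  EOF
  ```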
|
||||
## Migration procedure
|
||||
|
||||
The migration to Garage v2.0 can be done with almost no downtime,
|
||||
by restarting all nodes at once in the new version.
|
||||
|
||||
The migration steps are as follows:
|
||||
|
||||
1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
|
||||
all data seems to be synced correctly between nodes. If you have time, do
|
||||
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
|
||||
etc.)
|
||||
|
||||
2. Ensure you have a snapshot of your Garage installation that you can restore
|
||||
to in case the upgrade goes wrong, with one of the following options:
|
||||
|
||||
- You may use the `garage meta snapshot --all` command
|
||||
to make a backup snapshot of the metadata directories of your nodes
|
||||
for backup purposes. Once this command has completed, copy the following
|
||||
files and directories from the `metadata_dir` of all your nodes
|
||||
to somewhere safe: `snapshots`, `cluster_layout`, `data_layout`,
|
||||
`node_key`, `node_key.pub`. (If you have set the `metadata_snapshots_dir`
|
||||
to a different value in your config file, back up that directory instead.)
|
||||
|
||||
 - If you are running a filesystem such as ZFS or BTRFS that supports
|
||||
snapshotting, you can create a filesystem-level snapshot of the `metadata_dir`
|
||||
of all your nodes to be used as a restoration point if needed.
|
||||
|
||||
- You may also make a back-up manually: turn off each node
|
||||
individually; back up its metadata folder (for instance, use the following
|
||||
command if your metadata directory is `/var/lib/garage/meta`: `cd
|
||||
/var/lib/garage ; tar -acf meta-v1.0.tar.zst meta/`); turn it back on
|
||||
again. This will allow you to take a backup of all nodes without
|
||||
impacting global cluster availability. You can do all nodes of a single
|
||||
zone at once as this does not impact the availability of Garage.
|
||||
|
||||
3. Prepare your updated binaries and configuration files for Garage v2.0.
|
||||
**Remember to update your configuration file to remove `replication_mode` and replace it by `replication_factor`.**
|
||||
|
||||
4. Shut down all v1.0 nodes simultaneously, and restart them all simultaneously
|
||||
in v2.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
|
||||
achieve this as fast as possible. Garage v2.0 should be in a working state
|
||||
as soon as enough nodes have started.
|
||||
|
||||
5. Monitor your cluster in the following hours to see if it works well under
|
||||
your production load.
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Testing strategy"
|
||||
weight = 30
|
||||
weight = 100
|
||||
+++
|
||||
|
||||
|
||||
|
|
@ -28,11 +28,11 @@ We should try to test in least invasive ways, i.e. minimize the impact of the te
|
|||
- Not making `garage` a shared library (launch using `execve`, it's perfectly fine)
|
||||
|
||||
Instead, we should focus on building a clean outer interface for the `garage` binary,
|
||||
for example loading configuration using environnement variables instead of the configuration file if that's helpfull for writing the tests.
|
||||
for example loading configuration using environment variables instead of the configuration file if that's helpful for writing the tests.
|
||||
|
||||
There are two reasons for this:
|
||||
|
||||
- Keep the soure code clean and focused
|
||||
- Keep the source code clean and focused
|
||||
- Test something that is as close as possible to the true garage that will actually be running
|
||||
|
||||
Reminder: rules of simplicity, concerning changes to Garage's source code.
|
||||
|
|
@ -71,5 +71,3 @@ Interesting blog posts on the blog of the Sled database:
|
|||
Misc:
|
||||
- [mutagen](https://github.com/llogiq/mutagen) - mutation testing is a way to assert our test quality by mutating the code and seeing if the mutation makes the tests fail
|
||||
- [fuzzing](https://rust-fuzz.github.io/book/) - cargo supports fuzzing, it could be a way to test our software's reliability in the presence of garbage data.
|
||||
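As a minimal sketch of what such a setup could look like (these are standard `cargo-fuzz` commands, not something currently wired into Garage's build):

```bash
cargo install cargo-fuzz
cargo fuzz init                        # creates fuzz/fuzz_targets/fuzz_target_1.rs
cargo +nightly fuzz run fuzz_target_1  # run the generated target against random inputs
```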
|
||||
|
||||
|
|
|
|||
|
|
@ -13,8 +13,12 @@ We will bump the version numbers prefixed to each API endpoint each time the syn
|
|||
or semantics change, meaning that code that relies on these endpoints will break
|
||||
when changes are introduced.
|
||||
|
||||
The Garage administration API was introduced in version 0.7.2, this document
|
||||
does not apply to older versions of Garage.
|
||||
The Garage administration API was introduced in version 0.7.2, and was
|
||||
changed several times.
|
||||
|
||||
**THIS DOCUMENT IS DEPRECATED.** We now have an OpenAPI spec which is automatically generated
|
||||
from Garage's source code and is always up-to-date. See `doc/api/garage-admin-v2.html`.
|
||||
Text in this document is no longer kept in sync with the admin API's actual behavior.
|
||||
|
||||
|
||||
## Access control
|
||||
|
|
@ -52,34 +56,28 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
|
|||
and an HTTP status 503 (Service Unavailable) if there are some partitions
|
||||
for which a quorum of nodes is not available.
|
||||
A simple textual message is also returned in a body with content-type `text/plain`.
|
||||
See `/v1/health` for an API that also returns JSON output.
|
||||
See `/v2/GetClusterHealth` for an API that also returns JSON output.
|
||||
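For example, a quick liveness probe could look like this (a sketch; it assumes the admin API listens on `localhost:3903`, and your deployment may additionally require an `Authorization` header):

```bash
# Returns 200 with a short text/plain message when a quorum is available,
# or 503 (Service Unavailable) otherwise.
curl -i http://localhost:3903/health
```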
|
||||
### Other special endpoints
|
||||
|
||||
#### CheckDomain `GET /check?domain=<domain>`
|
||||
|
||||
Checks whether this Garage cluster serves a website for domain `<domain>`.
|
||||
Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain.
|
||||
|
||||
### Cluster operations
|
||||
|
||||
#### GetClusterStatus `GET /v1/status`
|
||||
#### GetClusterStatus `GET /v2/GetClusterStatus`
|
||||
|
||||
Returns the cluster's current status in JSON, including:
|
||||
|
||||
- ID of the node being queried and its version of the Garage daemon
|
||||
- Live nodes
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
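Example request (a sketch; it assumes the admin API listens on `localhost:3903` and that `$ADMIN_TOKEN` holds the admin token from your configuration file):

```bash
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
  http://localhost:3903/v2/GetClusterStatus
```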
|
||||
Example response body:
|
||||
|
||||
```json
|
||||
{
|
||||
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"garageVersion": "v1.3.0",
|
||||
"garageFeatures": [
|
||||
"k2v",
|
||||
"lmdb",
|
||||
"sqlite",
|
||||
"metrics",
|
||||
"bundled-libs"
|
||||
],
|
||||
"rustVersion": "1.68.0",
|
||||
"dbEngine": "LMDB (using Heed crate)",
|
||||
"layoutVersion": 5,
|
||||
"nodes": [
|
||||
{
|
||||
|
|
@ -169,7 +167,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### GetClusterHealth `GET /v1/health`
|
||||
#### GetClusterHealth `GET /v2/GetClusterHealth`
|
||||
|
||||
Returns the cluster's current health in JSON format, with the following variables:
|
||||
|
||||
|
|
@ -178,7 +176,7 @@ Returns the cluster's current health in JSON format, with the following variable
|
|||
- degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
|
||||
- unavailable: a quorum of write nodes is not available for some partitions
|
||||
- `knownNodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
|
||||
- `connectedNodes`: the nubmer of nodes this Garage node currently has an open connection to
|
||||
- `connectedNodes`: the number of nodes this Garage node currently has an open connection to
|
||||
- `storageNodes`: the number of storage nodes currently registered in the cluster layout
|
||||
- `storageNodesOk`: the number of storage nodes to which a connection is currently open
|
||||
- `partitions`: the total number of partitions of the data (currently always 256)
|
||||
|
|
@ -202,7 +200,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### ConnectClusterNodes `POST /v1/connect`
|
||||
#### ConnectClusterNodes `POST /v2/ConnectClusterNodes`
|
||||
|
||||
Instructs this Garage node to connect to other Garage nodes at specified addresses.
|
||||
|
||||
|
|
@ -232,7 +230,7 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetClusterLayout `GET /v1/layout`
|
||||
#### GetClusterLayout `GET /v2/GetClusterLayout`
|
||||
|
||||
Returns the cluster's current layout in JSON, including:
|
||||
|
||||
|
|
@ -293,7 +291,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### UpdateClusterLayout `POST /v1/layout`
|
||||
#### UpdateClusterLayout `POST /v2/UpdateClusterLayout`
|
||||
|
||||
Send modifications to the cluster layout. These modifications will
|
||||
be included in the staged role changes, visible in subsequent calls
|
||||
|
|
@ -330,7 +328,7 @@ This returns the new cluster layout with the proposed staged changes,
|
|||
as returned by GetClusterLayout.
|
||||
|
||||
|
||||
#### ApplyClusterLayout `POST /v1/layout/apply`
|
||||
#### ApplyClusterLayout `POST /v2/ApplyClusterLayout`
|
||||
|
||||
Applies to the cluster the layout changes currently registered as
|
||||
staged layout changes.
|
||||
|
|
@ -350,23 +348,11 @@ existing layout in the cluster.
|
|||
This returns the message describing all the calculations done to compute the new
|
||||
layout, as well as the description of the layout as returned by GetClusterLayout.
|
||||
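Example request (a sketch, with the same assumptions as above; the version value shown is hypothetical and must be 1 + the currently existing layout version):

```bash
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  -d '{"version": 6}' \
  http://localhost:3903/v2/ApplyClusterLayout
```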
|
||||
#### RevertClusterLayout `POST /v1/layout/revert`
|
||||
#### RevertClusterLayout `POST /v2/RevertClusterLayout`
|
||||
|
||||
Clears all of the staged layout changes.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 13
|
||||
}
|
||||
```
|
||||
|
||||
Reverting the staged changes is done by incrementing the version number
|
||||
and clearing the contents of the staged change list.
|
||||
Similarly to the CLI, the body must include the incremented
|
||||
version number, which MUST be 1 + the value of the currently
|
||||
existing layout in the cluster.
|
||||
This request takes an empty body.
|
||||
|
||||
This returns the new cluster layout with all changes reverted,
|
||||
as returned by GetClusterLayout.
|
||||
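Example request (a sketch, same assumptions as above; note the empty body):

```bash
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  http://localhost:3903/v2/RevertClusterLayout
```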
|
|
@ -374,7 +360,7 @@ as returned by GetClusterLayout.
|
|||
|
||||
### Access key operations
|
||||
|
||||
#### ListKeys `GET /v1/key`
|
||||
#### ListKeys `GET /v2/ListKeys`
|
||||
|
||||
Returns all API access keys in the cluster.
|
||||
|
||||
|
|
@ -393,8 +379,8 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetKeyInfo `GET /v1/key?id=<acces key id>`
|
||||
#### GetKeyInfo `GET /v1/key?search=<pattern>`
|
||||
#### GetKeyInfo `GET /v2/GetKeyInfo?id=<access key id>`
|
||||
#### GetKeyInfo `GET /v2/GetKeyInfo?search=<pattern>`
|
||||
|
||||
Returns information about the requested API access key.
|
||||
|
||||
|
|
@ -402,7 +388,7 @@ If `id` is set, the key is looked up using its exact identifier (faster).
|
|||
If `search` is set, the key is looked up using its name or prefix
|
||||
of identifier (slower, all keys are enumerated to do this).
|
||||
|
||||
Optionnally, the query parameter `showSecretKey=true` can be set to reveal the
|
||||
Optionally, the query parameter `showSecretKey=true` can be set to reveal the
|
||||
associated secret access key.
|
||||
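Example request (a sketch, same assumptions as above; the key name `test-key` is hypothetical):

```bash
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
  "http://localhost:3903/v2/GetKeyInfo?search=test-key&showSecretKey=true"
```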
|
||||
Example response:
|
||||
|
|
@ -468,7 +454,7 @@ Example response:
|
|||
}
|
||||
```
|
||||
|
||||
#### CreateKey `POST /v1/key`
|
||||
#### CreateKey `POST /v2/CreateKey`
|
||||
|
||||
Creates a new API access key.
|
||||
|
||||
|
|
@ -483,7 +469,7 @@ Request body format:
|
|||
This returns the key info, including the created secret key,
|
||||
in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### ImportKey `POST /v1/key/import`
|
||||
#### ImportKey `POST /v2/ImportKey`
|
||||
|
||||
Imports an existing API key.
|
||||
This will check that the imported key is in the valid format, i.e.
|
||||
|
|
@ -501,7 +487,7 @@ Request body format:
|
|||
|
||||
This returns the key info in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### UpdateKey `POST /v1/key?id=<acces key id>`
|
||||
#### UpdateKey `POST /v2/UpdateKey?id=<access key id>`
|
||||
|
||||
Updates information about the specified API access key.
|
||||
|
||||
|
|
@ -523,14 +509,14 @@ The possible flags in `allow` and `deny` are: `createBucket`.
|
|||
|
||||
This returns the key info in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### DeleteKey `DELETE /v1/key?id=<acces key id>`
|
||||
#### DeleteKey `POST /v2/DeleteKey?id=<access key id>`
|
||||
|
||||
Deletes an API access key.
|
||||
|
||||
|
||||
### Bucket operations
|
||||
|
||||
#### ListBuckets `GET /v1/bucket`
|
||||
#### ListBuckets `GET /v2/ListBuckets`
|
||||
|
||||
Returns all storage buckets in the cluster.
|
||||
|
||||
|
|
@ -572,8 +558,8 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetBucketInfo `GET /v1/bucket?id=<bucket id>`
|
||||
#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>`
|
||||
#### GetBucketInfo `GET /v2/GetBucketInfo?id=<bucket id>`
|
||||
#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=<alias>`
|
||||
|
||||
Returns information about the requested storage bucket.
|
||||
|
||||
|
|
@ -616,7 +602,7 @@ Example response:
|
|||
}
|
||||
```
|
||||
|
||||
#### CreateBucket `POST /v1/bucket`
|
||||
#### CreateBucket `POST /v2/CreateBucket`
|
||||
|
||||
Creates a new storage bucket.
|
||||
|
||||
|
|
@ -656,7 +642,7 @@ or no alias at all.
|
|||
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
|
||||
two aliases, but I don't see why you would want to do that.
|
||||
|
||||
#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`
|
||||
#### UpdateBucket `POST /v2/UpdateBucket?id=<bucket id>`
|
||||
|
||||
Updates configuration of the given bucket.
|
||||
|
||||
|
|
@ -688,16 +674,38 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
|
|||
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
|
||||
to change only one of the two quotas.
|
||||
|
||||
#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
|
||||
#### DeleteBucket `POST /v2/DeleteBucket?id=<bucket id>`
|
||||
|
||||
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||
|
||||
Warning: this will delete all aliases associated with the bucket!
|
||||
|
||||
#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads`
|
||||
|
||||
Cleans up all incomplete uploads in a bucket that are older than a specified number
|
||||
of seconds.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"olderThanSecs": 3600
|
||||
}
|
||||
```
|
||||
|
||||
Response format:
|
||||
|
||||
```json
|
||||
{
|
||||
"uploadsDeleted": 12
|
||||
}
|
||||
```
|
||||
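Example request (a sketch, same assumptions as above; the bucket id is the one from the example body):

```bash
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  -d '{"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", "olderThanSecs": 3600}' \
  http://localhost:3903/v2/CleanupIncompleteUploads
```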
|
||||
|
||||
### Operations on permissions for keys on buckets
|
||||
|
||||
#### BucketAllowKey `POST /v1/bucket/allow`
|
||||
#### AllowBucketKey `POST /v2/AllowBucketKey`
|
||||
|
||||
Allows a key to do read/write/owner operations on a bucket.
|
||||
|
||||
|
|
@ -718,7 +726,7 @@ Request body format:
|
|||
Flags in `permissions` which have the value `true` will be activated.
|
||||
Other flags will remain unchanged.
|
||||
|
||||
#### BucketDenyKey `POST /v1/bucket/deny`
|
||||
#### DenyBucketKey `POST /v2/DenyBucketKey`
|
||||
|
||||
Denies a key from doing read/write/owner operations on a bucket.
|
||||
|
||||
|
|
@ -742,19 +750,35 @@ Other flags will remain unchanged.
|
|||
|
||||
### Operations on bucket aliases
|
||||
|
||||
#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
#### AddBucketAlias `POST /v2/AddBucketAlias`
|
||||
|
||||
Empty body. Creates a global alias for a bucket.
|
||||
Creates an alias for a bucket, either globally or in the namespace of a specific access key.
|
||||
To create a global alias, specify the `globalAlias` field.
|
||||
To create a local alias, specify the `localAlias` and `accessKeyId` fields.
|
||||
|
||||
#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
Request body format:
|
||||
|
||||
Removes a global alias for a bucket.
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"globalAlias": "my-bucket"
|
||||
}
|
||||
```
|
||||
|
||||
#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||
or:
|
||||
|
||||
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"localAlias": "my-bucket"
|
||||
}
|
||||
```
|
||||
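Example request creating a global alias (a sketch, same assumptions as above; values taken from the first example body):

```bash
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  -d '{"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", "globalAlias": "my-bucket"}' \
  http://localhost:3903/v2/AddBucketAlias
```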
|
||||
#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId<access key ID>&alias=<local alias>`
|
||||
#### RemoveBucketAlias `POST /v2/RemoveBucketAlias`
|
||||
|
||||
Removes a local alias for a bucket in the namespace of a specific access key.
|
||||
Removes an alias for a bucket, either globally or in the namespace of a specific access key.
|
||||
To remove a global alias, specify the `globalAlias` field.
|
||||
To remove a local alias, specify the `localAlias` and `accessKeyId` fields.
|
||||
|
||||
Request body format: same as AddBucketAlias.
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ Triples in K2V are constituted of three fields:
|
|||
partition key in which the client wants to read/delete lists of items
|
||||
|
||||
- a sort key (`sk`), an utf8 string that defines the index of the triplet inside its
|
||||
partition; triplets are uniquely idendified by their partition key + sort key
|
||||
partition; triplets are uniquely identified by their partition key + sort key
|
||||
|
||||
- a value (`v`), an opaque binary blob associated to the partition key + sort key;
|
||||
they are transmitted as binary when possible but in most case in the JSON API
|
||||
|
|
@ -74,7 +74,7 @@ are obsoleted by the new write.
|
|||
|
||||
**Basic insertion.** To insert a new value `v4` with context `[(node1, t2), (node2, t3)]`, in a
|
||||
simple case where there was no insertion in-between reading the value
|
||||
mentionned above and writing `v4`, and supposing that node2 receives the
|
||||
mentioned above and writing `v4`, and supposing that node2 receives the
|
||||
InsertItem query:
|
||||
|
||||
- `node2` generates a timestamp `t4` such that `t4 > t3`.
|
||||
|
|
@ -332,7 +332,7 @@ Inserts a single item. This request does not use JSON, the body is sent directly
|
|||
|
||||
To supersede previous values, the HTTP header `X-Garage-Causality-Token` should
|
||||
be set to the causality token returned by a previous read on this key. This
|
||||
header can be ommitted for the first writes to the key.
|
||||
header can be omitted for the first writes to the key.
|
||||
|
||||
Example query:
|
||||
|
||||
|
|
@ -397,7 +397,7 @@ smallest partition key that exists. It returns partition keys in increasing
|
|||
order, or decreasing order if `reverse` is set to `true`,
|
||||
and stops when either of the following conditions is met:
|
||||
|
||||
1. if `end` is specfied, the partition key `end` is reached or surpassed (if it
|
||||
1. if `end` is specified, the partition key `end` is reached or surpassed (if it
|
||||
is reached exactly, it is not included in the result)
|
||||
|
||||
2. if `limit` is specified, `limit` partition keys have been listed
|
||||
|
|
@ -491,7 +491,7 @@ the triplet is inserted for the first time, the causality token should be set to
|
|||
|
||||
The value is expected to be a base64-encoded binary blob. The value `null` can
|
||||
also be used to delete the triplet while preserving causality information: this
|
||||
allows to know if a delete has happenned concurrently with an insert, in which
|
||||
allows to know if a delete has happened concurrently with an insert, in which
|
||||
case both are preserved and returned on reads (see below).
|
||||
|
||||
Partition keys and sort keys are utf8 strings which are stored sorted by
|
||||
|
|
@ -540,7 +540,7 @@ JSON struct with the following fields:
|
|||
|
||||
For each of the searches, triplets are listed and returned separately. The
|
||||
semantics of `prefix`, `start`, `end`, `limit` and `reverse` are the same as for ReadIndex. The
|
||||
additionnal parameter `singleItem` allows to get a single item, whose sort key
|
||||
additional parameter `singleItem` allows to get a single item, whose sort key
|
||||
is the one given in `start`. Parameters `conflictsOnly` and `tombstones`
|
||||
control additional filters on the items that are returned.
|
||||
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ To link the effective storage capacity of the cluster to partition assignment, w
|
|||
\end{equation}
|
||||
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored blocks.
|
||||
|
||||
Every node $n$ wille store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in the $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/p_n$. This remark leads us to define the optimal size that we will want to maximize:
|
||||
Every node $n$ will store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in the $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions by our assumption) have their size bounded by $c_n/p_n$. This remark leads us to define the optimal size that we will want to maximize:
|
||||
|
||||
\begin{equation}
|
||||
\label{eq:optimal}
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ We would like to compute an assignment of nodes to partitions. We will impose so
|
|||
\end{equation}
|
||||
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored large objects.
|
||||
|
||||
Every node $n$ wille store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions by our assumption) have there size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize:
|
||||
Every node $n$ will store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions by our assumption) have their size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize:
|
||||
|
||||
\begin{equation}
|
||||
\label{eq:optimal}
|
||||
|
|
@ -62,7 +62,7 @@ For now, in the following, we ask the following redundancy constraint:
|
|||
|
||||
\textbf{Mode 3:} every partition needs to be assignated to three nodes. We try to spread the three nodes over different zones as much as possible.
|
||||
|
||||
\textbf{Warning:} This is a working document written incrementaly. The last version of the algorithm is the \textbf{parametric assignment} described in the next section.
|
||||
\textbf{Warning:} This is a working document written incrementally. The last version of the algorithm is the \textbf{parametric assignment} described in the next section.
|
||||
|
||||
|
||||
\section{Computation of a parametric assignment}
|
||||
|
|
@ -318,7 +318,7 @@ $$
|
|||
$$
|
||||
which is the universal upper bound on $s^*$. Hence any optimal utilization $(n_v)$ can be modified to another optimal utilization such that $n_v\ge \hat{n}_v$
|
||||
|
||||
Because $z_0$ cannot store more than $N$ partition occurences, in any assignment, at least $2N$ partitions must be assignated to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define
|
||||
Because $z_0$ cannot store more than $N$ partition occurrences, in any assignment, at least $2N$ partitions must be assignated to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define
|
||||
$$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_1}}N\right\rfloor$$
|
||||
for every $v\in z_1$.
|
||||
|
||||
|
|
@ -351,7 +351,7 @@ Define $3N$ tokens $t_1,\ldots, t_{3N}\in V$ as follows:
|
|||
Then for $1\le i \le N$, define the triplet $T_i$ to be
|
||||
$(t_i, t_{i+N}, t_{i+2N})$. Since the same nodes of a zone appear contiguously, the three nodes of a triplet must belong to three distinct zones.
|
||||
|
||||
However simple, this solution to go from an utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two link, instead of spreading the bandwith use over many other links to other nodes. To achieve this goal, we will reframe the search of an assignment as a flow problem. and in the flow algorithm, we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets.
|
||||
However simple, this solution to go from a utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two links, instead of spreading the bandwidth use over many other links to other nodes. To achieve this goal, we will reframe the search of an assignment as a flow problem, and in the flow algorithm, we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets.
|
||||
|
||||
\begin{figure}
|
||||
\centering
|
||||
|
|
@ -436,7 +436,7 @@ T_3=(b,c,d').
|
|||
$$
|
||||
One can check that in this case, it is impossible to minimize both the number of zone and node changes.
|
||||
|
||||
Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to get the new utilization rate: this could lead to blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in section \ref{sec:opt_assign}, we can explore the neigbours in a random order in the graph algorithms, to spread the triplets distribution.
|
||||
Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to get the new utilization rate: this could lead to blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in section \ref{sec:opt_assign}, we can explore the neighbours in a random order in the graph algorithms, to spread the triplets distribution.
|
||||
|
||||
|
||||
\subsubsection{Minimizing the zone discrepancy}
|
||||
|
|
@ -550,8 +550,8 @@ We give some considerations of worst case complexity for these algorithms. In th
|
|||
Algorithm \ref{alg:util} can be implemented with complexity $O(\#V^2)$. The complexity of the function call at line \ref{lin:subutil} is $O(\#V)$. The difference between the sum of the subutilizations and $3N$ is at most the sum of the rounding errors when computing the $\hat{n}_v$. Hence it is bounded by $\#V$ and the loop at line \ref{lin:loopsub} is iterated at most $\#V$ times. Finding the minimizing $v$ at line \ref{lin:findmin} takes $O(\#V)$ operations (naively, we could also use a heap).
|
||||
|
||||
Algorithm \ref{alg:opt} can be implemented with complexity $O(N^3\times \#Z)$. The flow graph has $O(N+\#Z)$ vertices and $O(N\times \#Z)$ edges. Dinic's algorithm has complexity $O(\#\mathrm{Vertices}^2\#\mathrm{Edges})$ hence in our case it is $O(N^3\times \#Z)$.
|
||||
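For completeness, and assuming $\#Z\le N$ (which holds in practice since there are far fewer zones than partitions), this bound follows from
\begin{equation*}
O\left(\#\mathrm{Vertices}^2\,\#\mathrm{Edges}\right)
= O\left((N+\#Z)^2\times N\#Z\right)
= O\left(N^3\times \#Z\right).
\end{equation*}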
|
||||
Algorithm \ref{alg:mini} can be implented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}.
|
||||
|
||||
Algorithm \ref{alg:mini} can be implemented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}.
|
||||
The graph $G_T$ has $O(N)$ vertices and $O(N\times \#Z)$ edges under assumption \eqref{hyp:A} and respectively $O(N\times \#Z)$ vertices and $O(N\times \#V)$ edges under assumption \eqref{hyp:B}. The loop at line \ref{lin:repeat} is iterated at most $N$ times since the distance between $T$ and $T'$ decreases at every iteration. Bellman-Ford algorithm has complexity $O(\#\mathrm{Vertices}\#\mathrm{Edges})$, which in our case amounts to $O(N^2\# Z)$ under \eqref{hyp:A} and $O(N^2 \#Z \#V)$ under \eqref{hyp:B}.
|
||||
|
||||
\begin{algorithm}
|
||||
|
|
@ -637,7 +637,7 @@ We try to maximize $s^*$ defined in \eqref{eq:optimal}. So we can compute the op
|
|||
|
||||
\subsection{Computation of a candidate assignment}
|
||||
|
||||
To compute a candidate assignment (that does not optimize zone spreading nor distance to a previous assignment yet), we can use the folowing flow problem.
|
||||
To compute a candidate assignment (that does not optimize zone spreading nor distance to a previous assignment yet), we can use the following flow problem.
|
||||
|
||||
Define the oriented weighted graph $(X,E)$. The set of vertices $X$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
|
||||
$\mathbf{x}_p, \mathbf{u}^+_p, \mathbf{u}^-_p$ for every partition $p$, vertices $\mathbf{y}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{z}_v$ for every node $v$.
|
||||
|
|
@ -680,14 +680,14 @@ Given the flow $f$, let $G_f=(X',E_f)$ be the multi-graph where $X' = X\setminus
|
|||
\end{itemize}
|
||||
To summarize, arcs are oriented left to right if they correspond to a presence of flow in $f$, and right to left if they correspond to an absence of flow. They are positively weighted if we want them to stay at their current state, and negatively if we want them to switch. Let us compute the weight of such graph.
|
||||
|
||||
\begin{multline*}
|
||||
\begin{multiline*}
|
||||
w(G_f) = \sum_{e\in E_f} w(e_f) \\
|
||||
=
|
||||
(\alpha - \beta -\gamma) N_1 + (\alpha +\beta - \gamma) N_2 + (\alpha+\beta+\gamma) N_3
|
||||
\\ +
|
||||
\#V\times N - 4 \sum_p 3-\#(T_p\cap T'_p) \\
|
||||
=(\#V-12+\alpha-\beta-\gamma)\times N + 4Q_V + 2\beta N_2 + 2(\beta+\gamma) N_3 \\
|
||||
\end{multline*}
|
||||
\end{multiline*}
|
||||
|
||||
As for the mode 3-strict, one can check that the difference of two such graphs corresponding to the same $(n_v)$ is always eulerian. Hence we can navigate in this class with the same greedy algorithm that discovers positive cycles and flips them.
|
||||
|
||||
|
|
|
|||
17
doc/talks/2025-10-06-josy/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
*
|
||||
|
||||
!*.txt
|
||||
!*.md
|
||||
|
||||
!assets
|
||||
|
||||
!.gitignore
|
||||
!*.svg
|
||||
!*.png
|
||||
!*.jpg
|
||||
!*.tex
|
||||
!Makefile
|
||||
!.gitignore
|
||||
!assets/*.drawio.pdf
|
||||
|
||||
!talk.pdf
|
||||
19
doc/talks/2025-10-06-josy/Makefile
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
ASSETS=../assets/lattice/lattice1.pdf_tex \
|
||||
../assets/lattice/lattice2.pdf_tex \
|
||||
../assets/lattice/lattice3.pdf_tex \
|
||||
../assets/lattice/lattice4.pdf_tex \
|
||||
../assets/lattice/lattice5.pdf_tex \
|
||||
../assets/lattice/lattice6.pdf_tex \
|
||||
../assets/lattice/lattice7.pdf_tex \
|
||||
../assets/lattice/lattice8.pdf_tex \
|
||||
../assets/logos/deuxfleurs.pdf \
|
||||
../assets/timeline-22-24.pdf
|
||||
|
||||
talk.pdf: talk.tex $(ASSETS)
|
||||
pdflatex talk.tex
|
||||
|
||||
%.pdf: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@
|
||||
|
||||
%.pdf_tex: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@ --export-latex
|
||||
BIN
doc/talks/2025-10-06-josy/talk.pdf
Normal file
702
doc/talks/2025-10-06-josy/talk.tex
Normal file
|
|
@ -0,0 +1,702 @@
|
|||
\nonstopmode
|
||||
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
|
||||
\usepackage[utf8]{inputenc}
|
||||
% \usepackage[frenchb]{babel}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{breqn}
|
||||
\usepackage{multirow}
|
||||
\usetheme{boxes}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{import}
|
||||
\usepackage{adjustbox}
|
||||
\usepackage[absolute,overlay]{textpos}
|
||||
%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
|
||||
%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
|
||||
\useoutertheme{infolines}
|
||||
\setbeamertemplate{headline}{}
|
||||
|
||||
\beamertemplatenavigationsymbolsempty
|
||||
|
||||
\definecolor{TitleOrange}{RGB}{255,137,0}
|
||||
\setbeamercolor{title}{fg=TitleOrange}
|
||||
\setbeamercolor{frametitle}{fg=TitleOrange}
|
||||
|
||||
\definecolor{ListOrange}{RGB}{255,145,5}
|
||||
\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
|
||||
|
||||
\definecolor{verygrey}{RGB}{70,70,70}
|
||||
\setbeamercolor{normal text}{fg=verygrey}
|
||||
|
||||
|
||||
\usepackage{tabu}
|
||||
\usepackage{multicol}
|
||||
\usepackage{vwcol}
|
||||
\usepackage{stmaryrd}
|
||||
\usepackage{graphicx}
|
||||
|
||||
\usepackage[normalem]{ulem}
|
||||
|
||||
\AtBeginSection[]{
|
||||
\begin{frame}
|
||||
\vfill
|
||||
\centering
|
||||
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
|
||||
\usebeamerfont{title}\insertsectionhead\par%
|
||||
\end{beamercolorbox}
|
||||
\vfill
|
||||
\end{frame}
|
||||
}
|
||||
|
||||
\title{Garage, an S3 backend as reliable as possible}
|
||||
\author{Garage Authors}
|
||||
\date{JoSy S3, 2025-10-08}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\begin{frame}
|
||||
\centering
|
||||
\includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
|
||||
\vspace{1em}
|
||||
|
||||
{\large\bf Garage, an S3 backend as reliable as possible}
|
||||
\vspace{1em}
|
||||
|
||||
\url{https://garagehq.deuxfleurs.fr/}\\
|
||||
\url{mailto:garagehq@deuxfleurs.fr}\\
|
||||
\texttt{\#garage:deuxfleurs.fr} on Matrix
|
||||
\end{frame}
|
||||
|
||||
|
||||
\section{Meet Garage}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{A non-profit initiative}
|
||||
|
||||
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Part of a degrowth initiative}\\
|
||||
Garage has been created at Deuxfleurs where we experiment running Internet services without datacenter on commodity and refurbished hardware.
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/community.png}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Developed by a community}\\
|
||||
{\small Some recent contributors: Arthur C, Charles H, dongdigua, Etienne L, Jonah A, Julien K, Lapineige, MagicRR, Milas B, Niklas M, RockWolf, Schwitzd, trinity-1686a, Xavier S, babykart, Baptiste J, eddster2309, James O'C, Joker9944, Maximilien R, Renjaya RZ, Yureka...}
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/AGPLv3_Logo.png}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Owned by nobody, open-core is impossible, zero VC money}\\
|
||||
AGPL + no Contributor License Agreement = Garage ownership spreads among hundreds of contributors.
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Getting support for Garage}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Alex Auvolat}\\
|
||||
PhD; co-founder of Deuxfleurs\\
|
||||
Garage maintainer, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.3\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/support.png}
|
||||
\end{column}
|
||||
\begin{column}{.1\textwidth}
|
||||
~
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/quentin.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Quentin Dufour}\\
|
||||
PhD; co-founder of Deuxfleurs\\
|
||||
Garage contributor, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
For support requests, write at: \\
|
||||
\url{garagehq@deuxfleurs.fr}
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/armael.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Armaël Guéneau}\\
|
||||
PhD; member of Deuxfleurs\\
|
||||
Garage contributor, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
Eligible: email support, architecture design, specific feature development, etc.
|
||||
\end{column}
|
||||
\end{columns}
|
||||
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Our initial goal}
|
||||
|
||||
\centering
|
||||
\Large
|
||||
|
||||
Being a self-sovereign community, free to make our own degrowth choices
|
||||
|
||||
$\big\downarrow$
|
||||
|
||||
To us as web citizens, datacenters are big black boxes. \\
|
||||
We want to leave them and manage our servers autonomously.
|
||||
|
||||
$\big\downarrow$
|
||||
|
||||
We want reliable services without relying on dedicated hardware or places.
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Building a resilient system with cheap stuff}
|
||||
|
||||
\only<1,4-7>{
|
||||
\begin{itemize}
|
||||
\item \textcolor<5->{gray}{Commodity hardware (e.g. old desktop PCs)\\
|
||||
\vspace{.5em}
|
||||
\visible<4->{{\footnotesize (can die at any time)}}}
|
||||
\vspace{1.5em}
|
||||
\item<5-> \textcolor<7->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
|
||||
\vspace{.5em}
|
||||
\visible<6->{{\footnotesize (can be unavailable randomly)}}}
|
||||
\vspace{1.5em}
|
||||
\item<7-> \textbf{Geographical redundancy} (multi-site replication)
|
||||
\end{itemize}
|
||||
}
|
||||
\only<2>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
|
||||
\end{center}
|
||||
}
|
||||
\only<3>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/atuin.jpg}
|
||||
\end{center}
|
||||
}
|
||||
\only<8>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
|
||||
\end{center}
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Object storage: a crucial component}
|
||||
\begin{center}
|
||||
\includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
|
||||
\hspace{3em}
|
||||
\visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
|
||||
\hspace{3em}
|
||||
\visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
|
||||
\end{center}
|
||||
\vspace{1em}
|
||||
S3: a de-facto standard, many compatible applications
|
||||
|
||||
\vspace{1em}
|
||||
\visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{CRDTs / weak consistency instead of consensus}
|
||||
|
||||
\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
|
||||
|
||||
\vspace{2em}
|
||||
Why not Raft, Paxos, ...? Issues of consensus algorithms:
|
||||
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item<2-> \textbf{Software complexity}
|
||||
\vspace{1em}
|
||||
\item<3-> \textbf{Performance issues:}
|
||||
\vspace{.5em}
|
||||
\begin{itemize}
|
||||
\item<4-> The leader is a \textbf{bottleneck} for all requests\\
|
||||
\vspace{.5em}
|
||||
\item<5-> \textbf{Sensitive to higher latency} between nodes
|
||||
\vspace{.5em}
|
||||
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{The data model of object storage}
|
||||
Object storage is basically a \textbf{key-value store}:
|
||||
\vspace{.5em}
|
||||
|
||||
{\scriptsize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|p{7cm}|}
|
||||
\hline
|
||||
\textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 24929} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{img/logo.svg} &
|
||||
\texttt{Content-Type: text/svg+xml} \newline
|
||||
\texttt{Content-Length: 13429} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{download/index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 26563} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item<2> Maps well to CRDT data types
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Performance gains in practice}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ======================================== OPERATING
|
||||
% ======================================== OPERATING
|
||||
% ======================================== OPERATING
|
||||
|
||||
|
||||
\section{Production clusters}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Deployment kinds}
|
||||
|
||||
\includegraphics[width=.9\linewidth]{../assets/cluster_kind.png}
|
||||
\vspace{1em}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{How big are they?}
|
||||
|
||||
\includegraphics[width=.9\linewidth]{../assets/cluster_size.png}
|
||||
\vspace{1em}
|
||||
|
||||
\textit{"Petabyte storage setup for a video site. Nginx as CDN in-front using garage-s3-website feature. Each storage node has ~64TB storage with raid10, no replication within garage. 25gbit nic. haproxy to loadbalance across 5 nodes. mostly reads with very few writes."}
|
||||
|
||||
\vspace{1em}
|
||||
\textit{"We currently manage 7 Garage nodes, 28TB total storage, 6M blocks for 3M objects and 4TB of object data. We have been running Garage in production for 2.5 years."}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Operating Garage}
|
||||
\begin{center}
|
||||
\only<1-2>{
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
|
||||
\\\vspace{1em}
|
||||
\visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
|
||||
}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Garage's architecture}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.45\linewidth]{../assets/garage.drawio.pdf}}%
|
||||
\only<2>{\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}}%
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Digging deeper}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
|
||||
\only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
|
||||
\only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Potential limitations and bottlenecks}
|
||||
\begin{itemize}
|
||||
\item Global:
|
||||
\begin{itemize}
|
||||
\item Max. $\sim$100 nodes per cluster (excluding gateways)
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Metadata:
|
||||
\begin{itemize}
|
||||
\item One big bucket = bottleneck, object list on 3 nodes only
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Block manager:
|
||||
\begin{itemize}
|
||||
\item Lots of small files on disk
|
||||
\item Processing the resync queue can be slow
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Deployment advice for very large clusters}
|
||||
\begin{itemize}
|
||||
\item Metadata storage:
|
||||
\begin{itemize}
|
||||
\item ZFS mirror (x2) on fast NVMe
|
||||
\item Use LMDB storage engine
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Data block storage:
|
||||
\begin{itemize}
|
||||
\item Use Garage's native multi-HDD support
|
||||
\item XFS on individual drives
|
||||
\item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
|
||||
\item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Other:
|
||||
\begin{itemize}
|
||||
\item Split data over several buckets
|
||||
\item Use less than 100 storage nodes
|
||||
\item Use gateway nodes
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Focus on Deuxfleurs}
|
||||
|
||||
Host institutional websites, partnership with a web agency.
|
||||
Matrix media backend.
|
||||
|
||||
Plan to use it as an email backend for an internally developed email server.
|
||||
|
||||
\end{frame}
|
||||
|
||||
|
||||
% ======================================== TIMELINE
|
||||
% ======================================== TIMELINE
|
||||
% ======================================== TIMELINE
|
||||
|
||||
\section{Recent developments}
|
||||
|
||||
% ====================== v0.7.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{April 2022 - Garage v0.7.0}
|
||||
Focus on \underline{observability and ecosystem integration}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item \textbf{Monitoring:} metrics and traces, using OpenTelemetry
|
||||
\vspace{1em}
|
||||
\item Replication modes with 1 or 2 copies / weaker consistency
|
||||
\vspace{1em}
|
||||
\item Kubernetes integration for node discovery
|
||||
\vspace{1em}
|
||||
\item Admin API (v0.7.2)
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Metrics (Prometheus + Grafana)}
|
||||
\begin{center}
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Traces (Jaeger)}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v0.8.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{November 2022 - Garage v0.8.0}
|
||||
Focus on \underline{performance}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item \textbf{Alternative metadata DB engines} (LMDB, Sqlite)
|
||||
\vspace{1em}
|
||||
\item \textbf{Performance improvements:} block streaming, various optimizations...
|
||||
\vspace{1em}
|
||||
\item Bucket quotas (max size, max \#objects)
|
||||
\vspace{1em}
|
||||
\item Quality of life improvements, observability, etc.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{About metadata DB engines}
|
||||
\textbf{Issues with Sled:}
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item Huge files on disk
|
||||
\vspace{.5em}
|
||||
\item Unpredictable performance, especially on HDD
|
||||
\vspace{.5em}
|
||||
\item API limitations
|
||||
\vspace{.5em}
|
||||
\item Not actively maintained
|
||||
\end{itemize}
|
||||
|
||||
\vspace{2em}
|
||||
\textbf{LMDB:} very stable, good performance, file size is reasonable\\
|
||||
\textbf{Sqlite} also available as a second choice
|
||||
|
||||
\vspace{1em}
|
||||
Sled will be removed in Garage v1.0
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{DB engine performance comparison}
|
||||
\begin{center}
|
||||
\includegraphics[width=.6\linewidth]{../assets/perf/db_engine.png}
|
||||
\end{center}
|
||||
NB: Sqlite was slow due to synchronous mode, now configurable
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Block streaming}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-1.png}}
|
||||
\only<2>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-2.png}}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{TTFB benchmark}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/perf/ttfb.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Throughput benchmark}
|
||||
\begin{center}
|
||||
\includegraphics[width=.7\linewidth]{../assets/perf/io-0.7-0.8-minio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v0.9.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{October 2023 - Garage v0.9.0}
|
||||
Focus on \underline{streamlining \& usability}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item Support multiple HDDs per node
|
||||
\vspace{1em}
|
||||
\item S3 compatibility:
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item support basic lifecycle configurations
|
||||
\vspace{.5em}
|
||||
\item allow for multipart upload part retries
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item LMDB by default, deprecation of Sled
|
||||
\vspace{1em}
|
||||
\item New layout computation algorithm
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Layout computation}
|
||||
\begin{overprint}
|
||||
\onslide<1>
|
||||
\begin{center}
|
||||
\includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
|
||||
\end{center}
|
||||
\onslide<2>
|
||||
\begin{center}
|
||||
\includegraphics[width=.7\linewidth]{../assets/map.png}
|
||||
\end{center}
|
||||
\end{overprint}
|
||||
\vspace{1em}
|
||||
Garage stores replicas in different zones when possible
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{What a "layout" is}
|
||||
\textbf{A layout is a precomputed index table:}
|
||||
\vspace{1em}
|
||||
|
||||
{\footnotesize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|l|l|l|}
|
||||
\hline
|
||||
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||
\hline
|
||||
\hline
|
||||
Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
|
||||
\hline
|
||||
Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
|
||||
\hline
|
||||
Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
|
||||
\hline
|
||||
Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{2em}
|
||||
\visible<2->{
|
||||
The index table is built centrally using an optimal algorithm,\\
|
||||
then propagated to all nodes
|
||||
}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{
|
||||
\footnotesize
|
||||
Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
|
||||
|
||||
% ====================== v1.0.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{April 2024 - Garage v1.0.0}
|
||||
Focus on \underline{consistency, security \& stability}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item Fix consistency issues when reshuffling data (Jepsen testing)
|
||||
\vspace{1em}
|
||||
\item \textbf{Security audit} by Radically Open Security
|
||||
\vspace{1em}
|
||||
\item Misc. S3 features (SSE-C, checksums, ...) and compatibility fixes
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v2.0.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Garage v2.0.0}
|
||||
Focus on \underline{}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item TODO
|
||||
\end{itemize}
|
||||
\end{frame}

\begin{frame}
\frametitle{Currently funding...}

\textit{...}
\end{frame}

\begin{frame}
\frametitle{We run community surveys}
\begin{center}
\includegraphics[width=.6\linewidth]{../assets/survey_requested_features.png}
\end{center}
\end{frame}

% ======================================== END
% ======================================== END
% ======================================== END

\begin{frame}
\frametitle{Where to find us}
\begin{center}
\includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
\vspace{-1em}
\url{https://garagehq.deuxfleurs.fr/}\\
\url{mailto:garagehq@deuxfleurs.fr}\\
\texttt{\#garage:deuxfleurs.fr} on Matrix

\vspace{1.5em}
\includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
\includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
\end{center}
\end{frame}

\end{document}

%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :
BIN
doc/talks/assets/armael.jpg
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
doc/talks/assets/cluster_kind.png
Normal file
|
After Width: | Height: | Size: 50 KiB |
BIN
doc/talks/assets/cluster_size.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
BIN
doc/talks/assets/community.png
Normal file
|
After Width: | Height: | Size: 6.1 KiB |
BIN
doc/talks/assets/quentin.jpg
Normal file
|
After Width: | Height: | Size: 123 KiB |
BIN
doc/talks/assets/support.png
Normal file
|
After Width: | Height: | Size: 7.9 KiB |
BIN
doc/talks/assets/tl.drawio.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
7
flake.lock
generated
|
|
@ -12,16 +12,17 @@
|
|||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"locked": {
|
||||
"lastModified": 1717312683,
|
||||
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
|
||||
"lastModified": 1761640442,
|
||||
"narHash": "sha256-AtrEP6Jmdvrqiv4x2xa5mrtaIp3OEe8uBYCDZDS+hu8=",
|
||||
"owner": "nix-community",
|
||||
"repo": "flake-compat",
|
||||
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
|
||||
"rev": "4a56054d8ffc173222d09dad23adf4ba946c8884",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,8 @@
|
|||
"github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
|
||||
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
inputs.crane.url = "github:ipetkov/crane";
|
||||
# Crane as of 2025-01-24
|
||||
inputs.crane.url = "github:ipetkov/crane/6fe74265bbb6d016d663b1091f015e2976c4a527";
|
||||
|
||||
inputs.flake-compat.url = "github:nix-community/flake-compat";
|
||||
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||
|
|
@ -66,7 +67,7 @@
|
|||
clippy = lints.garage-cargo-clippy;
|
||||
};
|
||||
|
||||
# ---- developpment shell, for making native builds only ----
|
||||
# ---- development shell, for making native builds only ----
|
||||
devShells =
|
||||
let
|
||||
targets = compile {
|
||||
|
|
@ -89,6 +90,9 @@
|
|||
cargo-outdated
|
||||
cargo-machete
|
||||
nixpkgs-fmt
|
||||
openssl
|
||||
socat
|
||||
killall
|
||||
];
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -167,7 +167,7 @@ let
|
|||
</ul></p>
|
||||
<p> Sources:
|
||||
<ul>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">gitea</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">Forgejo</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.zip">.zip</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.tar.gz">.tar.gz</a></li>
|
||||
</ul></p>
|
||||
|
|
|
|||
|
|
@ -17,13 +17,19 @@ else
|
|||
fi
|
||||
|
||||
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
|
||||
if [ "$GARAGE_08" = "1" ]; then
|
||||
if [ "$GARAGE_OLDVER" = "v08" ]; then
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
|
||||
else
|
||||
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
|
||||
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
|
||||
elif [ "$GARAGE_OLDVER" = "v1" ]; then
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
|
||||
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
|
||||
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
|
||||
else
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml json-api CreateKey '{"name":"opérateur"}')
|
||||
ACCESS_KEY=`echo $KEY_INFO|jq -r .accessKeyId`
|
||||
SECRET_KEY=`echo $KEY_INFO|jq -r .secretAccessKey`
|
||||
fi
|
||||
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
|
||||
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
|
||||
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
|
||||
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,12 @@ for count in $(seq 1 3); do
|
|||
CONF_PATH="/tmp/config.$count.toml"
|
||||
LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
|
||||
|
||||
if [ "$GARAGE_OLDVER" == "v08" ]; then
|
||||
REPLICATION_MODE="replication_mode = \"3\""
|
||||
else
|
||||
REPLICATION_MODE="replication_factor = 3"
|
||||
fi
|
||||
|
||||
cat > $CONF_PATH <<EOF
|
||||
block_size = 1048576 # objects are split in blocks of maximum this number of bytes
|
||||
metadata_dir = "/tmp/garage-meta-$count"
|
||||
|
|
@ -38,7 +44,7 @@ data_dir = "/tmp/garage-data-$count"
|
|||
rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
|
||||
rpc_public_addr = "127.0.0.1:$((3900+$count))"
|
||||
bootstrap_peers = []
|
||||
replication_mode = "3"
|
||||
$REPLICATION_MODE
|
||||
rpc_secret = "$NETWORK_SECRET"
|
||||
|
||||
[s3_api]
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
|
|||
sleep 1
|
||||
done
|
||||
|
||||
if [ "$GARAGE_08" = "1" ]; then
|
||||
if [ "$GARAGE_OLDVER" = "v08" ]; then
|
||||
$GARAGE_BIN -c /tmp/config.1.toml status \
|
||||
| grep 'NO ROLE' \
|
||||
| grep -Po '^[0-9a-f]+' \
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
|
||||
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
|
||||
export AWS_DEFAULT_REGION='garage'
|
||||
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
|
||||
# FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
|
||||
function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
|
||||
|
||||
|
|
|
|||
|
|
@ -2,8 +2,8 @@ apiVersion: v2
|
|||
name: garage
|
||||
description: S3-compatible object store for small self-hosted geo-distributed deployments
|
||||
type: application
|
||||
version: 0.7.3
|
||||
appVersion: "v1.3.1"
|
||||
version: 0.9.2
|
||||
appVersion: "v2.2.0"
|
||||
home: https://garagehq.deuxfleurs.fr/
|
||||
icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
# garage
|
||||
|
||||
  
|
||||
  
|
||||
|
||||
S3-compatible object store for small self-hosted geo-distributed deployments
|
||||
|
||||
|
|
@ -15,6 +15,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
|
|||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | |
|
||||
| commonLabels | object | `{}` | Extra labels for all resources |
|
||||
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
|
||||
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
|
||||
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
|
||||
|
|
@ -22,15 +23,16 @@ S3-compatible object store for small self-hosted geo-distributed deployments
|
|||
| extraVolumeMounts | object | `{}` | |
|
||||
| extraVolumes | object | `{}` | |
|
||||
| fullnameOverride | string | `""` | |
|
||||
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
|
||||
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size |
|
||||
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
|
||||
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
|
||||
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
|
||||
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level |
|
||||
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine |
|
||||
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
|
||||
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
|
||||
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources |
|
||||
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
|
||||
| garage.replicationFactor | string | `"3"` | Default to 3 replicas, see the replication_factor section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor |
|
||||
| garage.consistencyMode | string | `"consistent"` | Default to read-after-write consistency, see the consistency_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode |
|
||||
| garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
|
||||
| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
|
||||
| garage.rpcBindAddr | string | `"[::]:3901"` | |
|
||||
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
|
||||
| garage.s3.api.region | string | `"garage"` | |
|
||||
|
|
@ -74,7 +76,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
|
|||
| persistence.enabled | bool | `true` | |
|
||||
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
|
||||
| persistence.meta.size | string | `"100Mi"` | |
|
||||
| podAnnotations | object | `{}` | additonal pod annotations |
|
||||
| podAnnotations | object | `{}` | additional pod annotations |
|
||||
| podSecurityContext.fsGroup | int | `1000` | |
|
||||
| podSecurityContext.runAsGroup | int | `1000` | |
|
||||
| podSecurityContext.runAsNonRoot | bool | `true` | |
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ If release name contains chart name it will be used as a full name.
|
|||
Create the name of the rpc secret
|
||||
*/}}
|
||||
{{- define "garage.rpcSecretName" -}}
|
||||
{{- printf "%s-rpc-secret" (include "garage.fullname" .) -}}
|
||||
{{- .Values.garage.existingRpcSecret | default (printf "%s-rpc-secret" (include "garage.fullname" .)) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
|
@ -47,6 +47,9 @@ helm.sh/chart: {{ include "garage.chart" . }}
|
|||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 0 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
|
|
|||
|
|
@ -13,9 +13,10 @@ data:
|
|||
|
||||
db_engine = "{{ .Values.garage.dbEngine }}"
|
||||
|
||||
block_size = {{ .Values.garage.blockSize }}
|
||||
block_size = "{{ .Values.garage.blockSize }}"
|
||||
|
||||
replication_mode = "{{ .Values.garage.replicationMode }}"
|
||||
replication_factor = {{ .Values.garage.replicationFactor }}
|
||||
consistency_mode = "{{ .Values.garage.consistencyMode }}"
|
||||
|
||||
compression_level = {{ .Values.garage.compressionLevel }}
|
||||
|
||||
|
|
@ -27,8 +28,16 @@ data:
|
|||
# rpc_secret will be populated by the init container from a k8s secret object
|
||||
rpc_secret = "__RPC_SECRET_REPLACE__"
|
||||
|
||||
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
|
||||
bootstrap_peers = [
|
||||
{{- range $index, $peer := .Values.garage.bootstrapPeers }}
|
||||
{{- if $index}}, {{ end }}{{ $peer | quote }}
|
||||
{{ end }}
|
||||
]
|
||||
|
||||
{{- if .Values.garage.additionalTopLevelConfig }}
|
||||
{{ .Values.garage.additionalTopLevelConfig | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
[kubernetes_discovery]
|
||||
namespace = "{{ .Release.Namespace }}"
|
||||
service_name = "{{ include "garage.fullname" . }}"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
{{- if not .Values.garage.existingRpcSecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
|
|
@ -12,3 +13,4 @@ data:
|
|||
{{- $prevRpcSecret := $prevSecretData.rpcSecret | default "" | b64dec }}
|
||||
{{/* Priority is: 1. from values, 2. previous value, 3. generate random */}}
|
||||
rpcSecret: {{ .Values.garage.rpcSecret | default $prevRpcSecret | default (include "jupyterhub.randHex" 64) | b64enc | quote }}
|
||||
{{- end }}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ spec:
|
|||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "garage.selectorLabels" . | nindent 8 }}
|
||||
{{- include "garage.labels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
|
|
|
|||
|
|
@ -2,23 +2,32 @@
|
|||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- Additional labels to add to all resources created by this chart
|
||||
commonLabels: {}
|
||||
# app.kubernetes.io/part-of: storage
|
||||
# team: platform
|
||||
|
||||
# Garage configuration. These values go to garage.toml
|
||||
garage:
|
||||
# -- Can be changed for better performance on certain systems
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine
|
||||
dbEngine: "lmdb"
|
||||
|
||||
# -- Defaults is 1MB
|
||||
# An increase can result in better performance in certain scenarios
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size
|
||||
blockSize: "1048576"
|
||||
|
||||
# -- Default to 3 replicas, see the replication_mode section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
|
||||
replicationMode: "3"
|
||||
# -- Default to 3 replicas, see the replication_factor section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor
|
||||
replicationFactor: "3"
|
||||
|
||||
# -- By default, enable read-after-write consistency guarantees, see the consistency_mode section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode
|
||||
consistencyMode: "consistent"
|
||||
|
||||
# -- zstd compression level of stored blocks
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level
|
||||
compressionLevel: "1"
|
||||
|
||||
# -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
|
||||
|
|
@ -28,10 +37,14 @@ garage:
|
|||
rpcBindAddr: "[::]:3901"
|
||||
# -- If not given, a random secret will be generated and stored in a Secret object
|
||||
rpcSecret: ""
|
||||
# -- If you want to provide an rpcSecret within an existing k8s secret,
|
||||
# specify the secret name here, and store the value under the secret key `rpcSecret`
|
||||
# the default secret will not be created
|
||||
existingRpcSecret: ""
|
||||
# -- This is not required if you use the integrated kubernetes discovery
|
||||
bootstrapPeers: []
|
||||
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
|
||||
# of the helm chart, for example if you operate at namespace level without cluster ressources
|
||||
# of the helm chart, for example if you operate at namespace level without cluster resources
|
||||
kubernetesSkipCrd: false
|
||||
s3:
|
||||
api:
|
||||
|
|
@ -41,6 +54,12 @@ garage:
|
|||
rootDomain: ".web.garage.tld"
|
||||
index: "index.html"
|
||||
|
||||
# -- Additional configuration to append to garage.toml. Use a multi-line string for custom config.
|
||||
# Example:
|
||||
# additionalTopLevelConfig: |-
|
||||
# data_fsync = true
|
||||
additionalTopLevelConfig: ""
|
||||
|
||||
# -- if not empty string, allow using an existing ConfigMap for the garage.toml,
|
||||
# if set, ignores garage.toml
|
||||
existingConfigMap: ""
|
||||
|
|
@ -101,13 +120,14 @@ serviceAccount:
|
|||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# -- additonal pod annotations
|
||||
# -- additional pod annotations
|
||||
podAnnotations: {}
|
||||
|
||||
podSecurityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
runAsNonRoot: true
|
||||
|
||||
securityContext:
|
||||
|
|
@ -189,7 +209,7 @@ ingress:
|
|||
# - kubernetes.docker.internal
|
||||
|
||||
resources: {}
|
||||
# The following are indicative for a small-size deployement, for anything serious double them.
|
||||
# The following are indicative for a small-size deployment, for anything serious double them.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 1024Mi
|
||||
|
|
|
|||
|
|
@ -127,7 +127,7 @@ They are due to the download being interrupted in the middle (^C during first la
|
|||
Add `:force?` to the `cached-wget!` call in `daemon.clj` to re-download the binary,
|
||||
or restart the VMs to clear temporary files.
|
||||
|
||||
### In `jepsen.garage`: prefix wierdness
|
||||
### In `jepsen.garage`: prefix weirdness
|
||||
|
||||
In `store/garage set1/20231019T163358.615+0200`:
|
||||
|
||||
|
|
@ -146,12 +146,12 @@ and passing all values that were previously in the context (creds and prefix) as
|
|||
The reg2 test is our custom checker for CRDT read-after-write on individual object keys, acting as registers which can be updated.
|
||||
The test fails without the timestamp fix, which is expected as the clock scrambler will prevent nodes from having a correct ordering of objects.
|
||||
|
||||
With the timestamp fix (`--patch tsfix1`), the happenned-before relationship should at least be respected, meaning that when a PutObject call starts
|
||||
With the timestamp fix (`--patch tsfix1`), the happened-before relationship should at least be respected, meaning that when a PutObject call starts
|
||||
after another PutObject call has ended, the second call should overwrite the value of the first call, and that value should not be
|
||||
readable by future GetObject calls.
|
||||
However, we observed inconsistencies even with the timestamp fix.
|
||||
|
||||
The inconsistencies seemed to always happenned after writing a nil value, which translates to a DeleteObject call
|
||||
The inconsistencies seemed to always happen after writing a nil value, which translates to a DeleteObject call
|
||||
instead of a PutObject. By removing the possibility of writing nil values, therefore only doing
|
||||
PutObject calls, the issue disappears. There is therefore an issue to fix in DeleteObject.
|
||||
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@
|
|||
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
|
||||
"rpc_public_addr = \"" node ":3901\"\n"
|
||||
"db_engine = \"lmdb\"\n"
|
||||
"replication_mode = \"3\"\n"
|
||||
"replication_factor = 3\n"
|
||||
"data_dir = \"" data-dir "\"\n"
|
||||
"metadata_dir = \"" meta-dir "\"\n"
|
||||
"[s3_api]\n"
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ data:
|
|||
metadata_dir = "/tmp/meta"
|
||||
data_dir = "/tmp/data"
|
||||
|
||||
replication_mode = "3"
|
||||
replication_factor = 3
|
||||
|
||||
rpc_bind_addr = "[::]:3901"
|
||||
rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec"
|
||||
|
|
|
|||
|
|
@ -694,32 +694,7 @@
|
|||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"__systemRef": "hideSeriesFrom",
|
||||
"matcher": {
|
||||
"id": "byNames",
|
||||
"options": {
|
||||
"mode": "exclude",
|
||||
"names": [
|
||||
"10.83.2.3:3903"
|
||||
],
|
||||
"prefix": "All except:",
|
||||
"readOnly": true
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
: '
|
||||
This script tests whether uploaded parts can be skipped in a
|
||||
CompleteMultipartUpoad
|
||||
CompleteMultipartUpload
|
||||
|
||||
On Minio: yes, parts can be skipped
|
||||
|
||||
|
|
@ -52,7 +52,7 @@
|
|||
|
||||
Conclusions:
|
||||
|
||||
- Skipping a part in a CompleteMultipartUpoad call is OK
|
||||
- Skipping a part in a CompleteMultipartUpload call is OK
|
||||
- The part is simply not included in the stored object
|
||||
- Sequential part renumbering counts only non-skipped parts
|
||||
'
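
(Aside, for illustration only and not part of this script: the same "skip part 2"
completion expressed with the Rust aws-sdk-s3 crate; the bucket and key names are
placeholders, and the completed-part types are assumed to live under
aws_sdk_s3::types as in recent versions of that SDK.)

use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};

// Complete a multipart upload while deliberately omitting part 2: only parts
// 1 and 3 are listed, so part 2 should simply not be included in the stored
// object, mirroring the behaviour this script checks.
async fn complete_skipping_part2(
    client: &aws_sdk_s3::Client,
    upload_id: &str,
    etag1: &str,
    etag3: &str,
) -> Result<(), aws_sdk_s3::Error> {
    let completed = CompletedMultipartUpload::builder()
        .parts(CompletedPart::builder().part_number(1).e_tag(etag1).build())
        .parts(CompletedPart::builder().part_number(3).e_tag(etag3).build())
        .build();
    client
        .complete_multipart_upload()
        .bucket("eprouvette")
        .key("multipart-skip-test")
        .upload_id(upload_id)
        .multipart_upload(completed)
        .send()
        .await?;
    Ok(())
}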
|
||||
|
|
|
|||
|
|
@ -112,6 +112,23 @@ if [ -z "$SKIP_S3CMD" ]; then
|
|||
done
|
||||
fi
|
||||
|
||||
# BOTO3
|
||||
if [ -z "$SKIP_BOTO3" ]; then
|
||||
echo "🛠️ Testing with boto3 for STREAMING-UNSIGNED-PAYLOAD-TRAILER"
|
||||
source ${SCRIPT_FOLDER}/dev-env-aws.sh
|
||||
AWS_ENDPOINT_URL=https://localhost:4443 python <<EOF
|
||||
import boto3
|
||||
client = boto3.client('s3', verify=False)
|
||||
print("Put&delete hello world object")
|
||||
client.put_object(Body=b'hello world', Bucket='eprouvette', Key='test.s3.txt')
|
||||
client.delete_object(Bucket='eprouvette', Key='test.s3.txt')
|
||||
print("Put&delete big object")
|
||||
client.upload_file("/tmp/garage.3.rnd", 'eprouvette', 'garage.3.rnd')
|
||||
client.delete_object(Bucket='eprouvette', Key='garage.3.rnd')
|
||||
print("OK!")
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Minio Client
|
||||
if [ -z "$SKIP_MC" ]; then
|
||||
echo "🛠️ Testing with mc (minio client)"
|
||||
|
|
|
|||
|
|
@ -24,9 +24,17 @@ echo "============= insert data into old version cluster ================="
|
|||
export GARAGE_BIN=/tmp/old_garage
|
||||
if echo $OLD_VERSION | grep 'v0\.8\.'; then
|
||||
echo "Detected Garage v0.8.x"
|
||||
export GARAGE_08=1
|
||||
export GARAGE_OLDVER=v08
|
||||
elif (echo $OLD_VERSION | grep 'v0\.9\.') || (echo $OLD_VERSION | grep 'v1\.'); then
|
||||
echo "Detected Garage v0.9.x / v1.x"
|
||||
export GARAGE_OLDVER=v1
|
||||
fi
|
||||
|
||||
if echo $OLD_VERSION | grep 'v1\.'; then
|
||||
DO_SSEC_TEST=1
|
||||
fi
|
||||
SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
|
||||
|
||||
echo "⏳ Setup cluster using old version"
|
||||
$GARAGE_BIN --version
|
||||
${SCRIPT_FOLDER}/dev-clean.sh
|
||||
|
|
@ -37,7 +45,23 @@ ${SCRIPT_FOLDER}/dev-bucket.sh
|
|||
|
||||
echo "🛠️ Inserting data in old cluster"
|
||||
source ${SCRIPT_FOLDER}/dev-env-rclone.sh
|
||||
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
|
||||
if [ "$DO_SSEC_TEST" = "1" ]; then
|
||||
# upload small file (should be single part)
|
||||
rclone copy "${SCRIPT_FOLDER}/test-upgrade.sh" garage:eprouvette/test-ssec \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
# do a multipart upload
|
||||
dd if=/dev/urandom of=/tmp/randfile-for-upgrade bs=5M count=5
|
||||
rclone copy "/tmp/randfile-for-upgrade" garage:eprouvette/test-ssec \
|
||||
--s3-chunk-size 5M \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
fi
|
||||
|
||||
echo "🏁 Stopping old cluster"
|
||||
killall -INT old_garage
|
||||
|
|
@ -47,7 +71,7 @@ killall -9 old_garage || true
|
|||
echo "🏁 Removing old garage version"
|
||||
rm -rv $GARAGE_BIN
|
||||
export -n GARAGE_BIN
|
||||
export -n GARAGE_08
|
||||
export -n GARAGE_OLDVER
|
||||
|
||||
echo "================ read data from new cluster ==================="
|
||||
|
||||
|
|
@ -60,7 +84,8 @@ ${SCRIPT_FOLDER}/dev-cluster.sh >> /tmp/garage.log 2>&1 &
|
|||
sleep 3
|
||||
|
||||
echo "🛠️ Retrieving data from old cluster"
|
||||
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
|
||||
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
|
||||
|
||||
if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then
|
||||
echo "TEST FAILURE: directories are different"
|
||||
|
|
@ -68,6 +93,23 @@ if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' '
|
|||
fi
|
||||
rm -r /tmp/test_dotgit
|
||||
|
||||
if [ "$DO_SSEC_TEST" = "1" ]; then
|
||||
rclone copy garage:eprouvette/test-ssec /tmp/test_ssec_out \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
if ! diff "/tmp/test_ssec_out/test-upgrade.sh" "${SCRIPT_FOLDER}/test-upgrade.sh"; then
|
||||
echo "SSEC-FAILURE (small file)"
|
||||
exit 1
|
||||
fi
|
||||
if ! diff "/tmp/test_ssec_out/randfile-for-upgrade" "/tmp/randfile-for-upgrade"; then
|
||||
echo "SSEC-FAILURE (big file)"
|
||||
exit 1
|
||||
fi
|
||||
rm -r /tmp/test_ssec_out
|
||||
rm /tmp/randfile-for-upgrade
|
||||
fi
|
||||
|
||||
echo "🏁 Teardown"
|
||||
rm -rf /tmp/garage-{data,meta}-*
|
||||
rm -rf /tmp/config.*.toml
|
||||
|
|
|
|||
|
|
@ -26,17 +26,21 @@ in
|
|||
s3cmd
|
||||
minio-client
|
||||
rclone
|
||||
(python313.withPackages (ps: [ ps.boto3 ]))
|
||||
|
||||
socat
|
||||
psmisc
|
||||
which
|
||||
openssl
|
||||
curl
|
||||
jq
|
||||
typos
|
||||
];
|
||||
shellHook = ''
|
||||
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
|
||||
|
||||
function to_s3 {
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
@ -48,7 +52,7 @@ in
|
|||
function to_docker {
|
||||
executor \
|
||||
--force \
|
||||
--customPlatform="$(echo "''${DOCKER_PLATFORM}" | sed 's/i386/386/')" \
|
||||
--custom-platform="$(echo "''${DOCKER_PLATFORM}" | sed 's/i386/386/')" \
|
||||
--destination "$(echo "''${CONTAINER_NAME}" | sed 's/i386/386/'):''${CONTAINER_TAG}" \
|
||||
--context dir://`pwd` \
|
||||
--verbosity=debug
|
||||
|
|
@ -93,6 +97,7 @@ in
|
|||
|
||||
nix-build nix/build_index.nix
|
||||
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
@ -100,6 +105,7 @@ in
|
|||
result/share/_releases.json \
|
||||
s3://garagehq.deuxfleurs.fr/
|
||||
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_api_admin"
|
||||
version = "1.3.1"
|
||||
version = "2.2.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
@ -14,7 +14,9 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
format_table.workspace = true
|
||||
garage_model.workspace = true
|
||||
garage_block.workspace = true
|
||||
garage_table.workspace = true
|
||||
garage_util.workspace = true
|
||||
garage_rpc.workspace = true
|
||||
|
|
@ -22,8 +24,11 @@ garage_api_common.workspace = true
|
|||
|
||||
argon2.workspace = true
|
||||
async-trait.workspace = true
|
||||
bytesize.workspace = true
|
||||
chrono.workspace = true
|
||||
thiserror.workspace = true
|
||||
hex.workspace = true
|
||||
paste.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
futures.workspace = true
|
||||
|
|
@ -34,10 +39,12 @@ url.workspace = true
|
|||
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
utoipa.workspace = true
|
||||
|
||||
opentelemetry.workspace = true
|
||||
opentelemetry-prometheus = { workspace = true, optional = true }
|
||||
prometheus = { workspace = true, optional = true }
|
||||
|
||||
[features]
|
||||
metrics = [ "opentelemetry-prometheus", "prometheus" ]
|
||||
metrics = ["opentelemetry-prometheus", "prometheus"]
|
||||
k2v = ["garage_model/k2v"]
|
||||
|
|
|
|||
292
src/api/admin/admin_token.rs
Normal file
|
|
@ -0,0 +1,292 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use garage_table::*;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::admin_token_table::*;
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for ListAdminTokensRequest {
|
||||
type Response = ListAdminTokensResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ListAdminTokensResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
let mut res = garage
|
||||
.admin_token_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|t| admin_token_info_results(t, now))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if garage.config.admin.metrics_token.is_some() {
|
||||
res.insert(
|
||||
0,
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "metrics_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["Metrics".into()],
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
if garage.config.admin.admin_token.is_some() {
|
||||
res.insert(
|
||||
0,
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "admin_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["*".into()],
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(ListAdminTokensResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for GetAdminTokenInfoRequest {
|
||||
type Response = GetAdminTokenInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetAdminTokenInfoResponse, Error> {
|
||||
let token = match (self.id, self.search) {
|
||||
(Some(id), None) => get_existing_admin_token(garage, &id).await?,
|
||||
(None, Some(search)) => {
|
||||
let candidates = garage
|
||||
.admin_token_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::MatchesAndNotDeleted(search.to_string())),
|
||||
10,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
if candidates.len() != 1 {
|
||||
return Err(Error::bad_request(format!(
|
||||
"{} matching admin tokens",
|
||||
candidates.len()
|
||||
)));
|
||||
}
|
||||
candidates.into_iter().next().unwrap()
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id or search must be provided (but not both)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(admin_token_info_results(&token, now_msec()))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for CreateAdminTokenRequest {
|
||||
type Response = CreateAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<CreateAdminTokenResponse, Error> {
|
||||
let (mut token, secret) = if self.0.name.is_some() {
|
||||
AdminApiToken::new("")
|
||||
} else {
|
||||
AdminApiToken::new(&format!("token_{}", Utc::now().format("%Y%m%d_%H%M")))
|
||||
};
|
||||
|
||||
apply_token_updates(&mut token, self.0)?;
|
||||
|
||||
garage.admin_token_table.insert(&token).await?;
|
||||
|
||||
Ok(CreateAdminTokenResponse {
|
||||
secret_token: secret,
|
||||
info: admin_token_info_results(&token, now_msec()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for UpdateAdminTokenRequest {
|
||||
type Response = UpdateAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<UpdateAdminTokenResponse, Error> {
|
||||
let mut token = get_existing_admin_token(garage, &self.id).await?;
|
||||
|
||||
apply_token_updates(&mut token, self.body)?;
|
||||
|
||||
garage.admin_token_table.insert(&token).await?;
|
||||
|
||||
Ok(UpdateAdminTokenResponse(admin_token_info_results(
|
||||
&token,
|
||||
now_msec(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for DeleteAdminTokenRequest {
|
||||
type Response = DeleteAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<DeleteAdminTokenResponse, Error> {
|
||||
let token = get_existing_admin_token(garage, &self.id).await?;
|
||||
|
||||
garage
|
||||
.admin_token_table
|
||||
.insert(&AdminApiToken::delete(token.prefix))
|
||||
.await?;
|
||||
|
||||
Ok(DeleteAdminTokenResponse)
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for GetCurrentAdminTokenInfoRequest {
|
||||
type Response = GetCurrentAdminTokenInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetCurrentAdminTokenInfoResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
if garage
|
||||
.config
|
||||
.admin
|
||||
.metrics_token
|
||||
.as_ref()
|
||||
.is_some_and(|s| s == &self.admin_token)
|
||||
{
|
||||
return Ok(GetCurrentAdminTokenInfoResponse(
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "metrics_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["Metrics".into()],
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
if garage
|
||||
.config
|
||||
.admin
|
||||
.admin_token
|
||||
.as_ref()
|
||||
.is_some_and(|s| s == &self.admin_token)
|
||||
{
|
||||
return Ok(GetCurrentAdminTokenInfoResponse(
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "admin_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["*".into()],
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (prefix, _) = self.admin_token.split_once('.').unwrap();
|
||||
let token = get_existing_admin_token(garage, &prefix.to_string()).await?;
|
||||
|
||||
Ok(GetCurrentAdminTokenInfoResponse(admin_token_info_results(
|
||||
&token, now,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
// ---- helpers ----
|
||||
|
||||
fn admin_token_info_results(token: &AdminApiToken, now: u64) -> GetAdminTokenInfoResponse {
|
||||
let params = token.params().unwrap();
|
||||
|
||||
GetAdminTokenInfoResponse {
|
||||
id: Some(token.prefix.clone()),
|
||||
created: Some(
|
||||
DateTime::from_timestamp_millis(params.created as i64)
|
||||
.expect("invalid timestamp stored in db"),
|
||||
),
|
||||
name: params.name.get().to_string(),
|
||||
expiration: params.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: params.is_expired(now),
|
||||
scope: params.scope.get().0.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_existing_admin_token(garage: &Garage, id: &String) -> Result<AdminApiToken, Error> {
|
||||
garage
|
||||
.admin_token_table
|
||||
.get(&EmptyKey, id)
|
||||
.await?
|
||||
.filter(|k| !k.state.is_deleted())
|
||||
.ok_or_else(|| Error::NoSuchAdminToken(id.to_string()))
|
||||
}
|
||||
|
||||
fn apply_token_updates(
|
||||
token: &mut AdminApiToken,
|
||||
updates: UpdateAdminTokenRequestBody,
|
||||
) -> Result<(), Error> {
|
||||
if updates.never_expires && updates.expiration.is_some() {
|
||||
return Err(Error::bad_request(
|
||||
"cannot specify `expiration` and `never_expires`",
|
||||
));
|
||||
}
|
||||
|
||||
let params = token.params_mut().unwrap();
|
||||
|
||||
if let Some(name) = updates.name {
|
||||
params.name.update(name);
|
||||
}
|
||||
if let Some(expiration) = updates.expiration {
|
||||
params
|
||||
.expiration
|
||||
.update(Some(expiration.timestamp_millis() as u64));
|
||||
}
|
||||
if updates.never_expires {
|
||||
params.expiration.update(None);
|
||||
}
|
||||
if let Some(scope) = updates.scope {
|
||||
params.scope.update(AdminApiTokenScope(scope));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1352
src/api/admin/api.rs
Normal file
|
|
@ -1,333 +1,237 @@
|
|||
use std::collections::HashMap;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use argon2::password_hash::PasswordHash;
|
||||
|
||||
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
|
||||
use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::watch;
|
||||
|
||||
use opentelemetry::trace::SpanRef;
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
use opentelemetry_prometheus::PrometheusExporter;
|
||||
#[cfg(feature = "metrics")]
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
use garage_rpc::{Endpoint as RpcEndpoint, *};
|
||||
use garage_table::EmptyKey;
|
||||
use garage_util::background::BackgroundRunner;
|
||||
use garage_util::data::Uuid;
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::socket_address::UnixOrTCPSocketAddress;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_api_common::generic_server::*;
|
||||
use garage_api_common::helpers::*;
|
||||
|
||||
use crate::bucket::*;
|
||||
use crate::cluster::*;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::key::*;
|
||||
use crate::router_v0;
|
||||
use crate::router_v1::{Authorization, Endpoint};
|
||||
use crate::router_v1;
|
||||
use crate::Authorization;
|
||||
use crate::RequestHandler;
|
||||
|
||||
// ---- FOR RPC ----
|
||||
|
||||
pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc";
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum AdminRpc {
|
||||
Proxy(AdminApiRequest),
|
||||
Internal(LocalAdminApiRequest),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum AdminRpcResponse {
|
||||
ProxyApiOkResponse(TaggedAdminApiResponse),
|
||||
InternalApiOkResponse(LocalAdminApiResponse),
|
||||
ApiErrorResponse {
|
||||
http_code: u16,
|
||||
error_code: String,
|
||||
message: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl Rpc for AdminRpc {
|
||||
type Response = Result<AdminRpcResponse, GarageError>;
|
||||
}
|
||||
|
||||
impl EndpointHandler<AdminRpc> for AdminApiServer {
|
||||
async fn handle(
|
||||
self: &Arc<Self>,
|
||||
message: &AdminRpc,
|
||||
_from: NodeID,
|
||||
) -> Result<AdminRpcResponse, GarageError> {
|
||||
match message {
|
||||
AdminRpc::Proxy(req) => {
|
||||
info!("Proxied admin API request: {}", req.name());
|
||||
let res = req.clone().handle(&self.garage, self).await;
|
||||
match res {
|
||||
Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())),
|
||||
Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
|
||||
http_code: e.http_status_code().as_u16(),
|
||||
error_code: e.code().to_string(),
|
||||
message: e.to_string(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
AdminRpc::Internal(req) => {
|
||||
info!("Internal admin API request: {}", req.name());
|
||||
let res = req.clone().handle(&self.garage, self).await;
|
||||
match res {
|
||||
Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)),
|
||||
Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
|
||||
http_code: e.http_status_code().as_u16(),
|
||||
error_code: e.code().to_string(),
|
||||
message: e.to_string(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---- FOR HTTP ----
|
||||
|
||||
pub type ResBody = BoxBody<Error>;
|
||||
|
||||
pub struct AdminApiServer {
|
||||
garage: Arc<Garage>,
|
||||
#[cfg(feature = "metrics")]
|
||||
exporter: PrometheusExporter,
|
||||
pub(crate) exporter: PrometheusExporter,
|
||||
metrics_token: Option<String>,
|
||||
metrics_require_token: bool,
|
||||
admin_token: Option<String>,
|
||||
pub(crate) background: Arc<BackgroundRunner>,
|
||||
pub(crate) endpoint: Arc<RpcEndpoint<AdminRpc, Self>>,
|
||||
}
|
||||
|
||||
pub enum HttpEndpoint {
|
||||
Old(router_v1::Endpoint),
|
||||
New(String),
|
||||
}
|
||||
|
||||
impl AdminApiServer {
|
||||
pub fn new(
|
||||
garage: Arc<Garage>,
|
||||
background: Arc<BackgroundRunner>,
|
||||
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
|
||||
) -> Self {
|
||||
) -> Arc<Self> {
|
||||
let cfg = &garage.config.admin;
|
||||
let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
|
||||
let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
|
||||
Self {
|
||||
let metrics_require_token = cfg.metrics_require_token;
|
||||
|
||||
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
|
||||
let admin = Arc::new(Self {
|
||||
garage,
|
||||
#[cfg(feature = "metrics")]
|
||||
exporter,
|
||||
metrics_token,
|
||||
metrics_require_token,
|
||||
admin_token,
|
||||
}
|
||||
background,
|
||||
endpoint,
|
||||
});
|
||||
admin.endpoint.set_handler(admin.clone());
|
||||
admin
|
||||
}
|
||||
|
||||
pub async fn run(
|
||||
self,
|
||||
self: Arc<Self>,
|
||||
bind_addr: UnixOrTCPSocketAddress,
|
||||
must_exit: watch::Receiver<bool>,
|
||||
) -> Result<(), GarageError> {
|
||||
let region = self.garage.config.s3_api.s3_region.clone();
|
||||
ApiServer::new(region, self)
|
||||
ApiServer::new(region, ArcAdminApiServer(self))
|
||||
.run_server(bind_addr, Some(0o220), must_exit)
|
||||
.await
|
||||
}
|
||||
|
||||
fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NO_CONTENT)
|
||||
.header(ALLOW, "OPTIONS, GET, POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
|
||||
.body(empty_body())?)
|
||||
}
|
||||
|
||||
async fn handle_check_domain(
|
||||
async fn handle_http_api(
|
||||
&self,
|
||||
req: Request<IncomingBody>,
|
||||
endpoint: HttpEndpoint,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let query_params: HashMap<String, String> = req
|
||||
.uri()
|
||||
.query()
|
||||
.map(|v| {
|
||||
url::form_urlencoded::parse(v.as_bytes())
|
||||
.into_owned()
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_else(HashMap::new);
|
||||
let auth_header = req.headers().get(AUTHORIZATION).cloned();
|
||||
|
||||
let has_domain_key = query_params.contains_key("domain");
|
||||
|
||||
if !has_domain_key {
|
||||
return Err(Error::bad_request("No domain query string found"));
|
||||
}
|
||||
|
||||
let domain = query_params
|
||||
.get("domain")
|
||||
.ok_or_internal_error("Could not parse domain query string")?;
|
||||
|
||||
if self.check_domain(domain).await? {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(string_body(format!(
|
||||
"Domain '{domain}' is managed by Garage"
|
||||
)))?)
|
||||
} else {
|
||||
Err(Error::bad_request(format!(
|
||||
"Domain '{domain}' is not managed by Garage"
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
|
||||
// Resolve bucket from domain name, inferring if the website must be activated for the
|
||||
// domain to be valid.
|
||||
let (bucket_name, must_check_website) = if let Some(bname) = self
|
||||
.garage
|
||||
.config
|
||||
.s3_api
|
||||
.root_domain
|
||||
.as_ref()
|
||||
.and_then(|rd| host_to_bucket(domain, rd))
|
||||
{
|
||||
(bname.to_string(), false)
|
||||
} else if let Some(bname) = self
|
||||
.garage
|
||||
.config
|
||||
.s3_web
|
||||
.as_ref()
|
||||
.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
|
||||
{
|
||||
(bname.to_string(), true)
|
||||
} else {
|
||||
(domain.to_string(), true)
|
||||
let request = match endpoint {
|
||||
HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
|
||||
HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?,
|
||||
};
|
||||
|
||||
let bucket_id = match self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&bucket_name)
|
||||
.await?
|
||||
{
|
||||
Some(bucket_id) => bucket_id,
|
||||
None => return Ok(false),
|
||||
};
|
||||
|
||||
if !must_check_website {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
|
||||
let bucket_state = bucket.state.as_option().unwrap();
|
||||
let bucket_website_config = bucket_state.website_config.get();
|
||||
|
||||
match bucket_website_config {
|
||||
Some(_v) => Ok(true),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_health(&self) -> Result<Response<ResBody>, Error> {
|
||||
let health = self.garage.system.health();
|
||||
|
||||
let (status, status_str) = match health.status {
|
||||
ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
|
||||
ClusterHealthStatus::Degraded => (
|
||||
StatusCode::OK,
|
||||
"Garage is operational but some storage nodes are unavailable",
|
||||
),
|
||||
ClusterHealthStatus::Unavailable => (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Quorum is not available for some/all partitions, reads and writes will fail",
|
||||
let (global_token_hash, token_required) = match request.authorization_type() {
|
||||
Authorization::None => (None, false),
|
||||
Authorization::MetricsToken => (
|
||||
self.metrics_token.as_deref(),
|
||||
self.metrics_token.is_some() || self.metrics_require_token,
|
||||
),
|
||||
Authorization::AdminToken => (self.admin_token.as_deref(), true),
|
||||
};
|
||||
let status_str = format!(
|
||||
"{}\nConsult the full health check API endpoint at /v1/health for more details\n",
|
||||
status_str
|
||||
);
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(status)
|
||||
.header(http::header::CONTENT_TYPE, "text/plain")
|
||||
.body(string_body(status_str))?)
|
||||
}
|
||||
|
||||
fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
|
||||
#[cfg(feature = "metrics")]
|
||||
{
|
||||
use opentelemetry::trace::Tracer;
|
||||
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
|
||||
let tracer = opentelemetry::global::tracer("garage");
|
||||
let metric_families = tracer.in_span("admin/gather_metrics", |_| {
|
||||
self.exporter.registry().gather()
|
||||
});
|
||||
|
||||
encoder
|
||||
.encode(&metric_families, &mut buffer)
|
||||
.ok_or_internal_error("Could not serialize metrics")?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(http::header::CONTENT_TYPE, encoder.format_type())
|
||||
.body(bytes_body(buffer.into()))?)
|
||||
if token_required {
|
||||
verify_authorization(&self.garage, global_token_hash, auth_header, request.name())?;
|
||||
}
|
||||
|
||||
match request {
|
||||
AdminApiRequest::Options(req) => req.handle(&self.garage, self).await,
|
||||
AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, self).await,
|
||||
AdminApiRequest::Health(req) => req.handle(&self.garage, self).await,
|
||||
AdminApiRequest::Metrics(req) => req.handle(&self.garage, self).await,
|
||||
req => {
|
||||
let res = req.handle(&self.garage, self).await?;
|
||||
let mut res = json_ok_response(&res)?;
|
||||
res.headers_mut()
|
||||
.insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
Err(Error::bad_request(
|
||||
"Garage was built without the metrics feature".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiHandler for AdminApiServer {
|
||||
struct ArcAdminApiServer(Arc<AdminApiServer>);
|
||||
|
||||
impl ApiHandler for ArcAdminApiServer {
|
||||
const API_NAME: &'static str = "admin";
|
||||
const API_NAME_DISPLAY: &'static str = "Admin";
|
||||
|
||||
type Endpoint = Endpoint;
|
||||
type Endpoint = HttpEndpoint;
|
||||
type Error = Error;
|
||||
|
||||
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
|
||||
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<HttpEndpoint, Error> {
|
||||
if req.uri().path().starts_with("/v0/") {
|
||||
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
|
||||
Endpoint::from_v0(endpoint_v0)
|
||||
let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
|
||||
Ok(HttpEndpoint::Old(endpoint_v1))
|
||||
} else if req.uri().path().starts_with("/v1/") {
|
||||
let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
|
||||
Ok(HttpEndpoint::Old(endpoint_v1))
|
||||
} else {
|
||||
Endpoint::from_request(req)
|
||||
Ok(HttpEndpoint::New(req.uri().path().to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle(
|
||||
&self,
|
||||
req: Request<IncomingBody>,
|
||||
endpoint: Endpoint,
|
||||
endpoint: HttpEndpoint,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let required_auth_hash =
|
||||
match endpoint.authorization_type() {
|
||||
Authorization::None => None,
|
||||
Authorization::MetricsToken => self.metrics_token.as_deref(),
|
||||
Authorization::AdminToken => match self.admin_token.as_deref() {
|
||||
None => return Err(Error::forbidden(
|
||||
"Admin token isn't configured, admin API access is disabled for security.",
|
||||
)),
|
||||
Some(t) => Some(t),
|
||||
},
|
||||
};
|
||||
self.0.handle_http_api(req, endpoint).await
|
||||
}
|
||||
|
||||
if let Some(password_hash) = required_auth_hash {
|
||||
match req.headers().get("Authorization") {
|
||||
None => return Err(Error::forbidden("Authorization token must be provided")),
|
||||
Some(authorization) => {
|
||||
verify_bearer_token(&authorization, password_hash)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match endpoint {
|
||||
Endpoint::Options => self.handle_options(&req),
|
||||
Endpoint::CheckDomain => self.handle_check_domain(req).await,
|
||||
Endpoint::Health => self.handle_health(),
|
||||
Endpoint::Metrics => self.handle_metrics(),
|
||||
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
||||
Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
|
||||
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
||||
// Layout
|
||||
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
|
||||
// Keys
|
||||
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
|
||||
Endpoint::GetKeyInfo {
|
||||
id,
|
||||
search,
|
||||
show_secret_key,
|
||||
} => {
|
||||
let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
|
||||
handle_get_key_info(&self.garage, id, search, show_secret_key).await
|
||||
}
|
||||
Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
|
||||
Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
|
||||
Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
|
||||
Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
|
||||
// Buckets
|
||||
Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
|
||||
Endpoint::GetBucketInfo { id, global_alias } => {
|
||||
handle_get_bucket_info(&self.garage, id, global_alias).await
|
||||
}
|
||||
Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
|
||||
Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
|
||||
Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
|
||||
// Bucket-key permissions
|
||||
Endpoint::BucketAllowKey => {
|
||||
handle_bucket_change_key_perm(&self.garage, req, true).await
|
||||
}
|
||||
Endpoint::BucketDenyKey => {
|
||||
handle_bucket_change_key_perm(&self.garage, req, false).await
|
||||
}
|
||||
// Bucket aliasing
|
||||
Endpoint::GlobalAliasBucket { id, alias } => {
|
||||
handle_global_alias_bucket(&self.garage, id, alias).await
|
||||
}
|
||||
Endpoint::GlobalUnaliasBucket { id, alias } => {
|
||||
handle_global_unalias_bucket(&self.garage, id, alias).await
|
||||
}
|
||||
Endpoint::LocalAliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||
Endpoint::LocalUnaliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||
}
|
||||
fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
|
||||
let auth_header = req.headers().get(AUTHORIZATION)?;
|
||||
let token = parse_authorization(auth_header).ok()?;
|
||||
let key_id = token.split_once('.')?.0;
|
||||
Some(key_id.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiEndpoint for Endpoint {
|
||||
fn name(&self) -> &'static str {
|
||||
Endpoint::name(self)
|
||||
impl ApiEndpoint for HttpEndpoint {
|
||||
fn name(&self) -> Cow<'static, str> {
|
||||
match self {
|
||||
Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
|
||||
Self::New(path) => Cow::Owned(path.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
|
||||
|
|
@ -347,20 +251,91 @@ fn hash_bearer_token(token: &str) -> String {
|
|||
.to_string()
|
||||
}
|
||||
|
||||
fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
|
||||
use argon2::{password_hash::PasswordVerifier, Argon2};
|
||||
|
||||
let parsed_hash = PasswordHash::new(&password_hash).unwrap();
|
||||
|
||||
token
|
||||
fn parse_authorization(auth_header: &hyper::http::HeaderValue) -> Result<&str, Error> {
|
||||
let token = auth_header
|
||||
.to_str()?
|
||||
.strip_prefix("Bearer ")
|
||||
.and_then(|token| {
|
||||
Argon2::default()
|
||||
.verify_password(token.trim().as_bytes(), &parsed_hash)
|
||||
.ok()
|
||||
})
|
||||
.ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
|
||||
.ok_or_else(|| Error::forbidden("Invalid Authorization header"))?
|
||||
.trim();
|
||||
Ok(token)
|
||||
}
|
||||
|
||||
fn verify_authorization(
|
||||
garage: &Garage,
|
||||
global_token_hash: Option<&str>,
|
||||
auth_header: Option<hyper::http::HeaderValue>,
|
||||
endpoint_name: &str,
|
||||
) -> Result<(), Error> {
|
||||
use argon2::{password_hash::PasswordHash, password_hash::PasswordVerifier, Argon2};
|
||||
|
||||
let invalid_msg = "Invalid bearer token";
|
||||
|
||||
let token = match &auth_header {
|
||||
None => {
|
||||
return Err(Error::forbidden(
|
||||
"Bearer token must be provided in Authorization header",
|
||||
))
|
||||
}
|
||||
Some(authorization) => parse_authorization(authorization)?,
|
||||
};
|
||||
|
||||
let token_hash_string = if let Some((prefix, _)) = token.split_once('.') {
|
||||
garage
|
||||
.admin_token_table
|
||||
.get_local(&EmptyKey, &prefix.to_string())?
|
||||
.and_then(|k| k.state.into_option())
|
||||
.filter(|p| !p.is_expired(now_msec()))
|
||||
// GetCurrentAdminTokenInfo endpoint must be accessible even if it is not in the token scopes
|
||||
.filter(|p| p.has_scope(endpoint_name) || endpoint_name == "GetCurrentAdminTokenInfo")
|
||||
.ok_or_else(|| Error::forbidden(invalid_msg))?
|
||||
.token_hash
|
||||
} else {
|
||||
global_token_hash
|
||||
.ok_or_else(|| Error::forbidden(invalid_msg))?
|
||||
.to_string()
|
||||
};
|
||||
|
||||
let token_hash =
|
||||
PasswordHash::new(&token_hash_string).ok_or_internal_error("Could not parse token hash")?;
|
||||
|
||||
Argon2::default()
|
||||
.verify_password(token.as_bytes(), &token_hash)
|
||||
.map_err(|_| Error::forbidden(invalid_msg))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn find_matching_nodes(garage: &Garage, spec: &str) -> Result<Vec<Uuid>, Error> {
|
||||
if spec == "self" {
|
||||
Ok(vec![garage.system.id])
|
||||
} else {
|
||||
// Collect all nodes currently up and/or in cluster layout
|
||||
let mut res = vec![];
|
||||
if let Ok(all_nodes) = garage.system.cluster_layout().all_nodes() {
|
||||
res = all_nodes.to_vec();
|
||||
}
|
||||
for node in garage.system.get_known_nodes() {
|
||||
if node.is_up && !res.contains(&node.id) {
|
||||
res.push(node.id);
|
||||
}
|
||||
}
|
||||
|
||||
if spec == "*" {
|
||||
// match all nodes
|
||||
Ok(res)
|
||||
} else {
|
||||
// filter nodes that match spec
|
||||
res.retain(|node| hex::encode(node).starts_with(spec));
|
||||
if res.is_empty() {
|
||||
Err(Error::bad_request(format!("No nodes matching {}", spec)))
|
||||
} else if res.len() > 1 {
|
||||
Err(Error::bad_request(format!(
|
||||
"Multiple nodes matching {}: {:?}",
|
||||
spec, res
|
||||
)))
|
||||
} else {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
}
|
||||
}

src/api/admin/block.rs (new normal file, +284 lines)

@@ -0,0 +1,284 @@
use std::sync::Arc;
|
||||
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_table::EmptyKey;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::s3::object_table::*;
|
||||
use garage_model::s3::version_table::*;
|
||||
|
||||
use garage_api_common::common_error::CommonErrorDerivative;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for LocalListBlockErrorsRequest {
|
||||
type Response = LocalListBlockErrorsResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalListBlockErrorsResponse, Error> {
|
||||
let errors = garage.block_manager.list_resync_errors()?;
|
||||
let now = now_msec();
|
||||
let errors = errors
|
||||
.into_iter()
|
||||
.map(|e| BlockError {
|
||||
block_hash: hex::encode(e.hash),
|
||||
refcount: e.refcount,
|
||||
error_count: e.error_count,
|
||||
last_try_secs_ago: now.saturating_sub(e.last_try) / 1000,
|
||||
next_try_in_secs: e.next_try.saturating_sub(now) / 1000,
|
||||
})
|
||||
.collect();
|
||||
Ok(LocalListBlockErrorsResponse(errors))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalGetBlockInfoRequest {
|
||||
type Response = LocalGetBlockInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalGetBlockInfoResponse, Error> {
|
||||
let hash = find_block_hash_by_prefix(garage, &self.block_hash)?;
|
||||
let refcount = garage.block_manager.get_block_rc(&hash)?;
|
||||
let block_refs = garage
|
||||
.block_ref_table
|
||||
.get_range(&hash, None, None, 10000, Default::default())
|
||||
.await?;
|
||||
let mut versions = vec![];
|
||||
for br in block_refs {
|
||||
if let Some(v) = garage.version_table.get(&br.version, &EmptyKey).await? {
|
||||
let bl = match &v.backlink {
|
||||
VersionBacklink::MultipartUpload { upload_id } => {
|
||||
if let Some(u) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
|
||||
BlockVersionBacklink::Upload {
|
||||
upload_id: hex::encode(upload_id),
|
||||
upload_deleted: u.deleted.get(),
|
||||
upload_garbage_collected: false,
|
||||
bucket_id: Some(hex::encode(u.bucket_id)),
|
||||
key: Some(u.key.to_string()),
|
||||
}
|
||||
} else {
|
||||
BlockVersionBacklink::Upload {
|
||||
upload_id: hex::encode(upload_id),
|
||||
upload_deleted: true,
|
||||
upload_garbage_collected: true,
|
||||
bucket_id: None,
|
||||
key: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
VersionBacklink::Object { bucket_id, key } => BlockVersionBacklink::Object {
|
||||
bucket_id: hex::encode(bucket_id),
|
||||
key: key.to_string(),
|
||||
},
|
||||
};
|
||||
versions.push(BlockVersion {
|
||||
version_id: hex::encode(br.version),
|
||||
ref_deleted: br.deleted.get(),
|
||||
version_deleted: v.deleted.get(),
|
||||
garbage_collected: false,
|
||||
backlink: Some(bl),
|
||||
});
|
||||
} else {
|
||||
versions.push(BlockVersion {
|
||||
version_id: hex::encode(br.version),
|
||||
ref_deleted: br.deleted.get(),
|
||||
version_deleted: true,
|
||||
garbage_collected: true,
|
||||
backlink: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
Ok(LocalGetBlockInfoResponse {
|
||||
block_hash: hex::encode(hash),
|
||||
refcount,
|
||||
versions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalRetryBlockResyncRequest {
|
||||
type Response = LocalRetryBlockResyncResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalRetryBlockResyncResponse, Error> {
|
||||
match self {
|
||||
Self::All { all: true } => {
|
||||
let blocks = garage.block_manager.list_resync_errors()?;
|
||||
for b in blocks.iter() {
|
||||
garage.block_manager.resync.clear_backoff(&b.hash)?;
|
||||
}
|
||||
Ok(LocalRetryBlockResyncResponse {
|
||||
count: blocks.len() as u64,
|
||||
})
|
||||
}
|
||||
Self::All { all: false } => Err(Error::bad_request("nonsense")),
|
||||
Self::Blocks { block_hashes } => {
|
||||
for hash in block_hashes.iter() {
|
||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||
garage.block_manager.resync.clear_backoff(&hash)?;
|
||||
}
|
||||
Ok(LocalRetryBlockResyncResponse {
|
||||
count: block_hashes.len() as u64,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalPurgeBlocksRequest {
|
||||
type Response = LocalPurgeBlocksResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalPurgeBlocksResponse, Error> {
|
||||
let mut obj_dels = 0;
|
||||
let mut mpu_dels = 0;
|
||||
let mut ver_dels = 0;
|
||||
let mut br_dels = 0;
|
||||
|
||||
for hash in self.0.iter() {
|
||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||
let block_refs = garage
|
||||
.block_ref_table
|
||||
.get_range(&hash, None, None, 10000, Default::default())
|
||||
.await?;
|
||||
|
||||
for br in block_refs {
|
||||
if let Some(version) = garage.version_table.get(&br.version, &EmptyKey).await? {
|
||||
handle_block_purge_version_backlink(
|
||||
garage,
|
||||
&version,
|
||||
&mut obj_dels,
|
||||
&mut mpu_dels,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !version.deleted.get() {
|
||||
let deleted_version = Version::new(version.uuid, version.backlink, true);
|
||||
garage.version_table.insert(&deleted_version).await?;
|
||||
ver_dels += 1;
|
||||
}
|
||||
}
|
||||
if !br.deleted.get() {
|
||||
let mut br = br;
|
||||
br.deleted.set();
|
||||
garage.block_ref_table.insert(&br).await?;
|
||||
br_dels += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(LocalPurgeBlocksResponse {
|
||||
blocks_purged: self.0.len() as u64,
|
||||
block_refs_purged: br_dels,
|
||||
versions_deleted: ver_dels,
|
||||
objects_deleted: obj_dels,
|
||||
uploads_deleted: mpu_dels,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn find_block_hash_by_prefix(garage: &Arc<Garage>, prefix: &str) -> Result<Hash, Error> {
|
||||
if prefix.len() < 4 {
|
||||
return Err(Error::bad_request(
|
||||
"Please specify at least 4 characters of the block hash",
|
||||
));
|
||||
}
|
||||
|
||||
let prefix_bin = hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?;
|
||||
|
||||
let iter = garage
|
||||
.block_ref_table
|
||||
.data
|
||||
.store
|
||||
.range(&prefix_bin[..]..)
|
||||
.map_err(GarageError::from)?;
|
||||
let mut found = None;
|
||||
for item in iter {
|
||||
let (k, _v) = item.map_err(GarageError::from)?;
|
||||
let hash = Hash::try_from(&k[..32]).unwrap();
|
||||
if hash.as_slice()[..prefix_bin.len()] != prefix_bin {
|
||||
break;
|
||||
}
|
||||
if hex::encode(hash.as_slice()).starts_with(prefix) {
|
||||
match &found {
|
||||
Some(x) if *x == hash => (),
|
||||
Some(_) => {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Several blocks match prefix `{}`",
|
||||
prefix
|
||||
)));
|
||||
}
|
||||
None => {
|
||||
found = Some(hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
found.ok_or_else(|| Error::NoSuchBlock(prefix.to_string()))
|
||||
}
|
||||
|
||||
async fn handle_block_purge_version_backlink(
|
||||
garage: &Arc<Garage>,
|
||||
version: &Version,
|
||||
obj_dels: &mut u64,
|
||||
mpu_dels: &mut u64,
|
||||
) -> Result<(), Error> {
|
||||
let (bucket_id, key, ov_id) = match &version.backlink {
|
||||
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
|
||||
VersionBacklink::MultipartUpload { upload_id } => {
|
||||
if let Some(mut mpu) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
|
||||
if !mpu.deleted.get() {
|
||||
mpu.parts.clear();
|
||||
mpu.deleted.set();
|
||||
garage.mpu_table.insert(&mpu).await?;
|
||||
*mpu_dels += 1;
|
||||
}
|
||||
(mpu.bucket_id, mpu.key.clone(), *upload_id)
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(object) = garage.object_table.get(&bucket_id, &key).await? {
|
||||
let ov = object.versions().iter().rev().find(|v| v.is_complete());
|
||||
if let Some(ov) = ov {
|
||||
if ov.uuid == ov_id {
|
||||
let del_uuid = gen_uuid();
|
||||
let deleted_object = Object::new(
|
||||
bucket_id,
|
||||
key,
|
||||
vec![ObjectVersion {
|
||||
uuid: del_uuid,
|
||||
timestamp: ov.timestamp + 1,
|
||||
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
|
||||
}],
|
||||
);
|
||||
garage.object_table.insert(&deleted_object).await?;
|
||||
*obj_dels += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}

@@ -1,411 +1,288 @@
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::fmt::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use format_table::format_table_to_string;
|
||||
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
|
||||
use garage_rpc::layout;
|
||||
use garage_rpc::layout::PARTITION_BITS;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use garage_api_common::helpers::{json_ok_response, parse_json_body};
|
||||
|
||||
use crate::api_server::ResBody;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let layout = garage.system.cluster_layout();
|
||||
let mut nodes = garage
|
||||
.system
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|i| {
|
||||
(
|
||||
i.id,
|
||||
NodeResp {
|
||||
id: hex::encode(i.id),
|
||||
addr: i.addr,
|
||||
hostname: i.status.hostname,
|
||||
is_up: i.is_up,
|
||||
last_seen_secs_ago: i.last_seen_secs_ago,
|
||||
data_partition: i
|
||||
.status
|
||||
.data_disk_avail
|
||||
.map(|(avail, total)| FreeSpaceResp {
|
||||
available: avail,
|
||||
total,
|
||||
impl RequestHandler for GetClusterStatusRequest {
|
||||
type Response = GetClusterStatusResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterStatusResponse, Error> {
|
||||
let layout = garage.system.cluster_layout();
|
||||
let mut nodes = garage
|
||||
.system
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|i| {
|
||||
(
|
||||
i.id,
|
||||
NodeResp {
|
||||
id: hex::encode(i.id),
|
||||
garage_version: i.status.garage_version,
|
||||
addr: i.addr,
|
||||
hostname: i.status.hostname,
|
||||
is_up: i.is_up,
|
||||
last_seen_secs_ago: i.last_seen_secs_ago,
|
||||
data_partition: i.status.data_disk_avail.map(|(avail, total)| {
|
||||
FreeSpaceResp {
|
||||
available: avail,
|
||||
total,
|
||||
}
|
||||
}),
|
||||
metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
|
||||
FreeSpaceResp {
|
||||
available: avail,
|
||||
total,
|
||||
}
|
||||
}),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
|
||||
FreeSpaceResp {
|
||||
available: avail,
|
||||
total,
|
||||
}
|
||||
}),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for (id, _, role) in layout.current().roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
let role = NodeRoleResp {
|
||||
id: hex::encode(id),
|
||||
zone: r.zone.to_string(),
|
||||
capacity: r.capacity,
|
||||
tags: r.tags.clone(),
|
||||
};
|
||||
match nodes.get_mut(id) {
|
||||
None => {
|
||||
nodes.insert(
|
||||
*id,
|
||||
NodeResp {
|
||||
id: hex::encode(id),
|
||||
role: Some(role),
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
Some(n) => {
|
||||
n.role = Some(role);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ver in layout.versions().iter().rev().skip(1) {
|
||||
for (id, _, role) in ver.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
if r.capacity.is_some() {
|
||||
if let Some(n) = nodes.get_mut(id) {
|
||||
if n.role.is_none() {
|
||||
n.draining = true;
|
||||
if let Ok(current_layout) = layout.current() {
|
||||
for (id, _, role) in current_layout.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
let role = NodeAssignedRole {
|
||||
zone: r.zone.to_string(),
|
||||
capacity: r.capacity,
|
||||
tags: r.tags.clone(),
|
||||
};
|
||||
match nodes.get_mut(id) {
|
||||
None => {
|
||||
nodes.insert(
|
||||
*id,
|
||||
NodeResp {
|
||||
id: hex::encode(id),
|
||||
role: Some(role),
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
Some(n) => {
|
||||
n.role = Some(role);
|
||||
}
|
||||
} else {
|
||||
nodes.insert(
|
||||
*id,
|
||||
NodeResp {
|
||||
id: hex::encode(id),
|
||||
draining: true,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut nodes = nodes.into_values().collect::<Vec<_>>();
|
||||
nodes.sort_by(|x, y| x.id.cmp(&y.id));
|
||||
if let Ok(layout_versions) = layout.versions() {
|
||||
for ver in layout_versions.iter().rev().skip(1) {
|
||||
for (id, _, role) in ver.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
if r.capacity.is_some() {
|
||||
if let Some(n) = nodes.get_mut(id) {
|
||||
if n.role.is_none() {
|
||||
n.draining = true;
|
||||
}
|
||||
} else {
|
||||
nodes.insert(
|
||||
*id,
|
||||
NodeResp {
|
||||
id: hex::encode(id),
|
||||
draining: true,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let res = GetClusterStatusResponse {
|
||||
node: hex::encode(garage.system.id),
|
||||
garage_version: garage_util::version::garage_version(),
|
||||
garage_features: garage_util::version::garage_features(),
|
||||
rust_version: garage_util::version::rust_version(),
|
||||
db_engine: garage.db.engine(),
|
||||
layout_version: layout.current().version,
|
||||
nodes,
|
||||
};
|
||||
let mut nodes = nodes.into_values().collect::<Vec<_>>();
|
||||
nodes.sort_by(|x, y| x.id.cmp(&y.id));
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
let health = garage.system.health();
|
||||
let health = ClusterHealth {
|
||||
status: match health.status {
|
||||
ClusterHealthStatus::Healthy => "healthy",
|
||||
ClusterHealthStatus::Degraded => "degraded",
|
||||
ClusterHealthStatus::Unavailable => "unavailable",
|
||||
},
|
||||
known_nodes: health.known_nodes,
|
||||
connected_nodes: health.connected_nodes,
|
||||
storage_nodes: health.storage_nodes,
|
||||
storage_nodes_ok: health.storage_nodes_ok,
|
||||
partitions: health.partitions,
|
||||
partitions_quorum: health.partitions_quorum,
|
||||
partitions_all_ok: health.partitions_all_ok,
|
||||
};
|
||||
Ok(json_ok_response(&health)?)
|
||||
}
|
||||
|
||||
pub async fn handle_connect_cluster_nodes(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<Vec<String>, _, Error>(req).await?;
|
||||
|
||||
let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|r| match r {
|
||||
Ok(()) => ConnectClusterNodesResponse {
|
||||
success: true,
|
||||
error: None,
|
||||
},
|
||||
Err(e) => ConnectClusterNodesResponse {
|
||||
success: false,
|
||||
error: Some(format!("{}", e)),
|
||||
},
|
||||
Ok(GetClusterStatusResponse {
|
||||
layout_version: layout.inner().current().version,
|
||||
nodes,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let res = format_cluster_layout(garage.system.cluster_layout().inner());
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
|
||||
let roles = layout
|
||||
.current()
|
||||
.roles
|
||||
.items()
|
||||
.iter()
|
||||
.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
|
||||
.map(|(k, v)| NodeRoleResp {
|
||||
id: hex::encode(k),
|
||||
zone: v.zone.clone(),
|
||||
capacity: v.capacity,
|
||||
tags: v.tags.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let staged_role_changes = layout
|
||||
.staging
|
||||
.get()
|
||||
.roles
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
|
||||
.map(|(k, _, v)| match &v.0 {
|
||||
None => NodeRoleChange {
|
||||
id: hex::encode(k),
|
||||
action: NodeRoleChangeEnum::Remove { remove: true },
|
||||
},
|
||||
Some(r) => NodeRoleChange {
|
||||
id: hex::encode(k),
|
||||
action: NodeRoleChangeEnum::Update {
|
||||
zone: r.zone.clone(),
|
||||
capacity: r.capacity,
|
||||
tags: r.tags.clone(),
|
||||
},
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
GetClusterLayoutResponse {
|
||||
version: layout.current().version,
|
||||
roles,
|
||||
staged_role_changes,
|
||||
}
|
||||
}
|
||||
|
||||
// ----
|
||||
impl RequestHandler for GetClusterHealthRequest {
|
||||
type Response = GetClusterHealthResponse;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ClusterHealth {
|
||||
status: &'static str,
|
||||
known_nodes: usize,
|
||||
connected_nodes: usize,
|
||||
storage_nodes: usize,
|
||||
storage_nodes_ok: usize,
|
||||
partitions: usize,
|
||||
partitions_quorum: usize,
|
||||
partitions_all_ok: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterStatusResponse {
|
||||
node: String,
|
||||
garage_version: &'static str,
|
||||
garage_features: Option<&'static [&'static str]>,
|
||||
rust_version: &'static str,
|
||||
db_engine: String,
|
||||
layout_version: u64,
|
||||
nodes: Vec<NodeResp>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyClusterLayoutResponse {
|
||||
message: Vec<String>,
|
||||
layout: GetClusterLayoutResponse,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ConnectClusterNodesResponse {
|
||||
success: bool,
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterLayoutResponse {
|
||||
version: u64,
|
||||
roles: Vec<NodeRoleResp>,
|
||||
staged_role_changes: Vec<NodeRoleChange>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleResp {
|
||||
id: String,
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct FreeSpaceResp {
|
||||
available: u64,
|
||||
total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeResp {
|
||||
id: String,
|
||||
role: Option<NodeRoleResp>,
|
||||
addr: Option<SocketAddr>,
|
||||
hostname: Option<String>,
|
||||
is_up: bool,
|
||||
last_seen_secs_ago: Option<u64>,
|
||||
draining: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
data_partition: Option<FreeSpaceResp>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
metadata_partition: Option<FreeSpaceResp>,
|
||||
}
|
||||
|
||||
// ---- update functions ----
|
||||
|
||||
pub async fn handle_update_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
|
||||
|
||||
let mut layout = garage.system.cluster_layout().inner().clone();
|
||||
|
||||
let mut roles = layout.current().roles.clone();
|
||||
roles.merge(&layout.staging.get().roles);
|
||||
|
||||
for change in updates {
|
||||
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
|
||||
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
|
||||
|
||||
let new_role = match change.action {
|
||||
NodeRoleChangeEnum::Remove { remove: true } => None,
|
||||
NodeRoleChangeEnum::Update {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
} => Some(layout::NodeRole {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
}),
|
||||
_ => return Err(Error::bad_request("Invalid layout change")),
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterHealthResponse, Error> {
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
let health = garage.system.health();
|
||||
let health = GetClusterHealthResponse {
|
||||
status: match health.status {
|
||||
ClusterHealthStatus::Healthy => "healthy",
|
||||
ClusterHealthStatus::Degraded => "degraded",
|
||||
ClusterHealthStatus::Unavailable => "unavailable",
|
||||
}
|
||||
.to_string(),
|
||||
known_nodes: health.known_nodes,
|
||||
connected_nodes: health.connected_nodes,
|
||||
storage_nodes: health.storage_nodes,
|
||||
// Translating storage_nodes_up (admin API context) to storage_nodes_ok (metrics context)
|
||||
// TODO: when releasing major release, consider renaming all the fields in the metrics to storage_nodes_up
|
||||
storage_nodes_up: health.storage_nodes_ok,
|
||||
partitions: health.partitions,
|
||||
partitions_quorum: health.partitions_quorum,
|
||||
partitions_all_ok: health.partitions_all_ok,
|
||||
};
|
||||
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
|
||||
Ok(health)
|
||||
}
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_apply_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
|
||||
impl RequestHandler for GetClusterStatisticsRequest {
|
||||
type Response = GetClusterStatisticsResponse;
|
||||
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
|
||||
// FIXME: return this as a JSON struct instead of text
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterStatisticsResponse, Error> {
|
||||
let mut ret = String::new();
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
// Gather storage node and free space statistics for current nodes
|
||||
let layout = &garage.system.cluster_layout();
|
||||
let mut node_partition_count = HashMap::<Uuid, u64>::new();
|
||||
if let Ok(current_layout) = layout.current() {
|
||||
for short_id in current_layout.ring_assignment_data.iter() {
|
||||
let id = current_layout.node_id_vec[*short_id as usize];
|
||||
*node_partition_count.entry(id).or_default() += 1;
|
||||
}
|
||||
}
|
||||
let node_info = garage
|
||||
.system
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|n| (n.id, n))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let res = ApplyClusterLayoutResponse {
|
||||
message: msg,
|
||||
layout: format_cluster_layout(&layout),
|
||||
};
|
||||
Ok(json_ok_response(&res)?)
|
||||
let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
|
||||
for (id, parts) in node_partition_count.iter() {
|
||||
let info = node_info.get(id);
|
||||
let status = info.map(|x| &x.status);
|
||||
let role = layout
|
||||
.current()
|
||||
.ok()
|
||||
.and_then(|l| l.roles.get(id))
|
||||
.and_then(|x| x.0.as_ref());
|
||||
let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?");
|
||||
let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
|
||||
let capacity = role
|
||||
.map(|x| x.capacity_string())
|
||||
.unwrap_or_else(|| "?".into());
|
||||
let avail_str = |x| match x {
|
||||
Some((avail, total)) => {
|
||||
let pct = (avail as f64) / (total as f64) * 100.;
|
||||
let avail = bytesize::ByteSize::b(avail);
|
||||
let total = bytesize::ByteSize::b(total);
|
||||
format!("{}/{} ({:.1}%)", avail, total, pct)
|
||||
}
|
||||
None => "?".into(),
|
||||
};
|
||||
let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
|
||||
let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
|
||||
table.push(format!(
|
||||
" {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
|
||||
id, hostname, zone, capacity, parts, data_avail, meta_avail
|
||||
));
|
||||
}
|
||||
write!(
|
||||
&mut ret,
|
||||
"Storage nodes:\n{}",
|
||||
format_table_to_string(table)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let meta_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.meta_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let data_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.data_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
|
||||
let meta_avail =
|
||||
bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
let data_avail =
|
||||
bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
writeln!(
|
||||
&mut ret,
|
||||
"\nEstimated available storage space cluster-wide (might be lower in practice):"
|
||||
)
|
||||
.unwrap();
|
||||
if meta_part_avail.len() < node_partition_count.len()
|
||||
|| data_part_avail.len() < node_partition_count.len()
|
||||
{
|
||||
ret += &format_table_to_string(vec![
|
||||
format!(" data: < {}", data_avail),
|
||||
format!(" metadata: < {}", meta_avail),
|
||||
]);
|
||||
writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
|
||||
} else {
|
||||
ret += &format_table_to_string(vec![
|
||||
format!(" data: {}", data_avail),
|
||||
format!(" metadata: {}", meta_avail),
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetClusterStatisticsResponse { freeform: ret })
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_revert_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
impl RequestHandler for ConnectClusterNodesRequest {
|
||||
type Response = ConnectClusterNodesResponse;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyLayoutRequest {
|
||||
version: u64,
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleChange {
|
||||
id: String,
|
||||
#[serde(flatten)]
|
||||
action: NodeRoleChangeEnum,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum NodeRoleChangeEnum {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Remove { remove: bool },
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Update {
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
},
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ConnectClusterNodesResponse, Error> {
|
||||
let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node)))
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|r| match r {
|
||||
Ok(()) => ConnectNodeResponse {
|
||||
success: true,
|
||||
error: None,
|
||||
},
|
||||
Err(e) => ConnectNodeResponse {
|
||||
success: false,
|
||||
error: Some(format!("{}", e)),
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
Ok(ConnectClusterNodesResponse(res))
|
||||
}
|
||||
}
|
||||

@@ -21,10 +21,26 @@ pub enum Error {
Common(#[from] CommonError),
|
||||
|
||||
// Category: cannot process
|
||||
/// The admin API token does not exist
|
||||
#[error("Admin token not found: {0}")]
|
||||
NoSuchAdminToken(String),
|
||||
|
||||
/// The API access key does not exist
|
||||
#[error("Access key not found: {0}")]
|
||||
NoSuchAccessKey(String),
|
||||
|
||||
/// The requested block does not exist
|
||||
#[error("Block not found: {0}")]
|
||||
NoSuchBlock(String),
|
||||
|
||||
/// The requested worker does not exist
|
||||
#[error("Worker not found: {0}")]
|
||||
NoSuchWorker(u64),
|
||||
|
||||
/// The requested object does not exist
|
||||
#[error("Key not found")]
|
||||
NoSuchKey,
|
||||
|
||||
/// In Import key, the key already exists
|
||||
#[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
|
||||
KeyAlreadyExists(String),
|
||||
|
|
@@ -46,11 +62,15 @@ impl From<HelperError> for Error {
|
|||
}
|
||||
|
||||
impl Error {
|
||||
pub fn code(&self) -> &'static str {
|
||||
match self {
|
||||
Error::Common(c) => c.aws_code(),
|
||||
Error::NoSuchAdminToken(_) => "NoSuchAdminToken",
|
||||
Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
|
||||
Error::NoSuchWorker(_) => "NoSuchWorker",
|
||||
Error::NoSuchBlock(_) => "NoSuchBlock",
|
||||
Error::KeyAlreadyExists(_) => "KeyAlreadyExists",
|
||||
Error::NoSuchKey => "NoSuchKey",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -60,7 +80,11 @@ impl ApiError for Error {
|
|||
fn http_status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::Common(c) => c.http_status_code(),
|
||||
Error::NoSuchAdminToken(_)
|
||||
| Error::NoSuchAccessKey(_)
|
||||
| Error::NoSuchWorker(_)
|
||||
| Error::NoSuchBlock(_)
|
||||
| Error::NoSuchKey => StatusCode::NOT_FOUND,
|
||||
Error::KeyAlreadyExists(_) => StatusCode::CONFLICT,
|
||||
}
|
||||
}
|
||||
|
|
@@ -68,6 +92,7 @@ impl ApiError for Error {
|
|||
fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
||||
use hyper::header;
|
||||
header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap());
|
||||
}
|
||||
|
||||
fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
|
||||
|
|
|
|||
|
|
@@ -1,173 +1,190 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::DateTime;
|
||||
|
||||
use garage_table::*;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::*;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
|
||||
use crate::api_server::ResBody;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let res = garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|k| ListKeyResultItem {
|
||||
id: k.key_id.to_string(),
|
||||
name: k.params().unwrap().name.get().clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
impl RequestHandler for ListKeysRequest {
|
||||
type Response = ListKeysResponse;
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
async fn handle(self, garage: &Arc<Garage>, _admin: &Admin) -> Result<ListKeysResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ListKeyResultItem {
|
||||
id: String,
|
||||
name: String,
|
||||
}
|
||||
|
||||
pub async fn handle_get_key_info(
|
||||
garage: &Arc<Garage>,
|
||||
id: Option<String>,
|
||||
search: Option<String>,
|
||||
show_secret_key: bool,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let key = if let Some(id) = id {
|
||||
garage.key_helper().get_existing_key(&id).await?
|
||||
} else if let Some(search) = search {
|
||||
garage
|
||||
.key_helper()
|
||||
.get_existing_matching_key(&search)
|
||||
let res = garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
unreachable!();
|
||||
};
|
||||
.iter()
|
||||
.map(|k| {
|
||||
let p = k.params().unwrap();
|
||||
|
||||
key_info_results(garage, key, show_secret_key).await
|
||||
}
|
||||
ListKeysResponseItem {
|
||||
id: k.key_id.to_string(),
|
||||
name: p.name.get().clone(),
|
||||
created: p.created.map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64)
|
||||
.expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expiration: p.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64)
|
||||
.expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: p.is_expired(now),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
pub async fn handle_create_key(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
|
||||
|
||||
let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
|
||||
garage.key_table.insert(&key).await?;
|
||||
|
||||
key_info_results(garage, key, true).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct CreateKeyRequest {
|
||||
name: Option<String>,
|
||||
}
|
||||
|
||||
pub async fn handle_import_key(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
|
||||
|
||||
let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
|
||||
if prev_key.is_some() {
|
||||
return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
|
||||
Ok(ListKeysResponse(res))
|
||||
}
|
||||
|
||||
let imported_key = Key::import(
|
||||
&req.access_key_id,
|
||||
&req.secret_access_key,
|
||||
req.name.as_deref().unwrap_or("Imported key"),
|
||||
)
|
||||
.ok_or_bad_request("Invalid key format")?;
|
||||
garage.key_table.insert(&imported_key).await?;
|
||||
|
||||
key_info_results(garage, imported_key, false).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ImportKeyRequest {
|
||||
access_key_id: String,
|
||||
secret_access_key: String,
|
||||
name: Option<String>,
|
||||
}
|
||||
impl RequestHandler for GetKeyInfoRequest {
|
||||
type Response = GetKeyInfoResponse;
|
||||
|
||||
pub async fn handle_update_key(
|
||||
garage: &Arc<Garage>,
|
||||
id: String,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetKeyInfoResponse, Error> {
|
||||
let key = match (self.id, self.search) {
|
||||
(Some(id), None) => garage.key_helper().get_existing_key(&id).await?,
|
||||
(None, Some(search)) => {
|
||||
let candidates = garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::MatchesAndNotDeleted(search.to_string())),
|
||||
10,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
if candidates.len() != 1 {
|
||||
return Err(Error::bad_request(format!(
|
||||
"{} matching keys",
|
||||
candidates.len()
|
||||
)));
|
||||
}
|
||||
candidates.into_iter().next().unwrap()
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id or search must be provided (but not both)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let mut key = garage.key_helper().get_existing_key(&id).await?;
|
||||
|
||||
let key_state = key.state.as_option_mut().unwrap();
|
||||
|
||||
if let Some(new_name) = req.name {
|
||||
key_state.name.update(new_name);
|
||||
key_info_results(garage, key, self.show_secret_key).await
|
||||
}
|
||||
if let Some(allow) = req.allow {
|
||||
if allow.create_bucket {
|
||||
key_state.allow_create_bucket.update(true);
|
||||
}
|
||||
|
||||
impl RequestHandler for CreateKeyRequest {
|
||||
type Response = CreateKeyResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<CreateKeyResponse, Error> {
|
||||
let mut key = Key::new("Unnamed key");
|
||||
|
||||
apply_key_updates(&mut key, self.0)?;
|
||||
|
||||
garage.key_table.insert(&key).await?;
|
||||
|
||||
Ok(CreateKeyResponse(
|
||||
key_info_results(garage, key, true).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for ImportKeyRequest {
|
||||
type Response = ImportKeyResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ImportKeyResponse, Error> {
|
||||
let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?;
|
||||
if prev_key.is_some() {
|
||||
return Err(Error::KeyAlreadyExists(self.access_key_id.to_string()));
|
||||
}
|
||||
}
|
||||
if let Some(deny) = req.deny {
|
||||
if deny.create_bucket {
|
||||
key_state.allow_create_bucket.update(false);
|
||||
}
|
||||
}
|
||||
|
||||
garage.key_table.insert(&key).await?;
|
||||
let imported_key = Key::import(
|
||||
&self.access_key_id,
|
||||
&self.secret_access_key,
|
||||
self.name.as_deref().unwrap_or("Imported key"),
|
||||
)
|
||||
.ok_or_bad_request("Invalid key format")?;
|
||||
garage.key_table.insert(&imported_key).await?;
|
||||
|
||||
key_info_results(garage, key, false).await
|
||||
Ok(ImportKeyResponse(
|
||||
key_info_results(garage, imported_key, false).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct UpdateKeyRequest {
|
||||
name: Option<String>,
|
||||
allow: Option<KeyPerm>,
|
||||
deny: Option<KeyPerm>,
|
||||
impl RequestHandler for UpdateKeyRequest {
|
||||
type Response = UpdateKeyResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<UpdateKeyResponse, Error> {
|
||||
let mut key = garage.key_helper().get_existing_key(&self.id).await?;
|
||||
|
||||
apply_key_updates(&mut key, self.body)?;
|
||||
|
||||
garage.key_table.insert(&key).await?;
|
||||
|
||||
Ok(UpdateKeyResponse(
|
||||
key_info_results(garage, key, false).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_delete_key(
|
||||
garage: &Arc<Garage>,
|
||||
id: String,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let helper = garage.locked_helper().await;
|
||||
impl RequestHandler for DeleteKeyRequest {
|
||||
type Response = DeleteKeyResponse;
|
||||
|
||||
let mut key = helper.key().get_existing_key(&id).await?;
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<DeleteKeyResponse, Error> {
|
||||
let helper = garage.locked_helper().await;
|
||||
|
||||
helper.delete_key(&mut key).await?;
|
||||
let mut key = helper.key().get_existing_key(&self.id).await?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NO_CONTENT)
|
||||
.body(empty_body())?)
|
||||
helper.delete_key(&mut key).await?;
|
||||
|
||||
Ok(DeleteKeyResponse)
|
||||
}
|
||||
}
|
||||
|
||||
async fn key_info_results(
|
||||
garage: &Arc<Garage>,
|
||||
key: Key,
|
||||
show_secret: bool,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
) -> Result<GetKeyInfoResponse, Error> {
|
||||
let mut relevant_buckets = HashMap::new();
|
||||
|
||||
let key_state = key.state.as_option().unwrap();
|
||||
|
|
@@ -193,8 +210,15 @@ async fn key_info_results(
|
|||
}
|
||||
}
|
||||
|
||||
let res = GetKeyInfoResponse {
|
||||
name: key_state.name.get().clone(),
|
||||
created: key_state.created.map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expiration: key_state.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: key_state.is_expired(now_msec()),
|
||||
access_key_id: key.key_id.clone(),
|
||||
secret_access_key: if show_secret {
|
||||
Some(key_state.secret_key.clone())
|
||||
|
|
@@ -206,9 +230,18 @@ async fn key_info_results(
|
|||
},
|
||||
buckets: relevant_buckets
|
||||
.into_values()
|
||||
.filter_map(|bucket| {
|
||||
let state = bucket.state.as_option().unwrap();
|
||||
let permissions = key_state
|
||||
.authorized_buckets
|
||||
.get(&bucket.id)
|
||||
.filter(|p| p.is_any())
|
||||
.map(|p| ApiBucketKeyPerm {
|
||||
read: p.allow_read,
|
||||
write: p.allow_write,
|
||||
owner: p.allow_owner,
|
||||
})?;
|
||||
Some(KeyInfoBucketResponse {
|
||||
id: hex::encode(bucket.id),
|
||||
global_aliases: state
|
||||
.aliases
|
||||
|
|
@@ -224,57 +257,45 @@ async fn key_info_results(
|
|||
.filter(|((k, _), _, a)| *a && *k == key.key_id)
|
||||
.map(|((_, n), _, _)| n.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
permissions,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
};
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetKeyInfoResult {
|
||||
name: String,
|
||||
access_key_id: String,
|
||||
#[serde(skip_serializing_if = "is_default")]
|
||||
secret_access_key: Option<String>,
|
||||
permissions: KeyPerm,
|
||||
buckets: Vec<KeyInfoBucketResult>,
|
||||
}
|
||||
fn apply_key_updates(key: &mut Key, updates: UpdateKeyRequestBody) -> Result<(), Error> {
|
||||
if updates.never_expires && updates.expiration.is_some() {
|
||||
return Err(Error::bad_request(
|
||||
"cannot specify `expiration` and `never_expires`",
|
||||
));
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct KeyPerm {
|
||||
#[serde(default)]
|
||||
create_bucket: bool,
|
||||
}
|
||||
let key_state = key.state.as_option_mut().unwrap();
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct KeyInfoBucketResult {
|
||||
id: String,
|
||||
global_aliases: Vec<String>,
|
||||
local_aliases: Vec<String>,
|
||||
permissions: ApiBucketKeyPerm,
|
||||
}
|
||||
if let Some(new_name) = updates.name {
|
||||
key_state.name.update(new_name);
|
||||
}
|
||||
if let Some(expiration) = updates.expiration {
|
||||
key_state
|
||||
.expiration
|
||||
.update(Some(expiration.timestamp_millis() as u64));
|
||||
}
|
||||
if updates.never_expires {
|
||||
key_state.expiration.update(None);
|
||||
}
|
||||
if let Some(allow) = updates.allow {
|
||||
if allow.create_bucket {
|
||||
key_state.allow_create_bucket.update(true);
|
||||
}
|
||||
}
|
||||
if let Some(deny) = updates.deny {
|
||||
if deny.create_bucket {
|
||||
key_state.allow_create_bucket.update(false);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(crate) struct ApiBucketKeyPerm {
|
||||
#[serde(default)]
|
||||
pub(crate) read: bool,
|
||||
#[serde(default)]
|
||||
pub(crate) write: bool,
|
||||
#[serde(default)]
|
||||
pub(crate) owner: bool,
|
||||
Ok(())
|
||||
}
|
||||