Compare commits
310 commits
main-v1
...
1686a/open
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
45f44023a8 | ||
|
|
173a54a83c | ||
|
|
41c943a6b1 | ||
|
|
84c1e189c4 | ||
|
|
dc8355d0f3 | ||
|
|
1c12ca4caf | ||
|
|
582b168b6a | ||
|
|
c821d4974a | ||
|
|
eb1b621a5a | ||
|
|
0c32294485 | ||
|
|
21500545bb | ||
|
|
a525d0e36a | ||
|
|
29e869ac88 | ||
|
|
cd641a9ed2 | ||
|
|
68876f0b08 | ||
|
|
ba1f30d393 | ||
|
|
9018ed9b97 | ||
|
|
13ded6dd35 | ||
|
|
4d96719d96 | ||
|
|
77ef3c85ea | ||
|
|
79c7126b67 |
||
|
|
77b6233496 |
||
|
|
547fe30a05 |
||
|
|
bff5068efc |
||
|
|
60eee993b4 | ||
|
|
d30bb2acb1 | ||
|
|
730c613807 | ||
|
|
9e356347c6 | ||
|
|
1d3c0511b1 | ||
|
|
f50b342c00 | ||
|
|
cf22e7b71d | ||
|
|
dc8d93698b | ||
|
|
006e78ccea | ||
|
|
276e55ae8b | ||
|
|
ab6d9633ac | ||
|
|
cf2f058f60 | ||
|
|
02677af546 | ||
|
|
fced78c283 |
||
|
|
d211c1e291 |
||
|
|
318ef40c4e |
||
|
|
c9c50b741e | ||
|
|
a0a2c1db88 |
||
|
|
04d549acc7 | ||
|
|
a9cd3f3426 | ||
|
|
8f7e92b6a7 |
||
|
|
c2d54b4136 | ||
|
|
2f7a649870 | ||
|
|
c4836916b0 | ||
|
|
b830bdd1dd | ||
|
|
161b185464 | ||
|
|
8f0d10b0b1 | ||
|
|
6bac369ee2 | ||
|
|
96d303b05e |
||
|
|
3a1dce59f7 | ||
|
|
6fd2cf7966 | ||
|
|
6aecd9718f | ||
|
|
0412013229 | ||
|
|
090dbb412a | ||
|
|
12367d307b | ||
|
|
675c1c156d | ||
|
|
eac3a60050 | ||
|
|
a5580c99fe | ||
|
|
8083bb4a0f | ||
|
|
00cdefa6b3 | ||
|
|
4dee3e6e04 | ||
|
|
5333285b50 | ||
|
|
ef913843f7 | ||
|
|
1fe932d07f | ||
|
|
7e5bb51287 | ||
|
|
00a5c3d8a2 | ||
|
|
2e7a6fccba | ||
|
|
09041035d5 |
||
|
|
ae64ecf10c |
||
|
|
c8ac4a2105 | ||
|
|
280c1303fa |
||
|
|
0f5b3878ca |
||
|
|
d863247f9f | ||
|
|
6c740ff05c | ||
|
|
95d9905524 | ||
|
|
53fe77860b | ||
|
|
6d5e971974 | ||
|
|
4d8407dc0f | ||
|
|
006fb18aea | ||
|
|
b43f309ec7 | ||
|
|
df4721387c | ||
|
|
9c067c0cbd | ||
|
|
742129f4a3 | ||
|
|
7a256b2ebb | ||
|
|
909359ca4c | ||
|
|
3148fa3afe | ||
|
|
6b06459b99 | ||
|
|
4c139bcbca | ||
|
|
4758d8881f | ||
|
|
61e19310c8 | ||
|
|
17fe11fa81 | ||
|
|
16128fca63 |
||
|
|
29570f3192 | ||
|
|
c35c1b5b9b | ||
|
|
7e203f634e | ||
|
|
99f7c0fc4b | ||
|
|
fb95a8819f | ||
|
|
665addc03b | ||
|
|
7949927291 | ||
|
|
2ddb29ca35 | ||
|
|
30d8ec5368 | ||
|
|
47772eb525 | ||
|
|
c1ed770e64 | ||
|
|
7e80e86934 | ||
|
|
4deb57815a |
||
|
|
df343dd808 | ||
|
|
17c73bafa2 | ||
|
|
d8058e7475 | ||
|
|
385fbc606d | ||
|
|
6f9d6919a9 | ||
|
|
91fde4105d | ||
|
|
d975960be3 | ||
|
|
6508acbe71 | ||
|
|
985ad68ade | ||
|
|
b7a853b01f | ||
|
|
66faef9fb6 | ||
|
|
13f67b6cd8 | ||
|
|
0dabf9b22f | ||
|
|
e226fb413f | ||
|
|
708a84f1d6 |
||
|
|
0465475599 | ||
|
|
0a45317b3b | ||
|
|
bb3b832024 | ||
|
|
f8be15c37d | ||
|
|
1e05fc1d53 | ||
|
|
e5eff872f5 | ||
|
|
605ee4cdb1 |
||
|
|
71aef8770e |
||
|
|
b4f6ab963c | ||
|
|
9a31b9c077 | ||
|
|
58a96dc687 | ||
|
|
7bbb3ff9cf | ||
|
|
f04af18193 | ||
|
|
67e0fcc6ea | ||
|
|
78f03aec78 | ||
|
|
56a23d936e | ||
|
|
9b6e45ca1f | ||
|
|
27666ed265 | ||
|
|
e8e722cc66 | ||
|
|
80f818eb6c | ||
|
|
f899e023a0 | ||
|
|
7556c536ae | ||
|
|
2a20319fa9 | ||
|
|
42baa29e50 | ||
|
|
f461348790 | ||
|
|
4a8f7e15ce | ||
|
|
44587d295a | ||
|
|
dc1a4ffd76 | ||
|
|
53005c91a5 | ||
|
|
b7a153b892 | ||
|
|
bc8e6af223 | ||
|
|
78b1481461 | ||
|
|
7ab1d176d4 | ||
|
|
b15d55ea9f | ||
|
|
c13af97b81 | ||
|
|
d1d5c67ba7 | ||
|
|
77125e9464 | ||
|
|
cfd10480ee | ||
|
|
fbb40c4ea0 | ||
|
|
e475c7f802 | ||
|
|
589a992af8 | ||
|
|
768794daae | ||
|
|
abe0546ab0 | ||
|
|
47fe96279b | ||
|
|
45bdf54e7e | ||
|
|
a4b431163c | ||
|
|
db54bf96c7 | ||
|
|
cbcdab4e24 | ||
|
|
38ca35eb0f | ||
|
|
a2d87a012d | ||
|
|
899292ee28 | ||
|
|
c8e9c45889 | ||
|
|
e79b485aa8 | ||
|
|
d38d62f4d7 | ||
|
|
2885806e00 | ||
|
|
52437e4298 | ||
|
|
abcef7a3fd | ||
|
|
5d338f0b8f | ||
|
|
590c9bb4db | ||
|
|
c56b7e20c3 | ||
|
|
2f21181ccb | ||
|
|
2d1c073d2f | ||
|
|
5e7307cbf3 | ||
|
|
fd0e23e984 | ||
|
|
d7506b282c | ||
|
|
6bbdca2e48 | ||
|
|
c6d6cc1fc3 | ||
|
|
5fa6df6ee3 | ||
|
|
c6bed26347 | ||
|
|
d25e631a4a | ||
|
|
514eb29874 | ||
|
|
8ba6454e21 | ||
|
|
9dcc5232a6 | ||
|
|
1e13a66b42 | ||
|
|
2c9e849bbf | ||
|
|
34baade499 | ||
|
|
2f2a96b51d | ||
|
|
c9156f6828 | ||
|
|
4629ee25f7 | ||
|
|
a826c361a9 | ||
|
|
fb6db494cc | ||
|
|
97e2fa5b8b | ||
|
|
cfd259190f | ||
|
|
48e0436f29 | ||
|
|
9c745548c4 | ||
|
|
f7d9c2b383 | ||
|
|
e6862c5d3d | ||
|
|
d032e2017c | ||
|
|
0b12debf6c | ||
|
|
795b4a41b7 | ||
|
|
fd2472d488 | ||
|
|
d2a064bb1b | ||
|
|
88b4623bf1 | ||
|
|
325f79012c | ||
|
|
eb40475f1e | ||
|
|
22c0420607 | ||
|
|
1bd7689301 | ||
|
|
ec0da3b644 | ||
|
|
9511b20153 | ||
|
|
d067a40b3f | ||
|
|
ff6ec62d54 | ||
|
|
004eb94e14 | ||
|
|
46f620119b | ||
|
|
576d0d950e | ||
|
|
85a07c87d7 | ||
|
|
1f645830a4 | ||
|
|
5f308bd688 | ||
|
|
df758e8e0d | ||
|
|
e83864af24 | ||
|
|
3b49dd9e63 | ||
|
|
cef8d75983 | ||
|
|
cd0728cd20 | ||
|
|
0951b5db75 | ||
|
|
3d94eb8d4b | ||
|
|
004866caac | ||
|
|
913e6da41b | ||
|
|
e4881e62f1 | ||
|
|
7ccbfda26d | ||
|
|
6b19d7628e | ||
|
|
411f1d495c | ||
|
|
ba68506c36 | ||
|
|
21c83ab311 | ||
|
|
2e03d90585 | ||
|
|
29ce490dd6 | ||
|
|
c3e8e5e38c | ||
|
|
62a3003cca | ||
|
|
3151695011 | ||
|
|
f034e834fa | ||
|
|
bf0f792418 | ||
|
|
61f3de6496 | ||
|
|
71655c1e89 | ||
|
|
7c8fc04b96 | ||
|
|
f914db057a | ||
|
|
406b6da163 | ||
|
|
9f468b4439 | ||
|
|
97be7b38fa | ||
|
|
6a1079c412 | ||
|
|
b1629dd355 | ||
|
|
d405a9f839 | ||
|
|
7b9c047b11 | ||
|
|
10bbb26b30 | ||
|
|
89ff9f5576 | ||
|
|
bdaf55ab3f | ||
|
|
e96014ca60 | ||
|
|
568c4954e9 | ||
|
|
fe937c2901 | ||
|
|
3192088aac | ||
|
|
5a89350b38 | ||
|
|
3caea5fc06 | ||
|
|
ebc0e9319e | ||
|
|
f8c6a8373d | ||
|
|
076ce04fe5 | ||
|
|
f37d5d2b08 | ||
|
|
819f4f0050 | ||
|
|
69ddaafc60 | ||
|
|
145130481e | ||
|
|
6ed78abb5c | ||
|
|
19454c1679 | ||
|
|
1c03941b19 | ||
|
|
4f0b923c4f | ||
|
|
420bbc162d | ||
|
|
12ea4cda5f | ||
|
|
5fefbd94e9 | ||
|
|
ba810b2e81 | ||
|
|
f8ed3fdbc4 | ||
|
|
2daeb89834 | ||
|
|
4cb45bd398 | ||
|
|
d5ad797ad7 | ||
|
|
a99925e0ed | ||
|
|
f538dc34d3 | ||
|
|
ed58f8b0fe | ||
|
|
5037b97dd4 | ||
|
|
af1a530834 | ||
|
|
c99bfe69ea | ||
|
|
831f2b0207 | ||
|
|
c1eb1610ba | ||
|
|
5560a963e0 | ||
|
|
2aaba39ddc | ||
|
|
47467df83e | ||
|
|
9b7fea4cb0 | ||
|
|
44ce6ae5b4 | ||
|
|
22487ceddf | ||
|
|
6ccfbb2986 | ||
|
|
c939d2a936 | ||
|
|
65e9dde8c9 | ||
|
|
c9b733a4a6 |
|
|
@ -38,7 +38,15 @@ steps:
|
|||
- matrix:
|
||||
ARCH: i386
|
||||
|
||||
- name: upgrade tests
|
||||
- name: upgrade tests from v1.0.0
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-shell --attr ci --run "./script/test-upgrade.sh v1.0.0 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
|
||||
when:
|
||||
- matrix:
|
||||
ARCH: amd64
|
||||
|
||||
- name: upgrade tests from v0.8.4
|
||||
image: nixpkgs/nix:nixos-24.05
|
||||
commands:
|
||||
- nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
|
||||
|
|
|
|||
1720
Cargo.lock
generated
33
Cargo.toml
|
|
@ -24,18 +24,18 @@ default-members = ["src/garage"]
|
|||
|
||||
# Internal Garage crates
|
||||
format_table = { version = "0.1.1", path = "src/format-table" }
|
||||
garage_api_common = { version = "1.3.0", path = "src/api/common" }
|
||||
garage_api_admin = { version = "1.3.0", path = "src/api/admin" }
|
||||
garage_api_s3 = { version = "1.3.0", path = "src/api/s3" }
|
||||
garage_api_k2v = { version = "1.3.0", path = "src/api/k2v" }
|
||||
garage_block = { version = "1.3.0", path = "src/block" }
|
||||
garage_db = { version = "1.3.0", path = "src/db", default-features = false }
|
||||
garage_model = { version = "1.3.0", path = "src/model", default-features = false }
|
||||
garage_net = { version = "1.3.0", path = "src/net" }
|
||||
garage_rpc = { version = "1.3.0", path = "src/rpc" }
|
||||
garage_table = { version = "1.3.0", path = "src/table" }
|
||||
garage_util = { version = "1.3.0", path = "src/util" }
|
||||
garage_web = { version = "1.3.0", path = "src/web" }
|
||||
garage_api_common = { version = "2.2.0", path = "src/api/common" }
|
||||
garage_api_admin = { version = "2.2.0", path = "src/api/admin" }
|
||||
garage_api_s3 = { version = "2.2.0", path = "src/api/s3" }
|
||||
garage_api_k2v = { version = "2.2.0", path = "src/api/k2v" }
|
||||
garage_block = { version = "2.2.0", path = "src/block" }
|
||||
garage_db = { version = "2.2.0", path = "src/db", default-features = false }
|
||||
garage_model = { version = "2.2.0", path = "src/model", default-features = false }
|
||||
garage_net = { version = "2.2.0", path = "src/net" }
|
||||
garage_rpc = { version = "2.2.0", path = "src/rpc" }
|
||||
garage_table = { version = "2.2.0", path = "src/table" }
|
||||
garage_util = { version = "2.2.0", path = "src/util" }
|
||||
garage_web = { version = "2.2.0", path = "src/web" }
|
||||
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
|
||||
|
||||
# External crates from crates.io
|
||||
|
|
@ -48,9 +48,8 @@ blake2 = "0.10"
|
|||
bytes = "1.0"
|
||||
bytesize = "1.1"
|
||||
cfg-if = "1.0"
|
||||
chrono = "0.4"
|
||||
crc32fast = "1.4"
|
||||
crc32c = "0.6"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
crc-fast = "1.6"
|
||||
crypto-common = "0.1"
|
||||
gethostname = "0.4"
|
||||
git-version = "0.3.4"
|
||||
|
|
@ -66,6 +65,7 @@ nix = { version = "0.29", default-features = false, features = ["fs"] }
|
|||
nom = "7.1"
|
||||
parking_lot = "0.12"
|
||||
parse_duration = "2.1"
|
||||
paste = "1.0"
|
||||
pin-project = "1.0.12"
|
||||
pnet_datalink = "0.34"
|
||||
rand = "0.8"
|
||||
|
|
@ -101,6 +101,7 @@ serde = { version = "1.0", default-features = false, features = ["derive", "rc"]
|
|||
serde_bytes = "0.11"
|
||||
serde_json = "1.0"
|
||||
toml = { version = "0.8", default-features = false, features = ["parse"] }
|
||||
utoipa = { version = "5.3.1", features = ["chrono"] }
|
||||
|
||||
# newer version requires rust edition 2021
|
||||
k8s-openapi = { version = "0.21", features = ["v1_24"] }
|
||||
|
|
@ -153,5 +154,5 @@ lto = "off"
|
|||
[profile.release]
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = "s"
|
||||
opt-level = 3
|
||||
strip = true
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage Adminstration API v0</title>
|
||||
<title>Garage administration API v0</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage Adminstration API v0</title>
|
||||
<title>Garage administration API v1</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
|
|
|||
24
doc/api/garage-admin-v2.html
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Garage administration API v2</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link href="./css/redoc.css" rel="stylesheet">
|
||||
|
||||
<!--
|
||||
Redoc doesn't change outer page styles
|
||||
-->
|
||||
<style>
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<redoc spec-url='./garage-admin-v2.json'></redoc>
|
||||
<script src="./redoc.standalone.js"> </script>
|
||||
</body>
|
||||
</html>
|
||||
4429
doc/api/garage-admin-v2.json
Normal file
|
|
@ -12,8 +12,9 @@ In this section, we cover the following web applications:
|
|||
| [Mastodon](#mastodon) | ✅ | Natively supported |
|
||||
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
|
||||
| [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
|
||||
| [Pixelfed](#pixelfed) | ✅ | Natively supported |
|
||||
| [Pleroma](#pleroma) | ❓ | Not yet tested |
|
||||
| [Ente](#ente) | ✅ | Natively supported |
|
||||
| [Pixelfed](#pixelfed) | ❓ | Natively supported |
|
||||
| [Pleroma](#pleroma) | ✅ | Natively supported |
|
||||
| [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
|
||||
| [Funkwhale](#funkwhale) | ❓ | Not yet tested |
|
||||
| [Misskey](#misskey) | ❓ | Not yet tested |
|
||||
|
|
@ -567,13 +568,186 @@ The module can then be configured with:
|
|||
Other configuration options can be found in the
|
||||
[configuration YAML file](https://github.com/processone/ejabberd-contrib/blob/master/mod_s3_upload/conf/mod_s3_upload.yml).
|
||||
|
||||
|
||||
## Ente
|
||||
|
||||
Ente is an alternative for Google Photos and Apple Photos. It [can be selfhosted](https://help.ente.io/self-hosting/) and is working fine with Garage as of May 2024.
|
||||
As a first step we need to create a bucket and a key for Ente:
|
||||
|
||||
```bash
|
||||
garage bucket create ente
|
||||
garage key create ente-key
|
||||
# For the CORS setup to work, the key needs to be --owner as well, at least temporarily.
|
||||
garage bucket allow ente --read --write --owner --key ente-key
|
||||
```
|
||||
|
||||
We also need to setup some CORS rules to allow the Ente frontend to access the bucket:
|
||||
|
||||
```bash
|
||||
export CORS='{"CORSRules":[{"AllowedHeaders":["*"],"AllowedMethods":["GET", "PUT", "POST", "DELETE"],"AllowedOrigins":["*"], "ExposeHeaders":["ETag"]}]}'
|
||||
aws s3api put-bucket-cors --bucket ente --cors-configuration $CORS
|
||||
```
|
||||
|
||||
Now we need to configure ente-server to use our bucket. This is explained [in the Ente S3 documentation](https://help.ente.io/self-hosting/guides/external-s3).
|
||||
Prepare a configuration file for ente's backend as `museum.yaml`:
|
||||
|
||||
```yaml
|
||||
credentials-file: /credentials.yaml
|
||||
apps:
|
||||
public-albums: https://albums.example.tld # If you want to use the share album feature
|
||||
internal:
|
||||
hardcoded-ott:
|
||||
local-domain-suffix: "@example.com" # Your domain
|
||||
local-domain-value: 123456 # Custom One-Time Password since we are not sending mail by default
|
||||
key:
|
||||
# WARNING -- You MUST CHANGE the values below
|
||||
# Someone has made an image that can do it for you : https://github.com/EdyTheCow/ente-selfhost/blob/main/images/ente-server-tools/Dockerfile
|
||||
# Simply build it yourself or run docker run --rm ghcr.io/edythecow/ente-server-tools go run tools/gen-random-keys/main.go
|
||||
encryption: yvmG/RnzKrbCb9L3mgsmoxXr9H7i2Z4qlbT0mL3ln4w= # CHANGE THIS VALUE
|
||||
hash: KXYiG07wC7GIgvCSdg+WmyWdXDAn6XKYJtp/wkEU7x573+byBRAYtpTP0wwvi8i/4l37uicX1dVTUzwH3sLZyw== # CHANGE THIS VALUE
|
||||
jwt:
|
||||
secret: i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8= # CHANGE THIS VALUE
|
||||
```
|
||||
|
||||
The full configuration file can be found [here](https://github.com/ente-io/ente/blob/main/server/configurations/local.yaml)
|
||||
Then prepare a credentials file as `credentials.yaml`
|
||||
|
||||
```yaml
|
||||
db:
|
||||
host: postgres
|
||||
port: 5432
|
||||
name: <ente_db_name>
|
||||
user: <pguser>
|
||||
password: <pgpass>
|
||||
|
||||
s3:
|
||||
# Override the primary and secondary hot storage. The commented out values
|
||||
# are the defaults.
|
||||
#
|
||||
hot_storage:
|
||||
primary: b2-eu-cen
|
||||
# secondary: wasabi-eu-central-2-v3
|
||||
|
||||
# If true, enable some workarounds to allow us to use a local minio instance
|
||||
# for object storage.
|
||||
#
|
||||
# 1. Disable SSL.
|
||||
# 2. Use "path" style S3 URLs (see `use_path_style_urls` below).
|
||||
# 3. Directly download the file during replication instead of going via the
|
||||
# Cloudflare worker.
|
||||
# 4. Do not specify storage classes when uploading objects (since minio does
|
||||
# not support them, specifically it doesn't support GLACIER).
|
||||
are_local_buckets: true
|
||||
|
||||
# To use "path" style S3 URLs instead of DNS-based bucket access
|
||||
# default to true if you set "are_local_buckets: true"
|
||||
# use_path_style_urls: true
|
||||
|
||||
b2-eu-cen: # Don't change this key, it is hardcoded
|
||||
key: <keyID>
|
||||
secret: <keySecret>
|
||||
endpoint: garage:3900 # publicly accessible endpoint of your garage instance
|
||||
region: garage
|
||||
bucket: <yourbucketName>
|
||||
use_path_style: true
|
||||
# you can specify secondary locations, names are hardcoded as well
|
||||
# wasabi-eu-central-2-v3:
|
||||
# scw-eu-fr-v3:
|
||||
|
||||
# and you can also specify a bucket to be used for embeddings, preview etc..
|
||||
# default to the first bucket
|
||||
# derived-storage: wasabi-eu-central-2-derived
|
||||
```
|
||||
|
||||
Finally you can run it with Docker :
|
||||
|
||||
```bash
|
||||
docker run -d --name ente-server --restart unless-stopped -v /path/to/museum.yaml:/museum.yaml -v /path/to/credentials.yaml:/credentials.yaml -p 8080:8080 ghcr.io/ente-io/ente-server
|
||||
```
|
||||
|
||||
For more information on deployment you can check the [ente documentation](https://help.ente.io/self-hosting/)
|
||||
|
||||
## Pixelfed
|
||||
|
||||
[Pixelfed Technical Documentation > Configuration](https://docs.pixelfed.org/technical-documentation/env.html#filesystem)
|
||||
|
||||
## Pleroma
|
||||
|
||||
[Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
|
||||
### Creating your bucket
|
||||
|
||||
This is the usual Garage setup:
|
||||
|
||||
```bash
|
||||
garage key new --name pleroma-key
|
||||
garage bucket create pleroma
|
||||
garage bucket allow pleroma --read --write --owner --key pleroma-key
|
||||
```
|
||||
|
||||
We also need to expose these buckets publicly to serve their content to users:
|
||||
|
||||
```bash
|
||||
garage bucket website --allow pleroma
|
||||
```
|
||||
|
||||
Note the Key ID and Secret Key.
|
||||
|
||||
### Configure Pleroma
|
||||
|
||||
Update your Pleroma configuration like that in `/etc/pleroma/config.exs`.
|
||||
|
||||
```
|
||||
config :pleroma, Pleroma.Upload,
|
||||
uploader: Pleroma.Uploaders.S3,
|
||||
base_url: "https://pleroma.garage.example.tld"
|
||||
|
||||
config :ex_aws, :s3,
|
||||
access_key_id: "GW...",
|
||||
secret_access_key: "XXX",
|
||||
region: "garage",
|
||||
host: "api.garage.example.tld"
|
||||
```
|
||||
|
||||
And restart Pleroma.
|
||||
|
||||
You can find more information in [Pleroma Documentation > Pleroma.Uploaders.S3](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/#pleromauploaderss3)
|
||||
|
||||
### Migrating your data
|
||||
|
||||
Pleroma has an internal migration tool that can encounter a fatal error
|
||||
|
||||
```
|
||||
** (EXIT from #PID<0.98.0>) an exception was raised:
|
||||
** (File.Error) could not stream "/var/lib/pleroma/uploads/09/f8": illegal operation on a directory
|
||||
(elixir 1.17.3) lib/file/stream.ex:100: anonymous fn/3 in Enumerable.File.Stream.reduce/3
|
||||
(elixir 1.17.3) lib/stream.ex:1675: anonymous fn/5 in Stream.resource/3
|
||||
(elixir 1.17.3) lib/stream.ex:1891: Enumerable.Stream.do_each/4
|
||||
(elixir 1.17.3) lib/task/supervised.ex:370: Task.Supervised.stream_reduce/7
|
||||
(elixir 1.17.3) lib/enum.ex:4423: Enum.map/2
|
||||
(ex_aws_s3 2.5.8) lib/ex_aws/s3/upload.ex:141: ExAws.Operation.ExAws.S3.Upload.perform/2
|
||||
(pleroma 2.10.0) lib/pleroma/uploaders/s3.ex:60: Pleroma.Uploaders.S3.put_file/1
|
||||
(pleroma 2.10.0) lib/pleroma/uploaders/uploader.ex:49: Pleroma.Uploaders.Uploader.put_file/2
|
||||
```
|
||||
|
||||
So, use [your best tool](https://garagehq.deuxfleurs.fr/documentation/connect/cli/) to sync `/var/lib/pleroma/uploads/` in your S3.
|
||||
|
||||
Then, to avoid any non-existent problems (just in case), run this command
|
||||
|
||||
```bash
|
||||
while true
|
||||
do
|
||||
rm -vr $(./bin/pleroma_ctl uploads migrate_local S3 2>&1 | grep "could not stream" | awk -F '"' '{print $2}')
|
||||
sleep 5
|
||||
done
|
||||
```
|
||||
|
||||
If you have many files, stop this command at some point and run the command below (interactive) to delete the local
|
||||
file after upload. Then restart the loop.
|
||||
|
||||
```bash
|
||||
./bin/pleroma_ctl uploads migrate_local S3 --delete
|
||||
```
|
||||
|
||||
And *voilà*
|
||||
|
||||
## Lemmy
|
||||
|
||||
|
|
|
|||
|
|
@ -207,3 +207,13 @@ $ plakar at @garageS3 ls
|
|||
```
|
||||
|
||||
More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/
|
||||
|
||||
## Synology HyperBackup
|
||||
|
||||
HyperBackup can be configured to upload backups to garage using a custom S3 destination. However, the HyperBackup client hardcodes the `us-east-1` region that is a critical input to the v4 signature process. If garage is not set to `us-east-1`, HyperBackup will recognize available buckets, but fail during the final setup stage.
|
||||
|
||||
In garage.toml:
|
||||
```toml
|
||||
[s3_api]
|
||||
s3_region = "us-east-1"
|
||||
```
|
||||
|
|
|
|||
|
|
@ -149,6 +149,15 @@ rclone help
|
|||
This will tremendously accelerate operations such as `rclone sync` or `rclone ncdu` by reducing the number
|
||||
of ListObjects calls that are made.
|
||||
|
||||
**Garage behind Cloudflare proxy:** when running Garage behind Cloudflare proxy, you might see `Response: error 403 Forbidden, Forbidden: Invalid signature` error in your garage logs or `AccessDenied: Forbidden: Invalid signature` error in rclone logs. Try adding `--s3-sign-accept-encoding=false` flag to your rclone command and see if the issue is resolved.
|
||||
|
||||
```bash
|
||||
# this throws an error
|
||||
rclone lsd garage:
|
||||
|
||||
# this should work
|
||||
rclone lsd --s3-sign-accept-encoding=false garage:
|
||||
```
|
||||
|
||||
## `s3cmd`
|
||||
|
||||
|
|
@ -314,4 +323,3 @@ ls
|
|||
```
|
||||
|
||||
And through the web interface at http://[::1]:8080/web/client
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ have published Ansible roles. We list them and compare them below.
|
|||
|
||||
## Comparison of Ansible roles
|
||||
|
||||
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
|
||||
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster2309 ansible-role-garage](#eddster2309-ansible-role-garage) |
|
||||
|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
|
||||
| **Runtime** | Systemd | Docker | Systemd |
|
||||
| **Target OS** | Any Linux | Any Linux | Any Linux |
|
||||
|
|
|
|||
|
|
@ -29,6 +29,10 @@ it's stable).
|
|||
|
||||
Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).
|
||||
|
||||
```bash
|
||||
pacman -S garage
|
||||
```
|
||||
|
||||
## FreeBSD
|
||||
|
||||
```bash
|
||||
|
|
@ -40,3 +44,9 @@ pkg install garage
|
|||
```bash
|
||||
nix-shell -p garage
|
||||
```
|
||||
|
||||
## conda-forge
|
||||
|
||||
```bash
|
||||
pixi global install garage
|
||||
```
|
||||
|
|
|
|||
|
|
@ -20,10 +20,10 @@ sudo apt-get update
|
|||
sudo apt-get install build-essential
|
||||
```
|
||||
|
||||
## Building from source from the Gitea repository
|
||||
## Building from source from the Forgejo repository
|
||||
|
||||
The primary location for Garage's source code is the
|
||||
[Gitea repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
|
||||
[Forgejo repository](https://git.deuxfleurs.fr/Deuxfleurs/garage),
|
||||
which contains all of the released versions as well as the code
|
||||
for the development of the next version.
|
||||
|
||||
|
|
@ -85,11 +85,14 @@ The following feature flags are available in v0.8.0:
|
|||
| Feature flag | Enabled | Description |
|
||||
| ------------ | ------- | ----------- |
|
||||
| `bundled-libs` | *by default* | Use bundled version of sqlite3, zstd, lmdb and libsodium |
|
||||
| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium<br>if available (exclusive with `bundled-libs`, build using<br>`cargo build --no-default-features --features system-libs`) |
|
||||
| `consul-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Consul API |
|
||||
| `fjall` | experimental | Enable using Fjall to store Garage's metadata |
|
||||
| `journald` | optional | Enable logging to systemd-journald with<br>`GARAGE_LOG_TO_JOURNALD=true` environment variable set |
|
||||
| `k2v` | optional | Enable the experimental K2V API (if used, all nodes on your<br>Garage cluster must have it enabled as well) |
|
||||
| `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
|
||||
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
||||
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
||||
| `syslog` | optional | Enable logging to Syslog |
|
||||
| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
|
||||
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
||||
| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
|
||||
| `syslog` | optional | Enable logging to Syslog with<br>`GARAGE_LOG_TO_SYSLOG=true` environment variable set |
|
||||
| `system-libs` | optional | Use system version of sqlite3, zstd, lmdb and libsodium<br>if available (exclusive with `bundled-libs`, build using<br>`cargo build --no-default-features --features system-libs`) |
|
||||
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ This is an example `values.overrride.yaml` for deploying in a microk8s cluster w
|
|||
```yaml
|
||||
garage:
|
||||
# Use only 2 replicas per object
|
||||
replicationMode: "2"
|
||||
replicationFactor: 2
|
||||
|
||||
# Start 4 instances (StatefulSets) of garage
|
||||
deployment:
|
||||
|
|
|
|||
|
|
@ -96,14 +96,14 @@ to store 2 TB of data in total.
|
|||
## Get a Docker image
|
||||
|
||||
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||
We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
|
||||
For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
|
||||
We encourage you to use a fixed tag (eg. `v2.2.0`) and not the `latest` tag.
|
||||
For this example, we will use the latest published version at the time of the writing which is `v2.2.0` but it's up to you
|
||||
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
sudo docker pull dxflrs/garage:v1.3.0
|
||||
sudo docker pull dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
## Deploying and configuring Garage
|
||||
|
|
@ -171,7 +171,7 @@ docker run \
|
|||
-v /etc/garage.toml:/etc/garage.toml \
|
||||
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
||||
-v /var/lib/garage/data:/var/lib/garage/data \
|
||||
dxflrs/garage:v1.3.0
|
||||
dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
With this command line, Garage should be started automatically at each boot.
|
||||
|
|
@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
|
|||
version: "3"
|
||||
services:
|
||||
garage:
|
||||
image: dxflrs/garage:v1.3.0
|
||||
image: dxflrs/garage:v2.2.0
|
||||
network_mode: "host"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ The main reason to add a reverse proxy in front of Garage is to provide TLS to y
|
|||
|
||||
In production you will likely need your certificates signed by a certificate authority.
|
||||
The most automated way is to use a provider supporting the [ACME protocol](https://datatracker.ietf.org/doc/html/rfc8555)
|
||||
such as [Let's Encrypt](https://letsencrypt.org/), [ZeroSSL](https://zerossl.com/) or [Buypass Go SSL](https://www.buypass.com/ssl/products/acme).
|
||||
such as [Let's Encrypt](https://letsencrypt.org/) or [ZeroSSL](https://zerossl.com/).
|
||||
|
||||
If you are only testing Garage, you can generate a self-signed certificate to follow the documentation:
|
||||
|
||||
|
|
@ -97,7 +97,7 @@ server {
|
|||
location / {
|
||||
proxy_pass http://s3_backend;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header Host $http_host;
|
||||
# Disable buffering to a temporary file.
|
||||
proxy_max_temp_file_size 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,11 +59,13 @@ Garage themselves for the following tasks:
|
|||
|
||||
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
|
||||
|
||||
- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
|
||||
- As a PowerDNS authoritative zone backend through [Lightning Stream](https://doc.powerdns.com/lightningstream/latest/index.html) and [LMDB](https://doc.powerdns.com/authoritative/backends/lmdb.html)
|
||||
|
||||
- As a Mastodon media storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
|
||||
|
||||
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
|
||||
|
||||
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
|
||||
|
||||
Triplebit's Garage cluster is a multi-site cluster currently composed of
|
||||
10 nodes in 3 physical locations.
|
||||
15 storage nodes in 3 physical locations.
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ You may pause an ongoing scrub using `garage repair scrub pause`, but note that
|
|||
the scrub will resume automatically 24 hours later as Garage will not let your
|
||||
cluster run without a regular scrub. If the scrub procedure is too intensive
|
||||
for your servers and is slowing down your workload, the recommended solution
|
||||
is to increase the "scrub tranquility" using `garage repair scrub set-tranquility`.
|
||||
is to increase the "scrub tranquility" using `garage worker set scrub-tranquility`.
|
||||
A higher tranquility value will make Garage take longer pauses between two block
|
||||
verifications. Of course, scrubbing the entire data store will also take longer.
|
||||
|
||||
|
|
|
|||
|
|
@ -162,3 +162,6 @@ your recovery options are as follows:
|
|||
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
|
||||
BTRFS to snapshot your metadata partition, refer to their specific
|
||||
documentation on rolling back or copying files from an old snapshot.
|
||||
Note that, depending on the properties of the filesystem and of the DB engine,
|
||||
if these snapshots were taken during a write operation to the database, they may
|
||||
also be corrupted and thus unfit for recovery.
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ docker run \
|
|||
-v /path/to/garage.toml:/etc/garage.toml \
|
||||
-v /path/to/garage/meta:/var/lib/garage/meta \
|
||||
-v /path/to/garage/data:/var/lib/garage/data \
|
||||
dxflrs/garage:v1.3.0
|
||||
dxflrs/garage:v2.2.0
|
||||
```
|
||||
|
||||
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
|
||||
|
|
|
|||
|
|
@ -6,41 +6,167 @@ weight = 40
|
|||
The Garage administration API is accessible through a dedicated server whose
|
||||
listen address is specified in the `[admin]` section of the configuration
|
||||
file (see [configuration file
|
||||
reference](@/documentation/reference-manual/configuration.md))
|
||||
reference](@/documentation/reference-manual/configuration.md)).
|
||||
|
||||
**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
|
||||
We will bump the version numbers prefixed to each API endpoint each time the syntax
|
||||
or semantics change, meaning that code that relies on these endpoints will break
|
||||
when changes are introduced.
|
||||
|
||||
Versions:
|
||||
- Before Garage 0.7.2 - no admin API
|
||||
- Garage 0.7.2 - admin APIv0
|
||||
- Garage 0.9.0 - admin APIv1, deprecate admin APIv0
|
||||
The current version of the admin API is v2. No breaking changes to the Garage
|
||||
administration API will be published outside of a major release.
|
||||
|
||||
History of previous versions:
|
||||
|
||||
- Before Garage v0.7.2 - no admin API
|
||||
- Garage v0.7.2 - admin API v0
|
||||
- Garage v0.9.0 - admin API v1, deprecate admin API v0
|
||||
- Garage v2.0.0 - admin API v2, deprecate admin API v1
|
||||
|
||||
## Access control
|
||||
|
||||
The admin API uses two different tokens for access control, that are specified in the config file's `[admin]` section:
|
||||
### Using an API token
|
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||
is not set in the config file, the Metrics endpoint can be accessed without
|
||||
access control);
|
||||
|
||||
- `admin_token`: the token for accessing all of the other administration
|
||||
endpoints (if this token is not set in the config file, access to these
|
||||
endpoints is disabled entirely).
|
||||
|
||||
These tokens are used as simple HTTP bearer tokens. In other words, to
|
||||
authenticate access to an admin API endpoint, add the following HTTP header
|
||||
to your request:
|
||||
Administration API tokens are used as simple HTTP bearer tokens. In
|
||||
other words, to authenticate access to an admin API endpoint, add the following
|
||||
HTTP header to your request:
|
||||
|
||||
```
|
||||
Authorization: Bearer <token>
|
||||
```
|
||||
|
||||
## Administration API endpoints
|
||||
### User-defined API tokens
|
||||
|
||||
Cluster administrators may dynamically define administration tokens using the CLI commands under `garage admin-token`.
|
||||
Such tokens may be limited in scope, meaning that they may enable access to only a subset of API calls.
|
||||
They may also have an expiration date to limit their use in time.
|
||||
|
||||
Here is an example to create an administration token that is valid for 30 days
|
||||
and gives access to only a subset of API calls, allowing it to create buckets
|
||||
and access keys and give keys permissions on buckets:
|
||||
|
||||
```bash
|
||||
$ garage admin-token create --expires-in 30d \
|
||||
--scope ListBuckets,GetBucketInfo,ListKeys,GetKeyInfo,CreateBucket,CreateKey,AllowBucketKey,DenyBucketKey \
|
||||
my-token
|
||||
This is your secret bearer token, it will not be shown again by Garage:
|
||||
|
||||
8ed1830b10a276ff57061950.kOSIpxWK9zSGbTO9Xadpv3YndSFWma0_snXcYHaORXk
|
||||
|
||||
==== ADMINISTRATION TOKEN INFORMATION ====
|
||||
Token ID: 8ed1830b10a276ff57061950
|
||||
Token name: my-token
|
||||
Created: 2025-06-15 15:12:44.160 +02:00
|
||||
Validity: valid
|
||||
Expiration: 2025-07-15 15:12:44.117 +02:00
|
||||
|
||||
Scope: ListBuckets
|
||||
GetBucketInfo
|
||||
ListKeys
|
||||
GetKeyInfo
|
||||
CreateBucket
|
||||
CreateKey
|
||||
AllowBucketKey
|
||||
DenyBucketKey
|
||||
```
|
||||
|
||||
When running this command, your token will be shown only once and **will never
|
||||
be shown again by Garage**, so make sure to save it directly. The token is
|
||||
hashed internally, and is identified by its prefix (24 hex digits followed by a
|
||||
dot) which is saved in clear.
|
||||
|
||||
When running `garage admin-token list`, you might see something like this:
|
||||
|
||||
```
|
||||
ID Created Name Expiration Scope
|
||||
- - metrics_token (from daemon configuration) never Metrics
|
||||
8ed1830b10a276ff57061950 2025-06-15 my-token 2025-07-15 15:12:44.117 +02:00 ListBuckets, ... (8)
|
||||
```
|
||||
|
||||
### Master API tokens
|
||||
|
||||
The admin API can also use two different master tokens for access control,
|
||||
specified in the config file's `[admin]` section:
|
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint. If this token
|
||||
is not set in the config file, the Metrics endpoint can be accessed without
|
||||
access control.
|
||||
|
||||
- `admin_token`: the token for accessing all of the other administration
|
||||
endpoints. If this token is not set in the config file, access to these
|
||||
endpoints is only possible with a user-defined admin token.
|
||||
|
||||
With the introduction of multiple user-defined admin tokens, the use of master
|
||||
API tokens is now discouraged.
|
||||
|
||||
|
||||
## Using the admin API
|
||||
|
||||
All of the admin API endpoints are described in the OpenAPI specification:
|
||||
|
||||
- APIv2 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.html) - [OpenAPI JSON](https://garagehq.deuxfleurs.fr/api/garage-admin-v2.json)
|
||||
- APIv1 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
|
||||
- APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
|
||||
|
||||
Making a request to the API from the command line can be as simple as running:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v2/GetClusterStatus | jq
|
||||
```
|
||||
|
||||
For more advanced use cases, we recommend using an SDK.
|
||||
[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
|
||||
|
||||
### Making API calls from the `garage` CLI
|
||||
|
||||
Since v2.0.0, the `garage` binary provides a subcommand `garage json-api` that
|
||||
allows you to invoke the API without making an HTTP request. This can be
|
||||
useful for scripting Garage deployments.
|
||||
|
||||
`garage json-api` proxies API calls through Garage's internal RPC protocol,
|
||||
therefore it does not require any form of authentication: RPC connection
|
||||
parameters are discovered automatically to contact the locally-running Garage
|
||||
instance (as when running any other `garage` CLI command).
|
||||
|
||||
For simple calls that take no parameters, usage is as follows:
|
||||
|
||||
```
|
||||
$ garage json-api GetClusterHealth
|
||||
{
|
||||
"connectedNodes": 3,
|
||||
"knownNodes": 3,
|
||||
"partitions": 256,
|
||||
"partitionsAllOk": 256,
|
||||
"partitionsQuorum": 256,
|
||||
"status": "healthy",
|
||||
"storageNodes": 3,
|
||||
"storageNodesOk": 3
|
||||
}
|
||||
```
|
||||
|
||||
If you need to specify a JSON body for your call, you can add it directly after
|
||||
the name of the function you are calling:
|
||||
|
||||
```
|
||||
$ garage json-api CreateAdminToken '{"name": "test"}'
|
||||
```
|
||||
|
||||
Or you can feed it through stdin by adding a `-` as the last command parameter:
|
||||
|
||||
```
|
||||
$ garage json-api CreateAdminToken -
|
||||
{"name": "test"}
|
||||
<EOF>
|
||||
```
|
||||
|
||||
For admin API calls that would have taken query parameters in their HTTP version, these parameters can be passed in the JSON body object:
|
||||
|
||||
```
|
||||
$ garage json-api GetAdminTokenInfo '{"id":"b0e6e0ace2c0b2aca4cdb2de"}'
|
||||
```
|
||||
|
||||
For admin API calls that take both query parameters and a JSON body, combine them in the following fashion:
|
||||
|
||||
```
|
||||
$ garage json-api UpdateAdminToken '{"id":"b0e6e0ace2c0b2aca4cdb2de", "body":{"name":"not a test"}}'
|
||||
```
|
||||
|
||||
## Special administration API endpoints
|
||||
|
||||
### Metrics `GET /metrics`
|
||||
|
||||
|
|
@ -83,7 +209,7 @@ content-length: 102
|
|||
date: Tue, 08 Aug 2023 07:22:38 GMT
|
||||
|
||||
Garage is fully operational
|
||||
Consult the full health check API endpoint at /v0/health for more details
|
||||
Consult the full health check API endpoint at /v2/GetClusterHealth for more details
|
||||
```
|
||||
|
||||
### On-demand TLS `GET /check`
|
||||
|
|
@ -126,23 +252,7 @@ $ curl -so /dev/null -w "%{http_code}" http://localhost:3903/check?domain=exampl
|
|||
200
|
||||
```
|
||||
|
||||
|
||||
**References:**
|
||||
- [Using On-Demand TLS](https://caddyserver.com/docs/automatic-https#using-on-demand-tls)
|
||||
- [Add option for a backend check to approve use of on-demand TLS](https://github.com/caddyserver/caddy/pull/1939)
|
||||
- [Serving tens of thousands of domains over HTTPS with Caddy](https://caddy.community/t/serving-tens-of-thousands-of-domains-over-https-with-caddy/11179)
|
||||
|
||||
### Cluster operations
|
||||
|
||||
These endpoints have a dedicated OpenAPI spec.
|
||||
- APIv1 - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v1.yml)
|
||||
- APIv0 (deprecated) - [HTML spec](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.html) - [OpenAPI YAML](https://garagehq.deuxfleurs.fr/api/garage-admin-v0.yml)
|
||||
|
||||
Requesting the API from the command line can be as simple as running:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer s3cr3t' http://localhost:3903/v0/status | jq
|
||||
```
|
||||
|
||||
For more advanced use cases, we recommend using a SDK.
|
||||
[Go to the "Build your own app" section to know how to use our SDKs](@/documentation/build/_index.md)
|
||||
|
|
|
|||
|
|
@ -51,17 +51,20 @@ allow_punycode = false
|
|||
|
||||
[consul_discovery]
|
||||
api = "catalog"
|
||||
consul_http_addr = "http://127.0.0.1:8500"
|
||||
consul_http_addr = "https://127.0.0.1:8500"
|
||||
tls_skip_verify = false
|
||||
service_name = "garage-daemon"
|
||||
|
||||
ca_cert = "/etc/consul/consul-ca.crt"
|
||||
client_cert = "/etc/consul/consul-client.crt"
|
||||
client_key = "/etc/consul/consul-key.crt"
|
||||
|
||||
# for `agent` API mode, unset client_cert and client_key, and optionally enable `token`
|
||||
# token = "abcdef-01234-56789"
|
||||
tls_skip_verify = false
|
||||
|
||||
tags = [ "dns-enabled" ]
|
||||
meta = { dns-acl = "allow trusted" }
|
||||
|
||||
datacenters = ["dc1", "dc2", "dc3"]
|
||||
|
||||
[kubernetes_discovery]
|
||||
namespace = "garage"
|
||||
|
|
@ -82,6 +85,7 @@ add_host_to_metrics = true
|
|||
[admin]
|
||||
api_bind_addr = "0.0.0.0:3903"
|
||||
metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
|
||||
metrics_require_token = true
|
||||
admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
|
||||
trace_sink = "http://localhost:4317"
|
||||
```
|
||||
|
|
@ -97,9 +101,9 @@ The following gives details about each available configuration option.
|
|||
Top-level configuration options, in alphabetical order:
|
||||
[`allow_punycode`](#allow_punycode),
|
||||
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
|
||||
[`block_max_concurrent_reads`](`block_max_concurrent_reads),
|
||||
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
||||
[`block_max_concurrent_reads`](#block_max_concurrent_reads),
|
||||
[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
|
||||
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
||||
[`block_size`](#block_size),
|
||||
[`bootstrap_peers`](#bootstrap_peers),
|
||||
[`compression_level`](#compression_level),
|
||||
|
|
@ -127,12 +131,14 @@ The `[consul_discovery]` section:
|
|||
[`client_cert`](#consul_client_cert_and_key),
|
||||
[`client_key`](#consul_client_cert_and_key),
|
||||
[`consul_http_addr`](#consul_http_addr),
|
||||
[`datacenters`](#consul_datacenters)
|
||||
[`meta`](#consul_tags_and_meta),
|
||||
[`service_name`](#consul_service_name),
|
||||
[`tags`](#consul_tags_and_meta),
|
||||
[`tls_skip_verify`](#consul_tls_skip_verify),
|
||||
[`token`](#consul_token).
|
||||
|
||||
|
||||
The `[kubernetes_discovery]` section:
|
||||
[`namespace`](#kube_namespace),
|
||||
[`service_name`](#kube_service_name),
|
||||
|
|
@ -150,6 +156,7 @@ The `[s3_web]` section:
|
|||
|
||||
The `[admin]` section:
|
||||
[`api_bind_addr`](#admin_api_bind_addr),
|
||||
[`metrics_require_token`](#admin_metrics_require_token),
|
||||
[`metrics_token`/`metrics_token_file`](#admin_metrics_token),
|
||||
[`admin_token`/`admin_token_file`](#admin_token),
|
||||
[`trace_sink`](#admin_trace_sink),
|
||||
|
|
@ -336,7 +343,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
|||
| --------- | ----------------- | ------------- |
|
||||
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||
| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
|
||||
| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`/`v2.1.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
|
||||
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||
|
||||
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||
|
|
@ -345,8 +352,16 @@ old Sled metadata databases to another engine.
|
|||
|
||||
Performance characteristics of the different DB engines are as follows:
|
||||
|
||||
- LMDB: the recommended database engine for high-performance distributed clusters.
|
||||
LMDB works very well, but is known to have the following limitations:
|
||||
- **LMDB:** the recommended database engine for high-performance distributed clusters
|
||||
with `replication_factor` ≥ 2.
|
||||
LMDB works well, but is known to have the following limitations:
|
||||
|
||||
- LMDB is prone to database corruption after an unclean shutdown (e.g. a process kill
|
||||
or a power outage). It is recommended to configure
|
||||
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval) to be
|
||||
able to easily recover from this situation. With `replication_factor` ≥ 2,
|
||||
metadata can also be reconstructed from remote nodes upon corruption
|
||||
(see [Recovering from failures](@/documentation/operations/recovering.md#corrupted_meta)).
|
||||
|
||||
- The data format of LMDB is not portable between architectures, so for
|
||||
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
||||
|
|
@ -356,30 +371,21 @@ LMDB works very well, but is known to have the following limitations:
|
|||
node to very small database sizes due to how LMDB works; it is therefore
|
||||
not recommended.
|
||||
|
||||
- Several users have reported corrupted LMDB database files after an unclean
|
||||
shutdown (e.g. a power outage). This situation can generally be recovered
|
||||
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
||||
other nodes), or if you have saved regular snapshots at the filesystem
|
||||
level.
|
||||
|
||||
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
||||
object keys in S3 and sort keys in K2V that are limited to 479 bytes.
|
||||
|
||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||
metadata, which does not have the issues listed above for LMDB.
|
||||
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
||||
performance, which was fixed with the addition of `metadata_fsync`.
|
||||
Sqlite is still probably slower than LMDB due to the way we use it,
|
||||
so it is not the best choice for high-performance storage clusters,
|
||||
but it should work fine in many cases.
|
||||
- **Sqlite:** Garage supports Sqlite as an alternative storage backend for
|
||||
metadata, which does not have the issues listed above for LMDB. Sqlite is
|
||||
slower than LMDB, so it is not the best choice for high-performance storage
|
||||
clusters.
|
||||
|
||||
- Fjall: a storage engine based on LSM trees, which theoretically allow for
|
||||
- **Fjall:** a storage engine based on LSM trees, which theoretically allow for
|
||||
higher write throughput than other storage engines that are based on B-trees.
|
||||
Using Fjall could potentially improve Garage's performance significantly in
|
||||
write-heavy workloads. **Support for Fjall is experimental at this point**,
|
||||
we have added it to Garage for evaluation purposes only. **Do not use it for
|
||||
production-critical workloads.**
|
||||
|
||||
we have added it to Garage for evaluation purposes only. **Use it only with
|
||||
test data, and report any issues to our bug tracker. Do not use it for
|
||||
production workloads.**
|
||||
|
||||
It is possible to convert Garage's metadata directory from one format to another
|
||||
using the `garage convert-db` command, which should be used as follows:
|
||||
|
|
@ -438,7 +444,8 @@ if geographical replication is used.
|
|||
#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}
|
||||
|
||||
If this value is set, Garage will automatically take a snapshot of the metadata
|
||||
DB file at a regular interval and save it in the metadata directory.
|
||||
DB file at a regular interval and save it in the metadata directory,
|
||||
or in [`metadata_snapshots_dir`](#metadata_snapshots_dir) if it is set.
|
||||
This parameter can take any duration string that can be parsed by
|
||||
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
|
||||
|
||||
|
|
@ -447,14 +454,19 @@ corrupted, for instance after an unclean shutdown. See [this
|
|||
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
|
||||
Garage keeps only the two most recent snapshots of the metadata DB and deletes
|
||||
older ones automatically.
|
||||
You can also create metadata snapshots manually at any point using the
|
||||
`garage meta snapshot` command.
|
||||
|
||||
Using snapshots created by Garage is the best option to make snapshots of your
|
||||
node's metadata for potential recovery, as they are guaranteed to be clean and
|
||||
consistent, contrarily to filesystem-level snapshots that may be taken while
|
||||
some writes are in-flight and thus might be corrupted.
|
||||
|
||||
Note that taking a metadata snapshot is a relatively intensive operation as the
|
||||
entire data file is copied. A snapshot being taken might have performance
|
||||
impacts on the Garage node while it is running. If the cluster is under heavy
|
||||
write load when a snapshot operation is running, this might also cause the
|
||||
database file to grow in size significantly as pages cannot be recycled easily.
|
||||
For this reason, it might be better to use filesystem-level snapshots instead
|
||||
if possible.
|
||||
|
||||
#### `disable_scrub` {#disable_scrub}
|
||||
|
||||
|
|
@ -548,13 +560,13 @@ metric in Prometheus: a non-zero number of such events indicates an I/O
|
|||
bottleneck on HDD read speed.
|
||||
|
||||
|
||||
#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
|
||||
#### `block_max_concurrent_writes_per_request` (since `v1.3.1` / `v2.2.0`) {#block_max_concurrent_writes_per_request}
|
||||
|
||||
This parameter is designed to adapt to the concurrent write performance of
|
||||
different storage media.Maximum number of parallel block writes per put request
|
||||
Higher values improve throughput but increase memory usage.
|
||||
different storage media. Maximum number of parallel block writes per put request.
|
||||
Higher values may improve throughput but increase memory usage.
|
||||
|
||||
Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
|
||||
Default value: 3. Recommended values: 10-30 for NVMe, 3-10 for spinning HDD.
|
||||
|
||||
#### `lmdb_map_size` {#lmdb_map_size}
|
||||
|
||||
|
|
@ -728,6 +740,18 @@ node_prefix "" {
|
|||
}
|
||||
```
|
||||
|
||||
|
||||
#### `datacenters` {#consul_datacenters}
|
||||
|
||||
Optional list of datacenters that allow Garage to do service discovery when Consul is configured in WAN federation.
|
||||
|
||||
Example: `datacenters = ["dc1", "dc2", "dc3"]`
|
||||
|
||||
In a WAN configuration, by default the Consul services API only responds with
|
||||
local LAN services. When a list of datacenters is specified using this option,
|
||||
Garage will query the Consul server API by datacenter directly, allowing for
|
||||
Garage to discover nodes across the Consul WAN.
|
||||
|
||||
#### `tags` and `meta` {#consul_tags_and_meta}
|
||||
|
||||
Additional list of tags and map of service meta to add during service registration.
|
||||
|
|
@ -824,10 +848,34 @@ See [administration API reference](@/documentation/reference-manual/admin-api.md
|
|||
Alternatively, since `v0.8.5`, a path can be used to create a unix socket. Note that for security reasons,
|
||||
the socket will have 0220 mode. Make sure to set user and group permissions accordingly.
|
||||
|
||||
#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
|
||||
|
||||
The token for accessing all administration functions on the admin endpoint,
|
||||
with the exception of the metrics endpoint (see `metrics_token`).
|
||||
|
||||
You can use any random string for this value. We recommend generating a random
|
||||
token with `openssl rand -base64 32`.
|
||||
|
||||
For Garage version earlier than `v2.0`, if this token is not set,
|
||||
access to these endpoints is disabled entirely.
|
||||
|
||||
Since Garage `v2.0`, additional admin API tokens can be defined dynamically
|
||||
in your Garage cluster using administration commands. This new admin token system
|
||||
is more flexible since it allows admin tokens to have an expiration date,
|
||||
and to have a scope restricted to certain admin API functions. If `admin_token`
|
||||
is set, it behaves as an admin token without expiration and with full scope.
|
||||
Otherwise, only admin API tokens defined dynamically can be used.
|
||||
|
||||
`admin_token` was introduced in Garage `v0.7.2`.
|
||||
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
||||
|
||||
`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
|
||||
#### `metrics_token`, `metrics_token_file` or `GARAGE_METRICS_TOKEN`, `GARAGE_METRICS_TOKEN_FILE` (env) {#admin_metrics_token}
|
||||
|
||||
The token for accessing the Metrics endpoint. If this token is not set, the
|
||||
Metrics endpoint can be accessed without access control.
|
||||
The token for accessing the Prometheus metrics endpoint (`/metrics`).
|
||||
If this token is not set, and unless `metrics_require_token` is set to `true`,
|
||||
the metrics endpoint can be accessed without access control.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
|
||||
|
|
@ -836,17 +884,12 @@ You can use any random string for this value. We recommend generating a random t
|
|||
|
||||
`GARAGE_METRICS_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
|
||||
#### `admin_token`, `admin_token_file` or `GARAGE_ADMIN_TOKEN`, `GARAGE_ADMIN_TOKEN_FILE` (env) {#admin_token}
|
||||
#### `metrics_require_token` (since `v2.0.0`) {#admin_metrics_require_token}
|
||||
|
||||
The token for accessing all of the other administration endpoints. If this
|
||||
token is not set, access to these endpoints is disabled entirely.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
|
||||
`admin_token` was introduced in Garage `v0.7.2`.
|
||||
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
||||
|
||||
`GARAGE_ADMIN_TOKEN_FILE` is supported since `v0.8.5` / `v0.9.1`.
|
||||
If this is set to `true`, accessing the metrics endpoint will always require
|
||||
an access token. Valid tokens include the `metrics_token` if it is set,
|
||||
and admin API tokens defined dynamically in Garage which have
|
||||
the `Metrics` endpoint in their scope.
|
||||
|
||||
#### `trace_sink` {#admin_trace_sink}
|
||||
|
||||
|
|
|
|||
|
|
@ -129,5 +129,5 @@ related to objects stored in an S3 bucket.
|
|||
In the context of our research project, [Aérogramme](https://aerogramme.deuxfleurs.fr),
|
||||
K2V is used to provide metadata and log storage for operations on encrypted e-mail storage.
|
||||
|
||||
Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)
|
||||
Learn more on the specification of K2V [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md)
|
||||
and on how to enable it in Garage [here](@/documentation/reference-manual/k2v.md).
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ the `k2v` feature flag enabled can be obtained from our download page under
|
|||
with `-k2v` (example: `v0.7.2-k2v`).
|
||||
|
||||
The specification of the K2V API can be found
|
||||
[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/doc/drafts/k2v-spec.md).
|
||||
[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/commit/f8be15c37db857e177d543de7be863692628d567/doc/drafts/k2v-spec.md).
|
||||
This document also includes a high-level overview of K2V's design.
|
||||
|
||||
The K2V API uses AWSv4 signatures for authentication, same as the S3 API.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.3 to 0.4"
|
||||
weight = 20
|
||||
weight = 80
|
||||
+++
|
||||
|
||||
**Migrating from 0.3 to 0.4 is unsupported. This document is only intended to
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.5 to 0.6"
|
||||
weight = 15
|
||||
weight = 75
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.6 if you have an existing 0.5 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.6 to 0.7"
|
||||
weight = 14
|
||||
weight = 74
|
||||
+++
|
||||
**This guide explains how to migrate to 0.7 if you have an existing 0.6 cluster.
|
||||
We don't recommend trying to migrate to 0.7 directly from 0.5 or older.**
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.7 to 0.8"
|
||||
weight = 13
|
||||
weight = 73
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.8 if you have an existing 0.7 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.8 to 0.9"
|
||||
weight = 12
|
||||
weight = 72
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster.
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Migrating from 0.9 to 1.0"
|
||||
weight = 11
|
||||
weight = 71
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
|
||||
|
|
|
|||
70
doc/book/working-documents/migration-2.md
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
+++
|
||||
title = "Migrating from 1.0 to 2.0"
|
||||
weight = 70
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to v2.x if you have an existing v1.x.x cluster.
|
||||
We don't recommend trying to migrate to v2.x directly from v0.9.x or older.**
|
||||
|
||||
This migration procedure has been tested on several clusters without issues.
|
||||
However, it is still a *critical procedure* that might cause issues.
|
||||
**Make sure to back up all your data before attempting it!**
|
||||
|
||||
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
|
||||
|
||||
## Changes introduced in v2.0
|
||||
|
||||
The following are **breaking changes** in Garage v2.0 that require your attention when migrating:
|
||||
|
||||
- The administration API has been completely reworked.
|
||||
Some calls to the `/v1/` endpoints will still work but most will not.
|
||||
New endpoints are prefixed by `/v2/`. **You will need to update all your code that makes use of the admin API.**
|
||||
|
||||
- `replication_mode` is no longer a supported configuration parameter,
|
||||
please use `replication_factor` and `consistency_mode` instead.
|
||||
|
||||
## Migration procedure
|
||||
|
||||
The migration to Garage v2.0 can be done with almost no downtime,
|
||||
by restarting all nodes at once in the new version.
|
||||
|
||||
The migration steps are as follows:
|
||||
|
||||
1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
|
||||
all data seems to be synced correctly between nodes. If you have time, do
|
||||
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
|
||||
etc.)
|
||||
|
||||
2. Ensure you have a snapshot of your Garage installation that you can restore
|
||||
to in case the upgrade goes wrong, with one of the following options:
|
||||
|
||||
- You may use the `garage meta snapshot --all` command
|
||||
to make a backup snapshot of the metadata directories of your nodes
|
||||
for backup purposes. Once this command has completed, copy the following
|
||||
files and directories from the `metadata_dir` of all your nodes
|
||||
to somewhere safe: `snapshots`, `cluster_layout`, `data_layout`,
|
||||
`node_key`, `node_key.pub`. (If you have set the `metadata_snapshots_dir`
|
||||
to a different value in your config file, back up that directory instead.)
|
||||
|
||||
- If you are running a filesystem such as ZFS or BTRFS that support
|
||||
snapshotting, you can create a filesystem-level snapshot of the `metadata_dir`
|
||||
of all your nodes to be used as a restoration point if needed.
|
||||
|
||||
- You may also make a back-up manually: turn off each node
|
||||
individually; back up its metadata folder (for instance, use the following
|
||||
command if your metadata directory is `/var/lib/garage/meta`: `cd
|
||||
/var/lib/garage ; tar -acf meta-v1.0.tar.zst meta/`); turn it back on
|
||||
again. This will allow you to take a backup of all nodes without
|
||||
impacting global cluster availability. You can do all nodes of a single
|
||||
zone at once as this does not impact the availability of Garage.
|
||||
|
||||
3. Prepare your updated binaries and configuration files for Garage v2.0.
|
||||
**Remember to update your configuration file to remove `replication_mode` and replace it by `replication_factor`.**
|
||||
|
||||
4. Shut down all v1.0 nodes simultaneously, and restart them all simultaneously
|
||||
in v2.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
|
||||
achieve this as fast as possible. Garage v2.0 should be in a working state
|
||||
as soon as enough nodes have started.
|
||||
|
||||
5. Monitor your cluster in the following hours to see if it works well under
|
||||
your production load.
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
+++
|
||||
title = "Testing strategy"
|
||||
weight = 30
|
||||
weight = 100
|
||||
+++
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -13,8 +13,12 @@ We will bump the version numbers prefixed to each API endpoint each time the syn
|
|||
or semantics change, meaning that code that relies on these endpoints will break
|
||||
when changes are introduced.
|
||||
|
||||
The Garage administration API was introduced in version 0.7.2, this document
|
||||
does not apply to older versions of Garage.
|
||||
The Garage administration API was introduced in version 0.7.2, and was
|
||||
changed several times.
|
||||
|
||||
**THIS DOCUMENT IS DEPRECATED.** We now have an OpenAPI spec which is automatically generated
|
||||
from Garage's source code and is always up-to-date. See `doc/api/garage-admin-v2.html`.
|
||||
Text in this document is no longer kept in sync with the admin API's actual behavior.
|
||||
|
||||
|
||||
## Access control
|
||||
|
|
@ -52,34 +56,28 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
|
|||
and an HTTP status 503 (Service Unavailable) if there are some partitions
|
||||
for which a quorum of nodes is not available.
|
||||
A simple textual message is also returned in a body with content-type `text/plain`.
|
||||
See `/v1/health` for an API that also returns JSON output.
|
||||
See `/v2/GetClusterHealth` for an API that also returns JSON output.
|
||||
|
||||
### Other special endpoints
|
||||
|
||||
#### CheckDomain `GET /check?domain=<domain>`
|
||||
|
||||
Checks whether this Garage cluster serves a website for domain `<domain>`.
|
||||
Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain.
|
||||
|
||||
### Cluster operations
|
||||
|
||||
#### GetClusterStatus `GET /v1/status`
|
||||
#### GetClusterStatus `GET /v2/GetClusterStatus`
|
||||
|
||||
Returns the cluster's current status in JSON, including:
|
||||
|
||||
- ID of the node being queried and its version of the Garage daemon
|
||||
- Live nodes
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
|
||||
Example response body:
|
||||
|
||||
```json
|
||||
{
|
||||
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"garageVersion": "v1.3.0",
|
||||
"garageFeatures": [
|
||||
"k2v",
|
||||
"lmdb",
|
||||
"sqlite",
|
||||
"metrics",
|
||||
"bundled-libs"
|
||||
],
|
||||
"rustVersion": "1.68.0",
|
||||
"dbEngine": "LMDB (using Heed crate)",
|
||||
"layoutVersion": 5,
|
||||
"nodes": [
|
||||
{
|
||||
|
|
@ -169,7 +167,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### GetClusterHealth `GET /v1/health`
|
||||
#### GetClusterHealth `GET /v2/GetClusterHealth`
|
||||
|
||||
Returns the cluster's current health in JSON format, with the following variables:
|
||||
|
||||
|
|
@ -202,7 +200,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### ConnectClusterNodes `POST /v1/connect`
|
||||
#### ConnectClusterNodes `POST /v2/ConnectClusterNodes`
|
||||
|
||||
Instructs this Garage node to connect to other Garage nodes at specified addresses.
|
||||
|
||||
|
|
@ -232,7 +230,7 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetClusterLayout `GET /v1/layout`
|
||||
#### GetClusterLayout `GET /v2/GetClusterLayout`
|
||||
|
||||
Returns the cluster's current layout in JSON, including:
|
||||
|
||||
|
|
@ -293,7 +291,7 @@ Example response body:
|
|||
}
|
||||
```
|
||||
|
||||
#### UpdateClusterLayout `POST /v1/layout`
|
||||
#### UpdateClusterLayout `POST /v2/UpdateClusterLayout`
|
||||
|
||||
Send modifications to the cluster layout. These modifications will
|
||||
be included in the staged role changes, visible in subsequent calls
|
||||
|
|
@ -330,7 +328,7 @@ This returns the new cluster layout with the proposed staged changes,
|
|||
as returned by GetClusterLayout.
|
||||
|
||||
|
||||
#### ApplyClusterLayout `POST /v1/layout/apply`
|
||||
#### ApplyClusterLayout `POST /v2/ApplyClusterLayout`
|
||||
|
||||
Applies to the cluster the layout changes currently registered as
|
||||
staged layout changes.
|
||||
|
|
@ -350,23 +348,11 @@ existing layout in the cluster.
|
|||
This returns the message describing all the calculations done to compute the new
|
||||
layout, as well as the description of the layout as returned by GetClusterLayout.
|
||||
|
||||
#### RevertClusterLayout `POST /v1/layout/revert`
|
||||
#### RevertClusterLayout `POST /v2/RevertClusterLayout`
|
||||
|
||||
Clears all of the staged layout changes.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 13
|
||||
}
|
||||
```
|
||||
|
||||
Reverting the staged changes is done by incrementing the version number
|
||||
and clearing the contents of the staged change list.
|
||||
Similarly to the CLI, the body must include the incremented
|
||||
version number, which MUST be 1 + the value of the currently
|
||||
existing layout in the cluster.
|
||||
This request contains an empty body.
|
||||
|
||||
This returns the new cluster layout with all changes reverted,
|
||||
as returned by GetClusterLayout.
|
||||
|
|
@ -374,7 +360,7 @@ as returned by GetClusterLayout.
|
|||
|
||||
### Access key operations
|
||||
|
||||
#### ListKeys `GET /v1/key`
|
||||
#### ListKeys `GET /v2/ListKeys`
|
||||
|
||||
Returns all API access keys in the cluster.
|
||||
|
||||
|
|
@ -393,8 +379,8 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetKeyInfo `GET /v1/key?id=<acces key id>`
|
||||
#### GetKeyInfo `GET /v1/key?search=<pattern>`
|
||||
#### GetKeyInfo `GET /v2/GetKeyInfo?id=<access key id>`
|
||||
#### GetKeyInfo `GET /v2/GetKeyInfo?search=<pattern>`
|
||||
|
||||
Returns information about the requested API access key.
|
||||
|
||||
|
|
@ -468,7 +454,7 @@ Example response:
|
|||
}
|
||||
```
|
||||
|
||||
#### CreateKey `POST /v1/key`
|
||||
#### CreateKey `POST /v2/CreateKey`
|
||||
|
||||
Creates a new API access key.
|
||||
|
||||
|
|
@ -483,7 +469,7 @@ Request body format:
|
|||
This returns the key info, including the created secret key,
|
||||
in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### ImportKey `POST /v1/key/import`
|
||||
#### ImportKey `POST /v2/ImportKey`
|
||||
|
||||
Imports an existing API key.
|
||||
This will check that the imported key is in the valid format, i.e.
|
||||
|
|
@ -501,7 +487,7 @@ Request body format:
|
|||
|
||||
This returns the key info in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### UpdateKey `POST /v1/key?id=<acces key id>`
|
||||
#### UpdateKey `POST /v2/UpdateKey?id=<access key id>`
|
||||
|
||||
Updates information about the specified API access key.
|
||||
|
||||
|
|
@ -523,14 +509,14 @@ The possible flags in `allow` and `deny` are: `createBucket`.
|
|||
|
||||
This returns the key info in the same format as the result of GetKeyInfo.
|
||||
|
||||
#### DeleteKey `DELETE /v1/key?id=<acces key id>`
|
||||
#### DeleteKey `POST /v2/DeleteKey?id=<access key id>`
|
||||
|
||||
Deletes an API access key.
|
||||
|
||||
|
||||
### Bucket operations
|
||||
|
||||
#### ListBuckets `GET /v1/bucket`
|
||||
#### ListBuckets `GET /v2/ListBuckets`
|
||||
|
||||
Returns all storage buckets in the cluster.
|
||||
|
||||
|
|
@ -572,8 +558,8 @@ Example response:
|
|||
]
|
||||
```
|
||||
|
||||
#### GetBucketInfo `GET /v1/bucket?id=<bucket id>`
|
||||
#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>`
|
||||
#### GetBucketInfo `GET /v2/GetBucketInfo?id=<bucket id>`
|
||||
#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=<alias>`
|
||||
|
||||
Returns information about the requested storage bucket.
|
||||
|
||||
|
|
@ -616,7 +602,7 @@ Example response:
|
|||
}
|
||||
```
|
||||
|
||||
#### CreateBucket `POST /v1/bucket`
|
||||
#### CreateBucket `POST /v2/CreateBucket`
|
||||
|
||||
Creates a new storage bucket.
|
||||
|
||||
|
|
@ -656,7 +642,7 @@ or no alias at all.
|
|||
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
|
||||
two aliases, but I don't see why you would want to do that.
|
||||
|
||||
#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`
|
||||
#### UpdateBucket `POST /v2/UpdateBucket?id=<bucket id>`
|
||||
|
||||
Updates configuration of the given bucket.
|
||||
|
||||
|
|
@ -688,16 +674,38 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
|
|||
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
|
||||
to change only one of the two quotas.
|
||||
|
||||
#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
|
||||
#### DeleteBucket `POST /v2/DeleteBucket?id=<bucket id>`
|
||||
|
||||
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||
|
||||
Warning: this will delete all aliases associated with the bucket!
|
||||
|
||||
#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads`
|
||||
|
||||
Clean up all incomplete uploads in a bucket that are older than a specified number
|
||||
of seconds.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"olderThanSecs": 3600
|
||||
}
|
||||
```
|
||||
|
||||
Response format:
|
||||
|
||||
```json
|
||||
{
|
||||
"uploadsDeleted": 12
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Operations on permissions for keys on buckets
|
||||
|
||||
#### BucketAllowKey `POST /v1/bucket/allow`
|
||||
#### AllowBucketKey `POST /v2/AllowBucketKey`
|
||||
|
||||
Allows a key to do read/write/owner operations on a bucket.
|
||||
|
||||
|
|
@ -718,7 +726,7 @@ Request body format:
|
|||
Flags in `permissions` which have the value `true` will be activated.
|
||||
Other flags will remain unchanged.
|
||||
|
||||
#### BucketDenyKey `POST /v1/bucket/deny`
|
||||
#### DenyBucketKey `POST /v2/DenyBucketKey`
|
||||
|
||||
Denies a key from doing read/write/owner operations on a bucket.
|
||||
|
||||
|
|
@ -742,19 +750,35 @@ Other flags will remain unchanged.
|
|||
|
||||
### Operations on bucket aliases
|
||||
|
||||
#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
#### AddBucketAlias `POST /v2/AddBucketAlias`
|
||||
|
||||
Empty body. Creates a global alias for a bucket.
|
||||
Creates an alias for a bucket in the namespace of a specific access key.
|
||||
To create a global alias, specify the `globalAlias` field.
|
||||
To create a local alias, specify the `localAlias` and `accessKeyId` fields.
|
||||
|
||||
#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
Request body format:
|
||||
|
||||
Removes a global alias for a bucket.
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"globalAlias": "my-bucket"
|
||||
}
|
||||
```
|
||||
|
||||
#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||
or:
|
||||
|
||||
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
|
||||
```json
|
||||
{
|
||||
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"localAlias": "my-bucket"
|
||||
}
|
||||
```
|
||||
|
||||
#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId<access key ID>&alias=<local alias>`
|
||||
#### RemoveBucketAlias `POST /v2/RemoveBucketAlias`
|
||||
|
||||
Removes a local alias for a bucket in the namespace of a specific access key.
|
||||
Removes an alias for a bucket in the namespace of a specific access key.
|
||||
To remove a global alias, specify the `globalAlias` field.
|
||||
To remove a local alias, specify the `localAlias` and `accessKeyId` fields.
|
||||
|
||||
Request body format: same as AddBucketAlias.
|
||||
|
|
|
|||
17
doc/talks/2025-10-06-josy/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
*
|
||||
|
||||
!*.txt
|
||||
!*.md
|
||||
|
||||
!assets
|
||||
|
||||
!.gitignore
|
||||
!*.svg
|
||||
!*.png
|
||||
!*.jpg
|
||||
!*.tex
|
||||
!Makefile
|
||||
!.gitignore
|
||||
!assets/*.drawio.pdf
|
||||
|
||||
!talk.pdf
|
||||
19
doc/talks/2025-10-06-josy/Makefile
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
ASSETS=../assets/lattice/lattice1.pdf_tex \
|
||||
../assets/lattice/lattice2.pdf_tex \
|
||||
../assets/lattice/lattice3.pdf_tex \
|
||||
../assets/lattice/lattice4.pdf_tex \
|
||||
../assets/lattice/lattice5.pdf_tex \
|
||||
../assets/lattice/lattice6.pdf_tex \
|
||||
../assets/lattice/lattice7.pdf_tex \
|
||||
../assets/lattice/lattice8.pdf_tex \
|
||||
../assets/logos/deuxfleurs.pdf \
|
||||
../assets/timeline-22-24.pdf
|
||||
|
||||
talk.pdf: talk.tex $(ASSETS)
|
||||
pdflatex talk.tex
|
||||
|
||||
%.pdf: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@
|
||||
|
||||
%.pdf_tex: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@ --export-latex
|
||||
BIN
doc/talks/2025-10-06-josy/talk.pdf
Normal file
702
doc/talks/2025-10-06-josy/talk.tex
Normal file
|
|
@ -0,0 +1,702 @@
|
|||
\nonstopmode
|
||||
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
|
||||
\usepackage[utf8]{inputenc}
|
||||
% \usepackage[frenchb]{babel}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{breqn}
|
||||
\usepackage{multirow}
|
||||
\usetheme{boxes}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{import}
|
||||
\usepackage{adjustbox}
|
||||
\usepackage[absolute,overlay]{textpos}
|
||||
%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
|
||||
%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
|
||||
\useoutertheme{infolines}
|
||||
\setbeamertemplate{headline}{}
|
||||
|
||||
\beamertemplatenavigationsymbolsempty
|
||||
|
||||
\definecolor{TitleOrange}{RGB}{255,137,0}
|
||||
\setbeamercolor{title}{fg=TitleOrange}
|
||||
\setbeamercolor{frametitle}{fg=TitleOrange}
|
||||
|
||||
\definecolor{ListOrange}{RGB}{255,145,5}
|
||||
\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
|
||||
|
||||
\definecolor{verygrey}{RGB}{70,70,70}
|
||||
\setbeamercolor{normal text}{fg=verygrey}
|
||||
|
||||
|
||||
\usepackage{tabu}
|
||||
\usepackage{multicol}
|
||||
\usepackage{vwcol}
|
||||
\usepackage{stmaryrd}
|
||||
\usepackage{graphicx}
|
||||
|
||||
\usepackage[normalem]{ulem}
|
||||
|
||||
\AtBeginSection[]{
|
||||
\begin{frame}
|
||||
\vfill
|
||||
\centering
|
||||
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
|
||||
\usebeamerfont{title}\insertsectionhead\par%
|
||||
\end{beamercolorbox}
|
||||
\vfill
|
||||
\end{frame}
|
||||
}
|
||||
|
||||
\title{Garage, an S3 backend as reliable as possible}
|
||||
\author{Garage Authors}
|
||||
\date{JoSy S3, 2025-10-08}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\begin{frame}
|
||||
\centering
|
||||
\includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
|
||||
\vspace{1em}
|
||||
|
||||
{\large\bf Garage, an S3 backend as reliable as possible}
|
||||
\vspace{1em}
|
||||
|
||||
\url{https://garagehq.deuxfleurs.fr/}\\
|
||||
\url{mailto:garagehq@deuxfleurs.fr}\\
|
||||
\texttt{\#garage:deuxfleurs.fr} on Matrix
|
||||
\end{frame}
|
||||
|
||||
|
||||
\section{Meet Garage}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{A non-profit initiative}
|
||||
|
||||
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Part of a degrowth initiative}\\
|
||||
Garage was created at Deuxfleurs, where we experiment with running Internet services without datacenters, on commodity and refurbished hardware.
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/community.png}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Developed by a community}\\
|
||||
{\small Some recent contributors: Arthur C, Charles H, dongdigua, Etienne L, Jonah A, Julien K, Lapineige, MagicRR, Milas B, Niklas M, RockWolf, Schwitzd, trinity-1686a, Xavier S, babykart, Baptiste J, eddster2309, James O'C, Joker9944, Maximilien R, Renjaya RZ, Yureka...}
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/AGPLv3_Logo.png}
|
||||
\end{column}
|
||||
\begin{column}{.8\textwidth}
|
||||
\textbf{Owned by nobody, open-core is impossible, zero VC money}\\
|
||||
AGPL + no Contributor License Agreement = Garage ownership spreads among hundreds of contributors.
|
||||
\end{column}
|
||||
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Getting support for Garage}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Alex Auvolat}\\
|
||||
PhD; co-founder of Deuxfleurs\\
|
||||
Garage maintainer, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.3\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/support.png}
|
||||
\end{column}
|
||||
\begin{column}{.1\textwidth}
|
||||
~
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/quentin.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Quentin Dufour}\\
|
||||
PhD; co-founder of Deuxfleurs\\
|
||||
Garage contributor, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
For support requests, write at: \\
|
||||
\url{garagehq@deuxfleurs.fr}
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/armael.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
\textbf{Armaël Guéneau}\\
|
||||
PhD; member of Deuxfleurs\\
|
||||
Garage contributor, Freelance
|
||||
\end{column}
|
||||
\begin{column}{.4\textwidth}
|
||||
Eligible: email support, architecture design, specific feature development, etc.
|
||||
\end{column}
|
||||
\end{columns}
|
||||
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Our initial goal}
|
||||
|
||||
\centering
|
||||
\Large
|
||||
|
||||
Being a self-sovereign community to be free of our degrowth choice
|
||||
|
||||
$\big\downarrow$
|
||||
|
||||
As web citizens, datacenters are big black boxes. \\
|
||||
We want to leave them to autonomously manage our servers.
|
||||
|
||||
$\big\downarrow$
|
||||
|
||||
We want reliable services without relying on dedicated hardware or places.
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Building a resilient system with cheap stuff}
|
||||
|
||||
\only<1,4-7>{
|
||||
\begin{itemize}
|
||||
\item \textcolor<5->{gray}{Commodity hardware (e.g. old desktop PCs)\\
|
||||
\vspace{.5em}
|
||||
\visible<4->{{\footnotesize (can die at any time)}}}
|
||||
\vspace{1.5em}
|
||||
\item<5-> \textcolor<7->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
|
||||
\vspace{.5em}
|
||||
\visible<6->{{\footnotesize (can be unavailable randomly)}}}
|
||||
\vspace{1.5em}
|
||||
\item<7-> \textbf{Geographical redundancy} (multi-site replication)
|
||||
\end{itemize}
|
||||
}
|
||||
\only<2>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
|
||||
\end{center}
|
||||
}
|
||||
\only<3>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/atuin.jpg}
|
||||
\end{center}
|
||||
}
|
||||
\only<8>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
|
||||
\end{center}
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Object storage: a crucial component}
|
||||
\begin{center}
|
||||
\includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
|
||||
\hspace{3em}
|
||||
\visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
|
||||
\hspace{3em}
|
||||
\visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
|
||||
\end{center}
|
||||
\vspace{1em}
|
||||
S3: a de-facto standard, many compatible applications
|
||||
|
||||
\vspace{1em}
|
||||
\visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{CRDTs / weak consistency instead of consensus}
|
||||
|
||||
\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
|
||||
|
||||
\vspace{2em}
|
||||
Why not Raft, Paxos, ...? Issues of consensus algorithms:
|
||||
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item<2-> \textbf{Software complexity}
|
||||
\vspace{1em}
|
||||
\item<3-> \textbf{Performance issues:}
|
||||
\vspace{.5em}
|
||||
\begin{itemize}
|
||||
\item<4-> The leader is a \textbf{bottleneck} for all requests\\
|
||||
\vspace{.5em}
|
||||
\item<5-> \textbf{Sensitive to higher latency} between nodes
|
||||
\vspace{.5em}
|
||||
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{The data model of object storage}
|
||||
Object storage is basically a \textbf{key-value store}:
|
||||
\vspace{.5em}
|
||||
|
||||
{\scriptsize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|p{7cm}|}
|
||||
\hline
|
||||
\textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 24929} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{img/logo.svg} &
|
||||
\texttt{Content-Type: text/svg+xml} \newline
|
||||
\texttt{Content-Length: 13429} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{download/index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 26563} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item<2> Maps well to CRDT data types
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Performance gains in practice}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ======================================== OPERATING
|
||||
% ======================================== OPERATING
|
||||
% ======================================== OPERATING
|
||||
|
||||
|
||||
\section{Production clusters}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Deployment kinds}
|
||||
|
||||
\includegraphics[width=.9\linewidth]{../assets/cluster_kind.png}
|
||||
\vspace{1em}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{How big are they?}
|
||||
|
||||
\includegraphics[width=.9\linewidth]{../assets/cluster_size.png}
|
||||
\vspace{1em}
|
||||
|
||||
\textit{"Petabyte storage setup for a video site. Nginx as CDN in-front using garage-s3-website feature. Each storage node has ~64TB storage with raid10, no replication within garage. 25gbit nic. haproxy to loadbalance across 5 nodes. mostly reads with very few writes."}
|
||||
|
||||
\vspace{1em}
|
||||
\textit{"We currently manage 7 Garage nodes, 28TB total storage, 6M blocks for 3M objects and 4TB of object data. We have been running Garage in production for 2.5 years."}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Operating Garage}
|
||||
\begin{center}
|
||||
\only<1-2>{
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
|
||||
\\\vspace{1em}
|
||||
\visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
|
||||
}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Garage's architecture}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.45\linewidth]{../assets/garage.drawio.pdf}}%
|
||||
\only<2>{\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}}%
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Digging deeper}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
|
||||
\only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
|
||||
\only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Potential limitations and bottlenecks}
|
||||
\begin{itemize}
|
||||
\item Global:
|
||||
\begin{itemize}
|
||||
\item Max. $\sim$100 nodes per cluster (excluding gateways)
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Metadata:
|
||||
\begin{itemize}
|
||||
\item One big bucket = bottleneck, object list on 3 nodes only
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Block manager:
|
||||
\begin{itemize}
|
||||
\item Lots of small files on disk
|
||||
\item Processing the resync queue can be slow
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Deployment advice for very large clusters}
|
||||
\begin{itemize}
|
||||
\item Metadata storage:
|
||||
\begin{itemize}
|
||||
\item ZFS mirror (x2) on fast NVMe
|
||||
\item Use LMDB storage engine
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Data block storage:
|
||||
\begin{itemize}
|
||||
\item Use Garage's native multi-HDD support
|
||||
\item XFS on individual drives
|
||||
\item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
|
||||
\item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Other :
|
||||
\begin{itemize}
|
||||
\item Split data over several buckets
|
||||
\item Use less than 100 storage nodes
|
||||
\item Use gateway nodes
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Focus on Deuxfleurs}
|
||||
|
||||
Host institutional websites, partnership with a web agency.
|
||||
Matrix media backend.
|
||||
|
||||
Plan to use it as an email backend for an internally developed email server.
|
||||
|
||||
\end{frame}
|
||||
|
||||
|
||||
% ======================================== TIMELINE
|
||||
% ======================================== TIMELINE
|
||||
% ======================================== TIMELINE
|
||||
|
||||
\section{Recent developments}
|
||||
|
||||
% ====================== v0.7.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{April 2022 - Garage v0.7.0}
|
||||
Focus on \underline{observability and ecosystem integration}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item \textbf{Monitoring:} metrics and traces, using OpenTelemetry
|
||||
\vspace{1em}
|
||||
\item Replication modes with 1 or 2 copies / weaker consistency
|
||||
\vspace{1em}
|
||||
\item Kubernetes integration for node discovery
|
||||
\vspace{1em}
|
||||
\item Admin API (v0.7.2)
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Metrics (Prometheus + Grafana)}
|
||||
\begin{center}
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Traces (Jaeger)}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v0.8.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{November 2022 - Garage v0.8.0}
|
||||
Focus on \underline{performance}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item \textbf{Alternative metadata DB engines} (LMDB, Sqlite)
|
||||
\vspace{1em}
|
||||
\item \textbf{Performance improvements:} block streaming, various optimizations...
|
||||
\vspace{1em}
|
||||
\item Bucket quotas (max size, max \#objects)
|
||||
\vspace{1em}
|
||||
\item Quality of life improvements, observability, etc.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{About metadata DB engines}
|
||||
\textbf{Issues with Sled:}
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item Huge files on disk
|
||||
\vspace{.5em}
|
||||
\item Unpredictable performance, especially on HDD
|
||||
\vspace{.5em}
|
||||
\item API limitations
|
||||
\vspace{.5em}
|
||||
\item Not actively maintained
|
||||
\end{itemize}
|
||||
|
||||
\vspace{2em}
|
||||
\textbf{LMDB:} very stable, good performance, file size is reasonable\\
|
||||
\textbf{Sqlite} also available as a second choice
|
||||
|
||||
\vspace{1em}
|
||||
Sled will be removed in Garage v1.0
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{DB engine performance comparison}
|
||||
\begin{center}
|
||||
\includegraphics[width=.6\linewidth]{../assets/perf/db_engine.png}
|
||||
\end{center}
|
||||
NB: Sqlite was slow due to synchronous mode, now configurable
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Block streaming}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-1.png}}
|
||||
\only<2>{\includegraphics[width=.8\linewidth]{../assets/schema-streaming-2.png}}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{TTFB benchmark}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/perf/ttfb.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Throughput benchmark}
|
||||
\begin{center}
|
||||
\includegraphics[width=.7\linewidth]{../assets/perf/io-0.7-0.8-minio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v0.9.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{October 2023 - Garage v0.9.0}
|
||||
Focus on \underline{streamlining \& usability}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item Support multiple HDDs per node
|
||||
\vspace{1em}
|
||||
\item S3 compatibility:
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item support basic lifecycle configurations
|
||||
\vspace{.5em}
|
||||
\item allow for multipart upload part retries
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item LMDB by default, deprecation of Sled
|
||||
\vspace{1em}
|
||||
\item New layout computation algorithm
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Layout computation}
|
||||
\begin{overprint}
|
||||
\onslide<1>
|
||||
\begin{center}
|
||||
\includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
|
||||
\end{center}
|
||||
\onslide<2>
|
||||
\begin{center}
|
||||
\includegraphics[width=.7\linewidth]{../assets/map.png}
|
||||
\end{center}
|
||||
\end{overprint}
|
||||
\vspace{1em}
|
||||
Garage stores replicas on different zones when possible
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{What a "layout" is}
|
||||
\textbf{A layout is a precomputed index table:}
|
||||
\vspace{1em}
|
||||
|
||||
{\footnotesize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|l|l|l|}
|
||||
\hline
|
||||
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||
\hline
|
||||
\hline
|
||||
Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
|
||||
\hline
|
||||
Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
|
||||
\hline
|
||||
Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
|
||||
\hline
|
||||
Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{2em}
|
||||
\visible<2->{
|
||||
The index table is built centrally using an optimal algorithm,\\
|
||||
then propagated to all nodes
|
||||
}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{
|
||||
\footnotesize
|
||||
Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
|
||||
|
||||
% ====================== v1.0.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{April 2024 - Garage v1.0.0}
|
||||
Focus on \underline{consistency, security \& stability}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item Fix consistency issues when reshuffling data (Jepsen testing)
|
||||
\vspace{1em}
|
||||
\item \textbf{Security audit} by Radically Open Security
|
||||
\vspace{1em}
|
||||
\item Misc. S3 features (SSE-C, checksums, ...) and compatibility fixes
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
% ====================== v2.0.0 ===============================
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/tl.drawio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Garage v2.0.0}
|
||||
Focus on \underline{}
|
||||
\vspace{2em}
|
||||
\begin{itemize}
|
||||
\item TODO
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Currently funding...}
|
||||
|
||||
\textit{...}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{We run community surveys}
|
||||
\begin{center}
|
||||
\includegraphics[width=.6\linewidth]{../assets/survey_requested_features.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ======================================== END
|
||||
% ======================================== END
|
||||
% ======================================== END
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Where to find us}
|
||||
\begin{center}
|
||||
\includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
|
||||
\vspace{-1em}
|
||||
\url{https://garagehq.deuxfleurs.fr/}\\
|
||||
\url{mailto:garagehq@deuxfleurs.fr}\\
|
||||
\texttt{\#garage:deuxfleurs.fr} on Matrix
|
||||
|
||||
\vspace{1.5em}
|
||||
\includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
|
||||
\includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\end{document}
|
||||
|
||||
%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :
|
||||
BIN
doc/talks/assets/armael.jpg
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
doc/talks/assets/cluster_kind.png
Normal file
|
After Width: | Height: | Size: 50 KiB |
BIN
doc/talks/assets/cluster_size.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
BIN
doc/talks/assets/community.png
Normal file
|
After Width: | Height: | Size: 6.1 KiB |
BIN
doc/talks/assets/quentin.jpg
Normal file
|
After Width: | Height: | Size: 123 KiB |
BIN
doc/talks/assets/support.png
Normal file
|
After Width: | Height: | Size: 7.9 KiB |
BIN
doc/talks/assets/tl.drawio.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
7
flake.lock
generated
|
|
@ -12,16 +12,17 @@
|
|||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"locked": {
|
||||
"lastModified": 1717312683,
|
||||
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
|
||||
"lastModified": 1761640442,
|
||||
"narHash": "sha256-AtrEP6Jmdvrqiv4x2xa5mrtaIp3OEe8uBYCDZDS+hu8=",
|
||||
"owner": "nix-community",
|
||||
"repo": "flake-compat",
|
||||
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
|
||||
"rev": "4a56054d8ffc173222d09dad23adf4ba946c8884",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,8 @@
|
|||
"github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
|
||||
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
inputs.crane.url = "github:ipetkov/crane";
|
||||
# Crane as of 2025-01-24
|
||||
inputs.crane.url = "github:ipetkov/crane/6fe74265bbb6d016d663b1091f015e2976c4a527";
|
||||
|
||||
inputs.flake-compat.url = "github:nix-community/flake-compat";
|
||||
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||
|
|
@ -89,6 +90,9 @@
|
|||
cargo-outdated
|
||||
cargo-machete
|
||||
nixpkgs-fmt
|
||||
openssl
|
||||
socat
|
||||
killall
|
||||
];
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -167,7 +167,7 @@ let
|
|||
</ul></p>
|
||||
<p> Sources:
|
||||
<ul>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">gitea</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">Forgejo</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.zip">.zip</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.tar.gz">.tar.gz</a></li>
|
||||
</ul></p>
|
||||
|
|
|
|||
|
|
@ -17,13 +17,19 @@ else
|
|||
fi
|
||||
|
||||
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
|
||||
if [ "$GARAGE_08" = "1" ]; then
|
||||
if [ "$GARAGE_OLDVER" = "v08" ]; then
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
|
||||
else
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
|
||||
fi
|
||||
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
|
||||
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
|
||||
elif [ "$GARAGE_OLDVER" = "v1" ]; then
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
|
||||
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
|
||||
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
|
||||
else
|
||||
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml json-api CreateKey '{"name":"opérateur"}')
|
||||
ACCESS_KEY=`echo $KEY_INFO|jq -r .accessKeyId`
|
||||
SECRET_KEY=`echo $KEY_INFO|jq -r .secretAccessKey`
|
||||
fi
|
||||
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
|
||||
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,12 @@ for count in $(seq 1 3); do
|
|||
CONF_PATH="/tmp/config.$count.toml"
|
||||
LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
|
||||
|
||||
if [ "$GARAGE_OLDVER" == "v08" ]; then
|
||||
REPLICATION_MODE="replication_mode = \"3\""
|
||||
else
|
||||
REPLICATION_MODE="replication_factor = 3"
|
||||
fi
|
||||
|
||||
cat > $CONF_PATH <<EOF
|
||||
block_size = 1048576 # objects are split in blocks of maximum this number of bytes
|
||||
metadata_dir = "/tmp/garage-meta-$count"
|
||||
|
|
@ -38,7 +44,7 @@ data_dir = "/tmp/garage-data-$count"
|
|||
rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
|
||||
rpc_public_addr = "127.0.0.1:$((3900+$count))"
|
||||
bootstrap_peers = []
|
||||
replication_mode = "3"
|
||||
$REPLICATION_MODE
|
||||
rpc_secret = "$NETWORK_SECRET"
|
||||
|
||||
[s3_api]
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
|
|||
sleep 1
|
||||
done
|
||||
|
||||
if [ "$GARAGE_08" = "1" ]; then
|
||||
if [ "$GARAGE_OLDVER" = "v08" ]; then
|
||||
$GARAGE_BIN -c /tmp/config.1.toml status \
|
||||
| grep 'NO ROLE' \
|
||||
| grep -Po '^[0-9a-f]+' \
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
|
||||
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
|
||||
export AWS_DEFAULT_REGION='garage'
|
||||
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
|
||||
# FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
|
||||
function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
|
||||
|
||||
|
|
|
|||
|
|
@ -2,8 +2,8 @@ apiVersion: v2
|
|||
name: garage
|
||||
description: S3-compatible object store for small self-hosted geo-distributed deployments
|
||||
type: application
|
||||
version: 0.7.2
|
||||
appVersion: "v1.3.0"
|
||||
version: 0.9.2
|
||||
appVersion: "v2.2.0"
|
||||
home: https://garagehq.deuxfleurs.fr/
|
||||
icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
# garage
|
||||
|
||||
  
|
||||
  
|
||||
|
||||
S3-compatible object store for small self-hosted geo-distributed deployments
|
||||
|
||||
|
|
@ -15,6 +15,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
|
|||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | |
|
||||
| commonLabels | object | `{}` | Extra labels for all resources |
|
||||
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
|
||||
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
|
||||
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
|
||||
|
|
@ -22,15 +23,16 @@ S3-compatible object store for small self-hosted geo-distributed deployments
|
|||
| extraVolumeMounts | object | `{}` | |
|
||||
| extraVolumes | object | `{}` | |
|
||||
| fullnameOverride | string | `""` | |
|
||||
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
|
||||
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size |
|
||||
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
|
||||
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
|
||||
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
|
||||
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level |
|
||||
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine |
|
||||
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
|
||||
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
|
||||
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources |
|
||||
| garage.replicationFactor | string | `"3"` | Default to 3 replicas, see the replication_factor section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor |
|
||||
| garage.consistencyMode | string | `"consistent"` | Default to read-after-write consistency, see the consistency_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode |
|
||||
| garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
|
||||
| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
|
||||
| garage.rpcBindAddr | string | `"[::]:3901"` | |
|
||||
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
|
||||
| garage.s3.api.region | string | `"garage"` | |
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ If release name contains chart name it will be used as a full name.
|
|||
Create the name of the rpc secret
|
||||
*/}}
|
||||
{{- define "garage.rpcSecretName" -}}
|
||||
{{- printf "%s-rpc-secret" (include "garage.fullname" .) -}}
|
||||
{{- .Values.garage.existingRpcSecret | default (printf "%s-rpc-secret" (include "garage.fullname" .)) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
|
@ -47,6 +47,9 @@ helm.sh/chart: {{ include "garage.chart" . }}
|
|||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 0 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
|
|
|||
|
|
@ -15,7 +15,8 @@ data:
|
|||
|
||||
block_size = {{ .Values.garage.blockSize }}
|
||||
|
||||
replication_mode = "{{ .Values.garage.replicationMode }}"
|
||||
replication_factor = {{ .Values.garage.replicationFactor }}
|
||||
consistency_mode = "{{ .Values.garage.consistencyMode }}"
|
||||
|
||||
compression_level = {{ .Values.garage.compressionLevel }}
|
||||
|
||||
|
|
@ -27,7 +28,15 @@ data:
|
|||
# rpc_secret will be populated by the init container from a k8s secret object
|
||||
rpc_secret = "__RPC_SECRET_REPLACE__"
|
||||
|
||||
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
|
||||
bootstrap_peers = [
|
||||
{{- range $index, $peer := .Values.garage.bootstrapPeers }}
|
||||
{{- if $index}}, {{ end }}{{ $peer | quote }}
|
||||
{{ end }}
|
||||
]
|
||||
|
||||
{{- if .Values.garage.additionalTopLevelConfig }}
|
||||
{{ .Values.garage.additionalTopLevelConfig | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
[kubernetes_discovery]
|
||||
namespace = "{{ .Release.Namespace }}"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
{{- if not .Values.garage.existingRpcSecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
|
|
@ -12,3 +13,4 @@ data:
|
|||
{{- $prevRpcSecret := $prevSecretData.rpcSecret | default "" | b64dec }}
|
||||
{{/* Priority is: 1. from values, 2. previous value, 3. generate random */}}
|
||||
rpcSecret: {{ .Values.garage.rpcSecret | default $prevRpcSecret | default (include "jupyterhub.randHex" 64) | b64enc | quote }}
|
||||
{{- end }}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ spec:
|
|||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "garage.selectorLabels" . | nindent 8 }}
|
||||
{{- include "garage.labels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
|
|
|
|||
|
|
@ -2,23 +2,32 @@
|
|||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- Additional labels to add to all resources created by this chart
|
||||
commonLabels: {}
|
||||
# app.kubernetes.io/part-of: storage
|
||||
# team: platform
|
||||
|
||||
# Garage configuration. These values go to garage.toml
|
||||
garage:
|
||||
# -- Can be changed for better performance on certain systems
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db_engine
|
||||
dbEngine: "lmdb"
|
||||
|
||||
# -- Defaults is 1MB
|
||||
# An increase can result in better performance in certain scenarios
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block_size
|
||||
blockSize: "1048576"
|
||||
|
||||
# -- Default to 3 replicas, see the replication_mode section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
|
||||
replicationMode: "3"
|
||||
# -- Default to 3 replicas, see the replication_factor section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication_factor
|
||||
replicationFactor: "3"
|
||||
|
||||
# -- By default, enable read-after-write consistency guarantees, see the consistency_mode section at
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#consistency_mode
|
||||
consistencyMode: "consistent"
|
||||
|
||||
# -- zstd compression level of stored blocks
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
|
||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression_level
|
||||
compressionLevel: "1"
|
||||
|
||||
# -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
|
||||
|
|
@ -28,6 +37,10 @@ garage:
|
|||
rpcBindAddr: "[::]:3901"
|
||||
# -- If not given, a random secret will be generated and stored in a Secret object
|
||||
rpcSecret: ""
|
||||
# -- If you want to provide an rpcSecret within an existing k8s secret,
|
||||
# specify the secret name here, and store the value under the secret key `rpcSecret`
|
||||
# the default secret will not be created
|
||||
existingRpcSecret: ""
|
||||
# -- This is not required if you use the integrated kubernetes discovery
|
||||
bootstrapPeers: []
|
||||
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
|
||||
|
|
@ -41,6 +54,12 @@ garage:
|
|||
rootDomain: ".web.garage.tld"
|
||||
index: "index.html"
|
||||
|
||||
# -- Additional configuration to append to garage.toml. Use a multi-line string for custom config.
|
||||
# Example:
|
||||
# additionalTopLevelConfig: |-
|
||||
# data_fsync = true
|
||||
additionalTopLevelConfig: ""
|
||||
|
||||
# -- if not empty string, allow using an existing ConfigMap for the garage.toml,
|
||||
# if set, ignores garage.toml
|
||||
existingConfigMap: ""
|
||||
|
|
@ -108,6 +127,7 @@ podSecurityContext:
|
|||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
runAsNonRoot: true
|
||||
|
||||
securityContext:
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@
|
|||
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
|
||||
"rpc_public_addr = \"" node ":3901\"\n"
|
||||
"db_engine = \"lmdb\"\n"
|
||||
"replication_mode = \"3\"\n"
|
||||
"replication_factor = 3\n"
|
||||
"data_dir = \"" data-dir "\"\n"
|
||||
"metadata_dir = \"" meta-dir "\"\n"
|
||||
"[s3_api]\n"
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ data:
|
|||
metadata_dir = "/tmp/meta"
|
||||
data_dir = "/tmp/data"
|
||||
|
||||
replication_mode = "3"
|
||||
replication_factor = 3
|
||||
|
||||
rpc_bind_addr = "[::]:3901"
|
||||
rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec"
|
||||
|
|
|
|||
|
|
@ -694,32 +694,7 @@
|
|||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"__systemRef": "hideSeriesFrom",
|
||||
"matcher": {
|
||||
"id": "byNames",
|
||||
"options": {
|
||||
"mode": "exclude",
|
||||
"names": [
|
||||
"10.83.2.3:3903"
|
||||
],
|
||||
"prefix": "All except:",
|
||||
"readOnly": true
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
|
|
|
|||
|
|
@ -112,6 +112,23 @@ if [ -z "$SKIP_S3CMD" ]; then
|
|||
done
|
||||
fi
|
||||
|
||||
# BOTO3
|
||||
if [ -z "$SKIP_BOTO3" ]; then
|
||||
echo "🛠️ Testing with boto3 for STREAMING-UNSIGNED-PAYLOAD-TRAILER"
|
||||
source ${SCRIPT_FOLDER}/dev-env-aws.sh
|
||||
AWS_ENDPOINT_URL=https://localhost:4443 python <<EOF
|
||||
import boto3
|
||||
client = boto3.client('s3', verify=False)
|
||||
print("Put&delete hello world object")
|
||||
client.put_object(Body=b'hello world', Bucket='eprouvette', Key='test.s3.txt')
|
||||
client.delete_object(Bucket='eprouvette', Key='test.s3.txt')
|
||||
print("Put&delete big object")
|
||||
client.upload_file("/tmp/garage.3.rnd", 'eprouvette', 'garage.3.rnd')
|
||||
client.delete_object(Bucket='eprouvette', Key='garage.3.rnd')
|
||||
print("OK!")
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Minio Client
|
||||
if [ -z "$SKIP_MC" ]; then
|
||||
echo "🛠️ Testing with mc (minio client)"
|
||||
|
|
|
|||
|
|
@ -24,9 +24,17 @@ echo "============= insert data into old version cluster ================="
|
|||
export GARAGE_BIN=/tmp/old_garage
|
||||
if echo $OLD_VERSION | grep 'v0\.8\.'; then
|
||||
echo "Detected Garage v0.8.x"
|
||||
export GARAGE_08=1
|
||||
export GARAGE_OLDVER=v08
|
||||
elif (echo $OLD_VERSION | grep 'v0\.9\.') || (echo $OLD_VERSION | grep 'v1\.'); then
|
||||
echo "Detected Garage v0.9.x / v1.x"
|
||||
export GARAGE_OLDVER=v1
|
||||
fi
|
||||
|
||||
if echo $OLD_VERSION | grep 'v1\.'; then
|
||||
DO_SSEC_TEST=1
|
||||
fi
|
||||
SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
|
||||
|
||||
echo "⏳ Setup cluster using old version"
|
||||
$GARAGE_BIN --version
|
||||
${SCRIPT_FOLDER}/dev-clean.sh
|
||||
|
|
@ -37,7 +45,23 @@ ${SCRIPT_FOLDER}/dev-bucket.sh
|
|||
|
||||
echo "🛠️ Inserting data in old cluster"
|
||||
source ${SCRIPT_FOLDER}/dev-env-rclone.sh
|
||||
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
|
||||
if [ "$DO_SSEC_TEST" = "1" ]; then
|
||||
# upload small file (should be single part)
|
||||
rclone copy "${SCRIPT_FOLDER}/test-upgrade.sh" garage:eprouvette/test-ssec \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
# do a multipart upload
|
||||
dd if=/dev/urandom of=/tmp/randfile-for-upgrade bs=5M count=5
|
||||
rclone copy "/tmp/randfile-for-upgrade" garage:eprouvette/test-ssec \
|
||||
--s3-chunk-size 5M \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
fi
|
||||
|
||||
echo "🏁 Stopping old cluster"
|
||||
killall -INT old_garage
|
||||
|
|
@ -47,7 +71,7 @@ killall -9 old_garage || true
|
|||
echo "🏁 Removing old garage version"
|
||||
rm -rv $GARAGE_BIN
|
||||
export -n GARAGE_BIN
|
||||
export -n GARAGE_08
|
||||
export -n GARAGE_OLDVER
|
||||
|
||||
echo "================ read data from new cluster ==================="
|
||||
|
||||
|
|
@ -60,7 +84,8 @@ ${SCRIPT_FOLDER}/dev-cluster.sh >> /tmp/garage.log 2>&1 &
|
|||
sleep 3
|
||||
|
||||
echo "🛠️ Retrieving data from old cluster"
|
||||
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
|
||||
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
|
||||
|
||||
if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then
|
||||
echo "TEST FAILURE: directories are different"
|
||||
|
|
@ -68,6 +93,23 @@ if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' '
|
|||
fi
|
||||
rm -r /tmp/test_dotgit
|
||||
|
||||
if [ "$DO_SSEC_TEST" = "1" ]; then
|
||||
rclone copy garage:eprouvette/test-ssec /tmp/test_ssec_out \
|
||||
--s3-sse-customer-algorithm AES256 \
|
||||
--s3-sse-customer-key-base64 "$SSEC_KEY" \
|
||||
--stats=1s --stats-log-level=NOTICE --stats-one-line
|
||||
if ! diff "/tmp/test_ssec_out/test-upgrade.sh" "${SCRIPT_FOLDER}/test-upgrade.sh"; then
|
||||
echo "SSEC-FAILURE (small file)"
|
||||
exit 1
|
||||
fi
|
||||
if ! diff "/tmp/test_ssec_out/randfile-for-upgrade" "/tmp/randfile-for-upgrade"; then
|
||||
echo "SSEC-FAILURE (big file)"
|
||||
exit 1
|
||||
fi
|
||||
rm -r /tmp/test_ssec_out
|
||||
rm /tmp/randfile-for-upgrade
|
||||
fi
|
||||
|
||||
echo "🏁 Teardown"
|
||||
rm -rf /tmp/garage-{data,meta}-*
|
||||
rm -rf /tmp/config.*.toml
|
||||
|
|
|
|||
|
|
@ -26,6 +26,8 @@ in
|
|||
s3cmd
|
||||
minio-client
|
||||
rclone
|
||||
(python313.withPackages (ps: [ ps.boto3 ]))
|
||||
|
||||
socat
|
||||
psmisc
|
||||
which
|
||||
|
|
@ -37,6 +39,7 @@ in
|
|||
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
|
||||
|
||||
function to_s3 {
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
@ -93,6 +96,7 @@ in
|
|||
|
||||
nix-build nix/build_index.nix
|
||||
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
@ -100,6 +104,7 @@ in
|
|||
result/share/_releases.json \
|
||||
s3://garagehq.deuxfleurs.fr/
|
||||
|
||||
AWS_REQUEST_CHECKSUM_CALCULATION=WHEN_REQUIRED AWS_RESPONSE_CHECKSUM_VALIDATION=WHEN_REQUIRED \
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_api_admin"
|
||||
version = "1.3.0"
|
||||
version = "2.2.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
@ -14,7 +14,9 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
format_table.workspace = true
|
||||
garage_model.workspace = true
|
||||
garage_block.workspace = true
|
||||
garage_table.workspace = true
|
||||
garage_util.workspace = true
|
||||
garage_rpc.workspace = true
|
||||
|
|
@ -22,8 +24,11 @@ garage_api_common.workspace = true
|
|||
|
||||
argon2.workspace = true
|
||||
async-trait.workspace = true
|
||||
bytesize.workspace = true
|
||||
chrono.workspace = true
|
||||
thiserror.workspace = true
|
||||
hex.workspace = true
|
||||
paste.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
futures.workspace = true
|
||||
|
|
@ -34,6 +39,7 @@ url.workspace = true
|
|||
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
utoipa.workspace = true
|
||||
|
||||
opentelemetry.workspace = true
|
||||
opentelemetry-prometheus = { workspace = true, optional = true }
|
||||
|
|
@ -41,3 +47,4 @@ prometheus = { workspace = true, optional = true }
|
|||
|
||||
[features]
|
||||
metrics = [ "opentelemetry-prometheus", "prometheus" ]
|
||||
k2v = [ "garage_model/k2v" ]
|
||||
|
|
|
|||
292
src/api/admin/admin_token.rs
Normal file
|
|
@ -0,0 +1,292 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use garage_table::*;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::admin_token_table::*;
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for ListAdminTokensRequest {
|
||||
type Response = ListAdminTokensResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ListAdminTokensResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
let mut res = garage
|
||||
.admin_token_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|t| admin_token_info_results(t, now))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if garage.config.admin.metrics_token.is_some() {
|
||||
res.insert(
|
||||
0,
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "metrics_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["Metrics".into()],
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
if garage.config.admin.admin_token.is_some() {
|
||||
res.insert(
|
||||
0,
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "admin_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["*".into()],
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(ListAdminTokensResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for GetAdminTokenInfoRequest {
|
||||
type Response = GetAdminTokenInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetAdminTokenInfoResponse, Error> {
|
||||
let token = match (self.id, self.search) {
|
||||
(Some(id), None) => get_existing_admin_token(garage, &id).await?,
|
||||
(None, Some(search)) => {
|
||||
let candidates = garage
|
||||
.admin_token_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::MatchesAndNotDeleted(search.to_string())),
|
||||
10,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
if candidates.len() != 1 {
|
||||
return Err(Error::bad_request(format!(
|
||||
"{} matching admin tokens",
|
||||
candidates.len()
|
||||
)));
|
||||
}
|
||||
candidates.into_iter().next().unwrap()
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id or search must be provided (but not both)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(admin_token_info_results(&token, now_msec()))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for CreateAdminTokenRequest {
|
||||
type Response = CreateAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<CreateAdminTokenResponse, Error> {
|
||||
let (mut token, secret) = if self.0.name.is_some() {
|
||||
AdminApiToken::new("")
|
||||
} else {
|
||||
AdminApiToken::new(&format!("token_{}", Utc::now().format("%Y%m%d_%H%M")))
|
||||
};
|
||||
|
||||
apply_token_updates(&mut token, self.0)?;
|
||||
|
||||
garage.admin_token_table.insert(&token).await?;
|
||||
|
||||
Ok(CreateAdminTokenResponse {
|
||||
secret_token: secret,
|
||||
info: admin_token_info_results(&token, now_msec()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for UpdateAdminTokenRequest {
|
||||
type Response = UpdateAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<UpdateAdminTokenResponse, Error> {
|
||||
let mut token = get_existing_admin_token(&garage, &self.id).await?;
|
||||
|
||||
apply_token_updates(&mut token, self.body)?;
|
||||
|
||||
garage.admin_token_table.insert(&token).await?;
|
||||
|
||||
Ok(UpdateAdminTokenResponse(admin_token_info_results(
|
||||
&token,
|
||||
now_msec(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for DeleteAdminTokenRequest {
|
||||
type Response = DeleteAdminTokenResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<DeleteAdminTokenResponse, Error> {
|
||||
let token = get_existing_admin_token(&garage, &self.id).await?;
|
||||
|
||||
garage
|
||||
.admin_token_table
|
||||
.insert(&AdminApiToken::delete(token.prefix))
|
||||
.await?;
|
||||
|
||||
Ok(DeleteAdminTokenResponse)
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for GetCurrentAdminTokenInfoRequest {
|
||||
type Response = GetCurrentAdminTokenInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetCurrentAdminTokenInfoResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
if garage
|
||||
.config
|
||||
.admin
|
||||
.metrics_token
|
||||
.as_ref()
|
||||
.is_some_and(|s| s == &self.admin_token)
|
||||
{
|
||||
return Ok(GetCurrentAdminTokenInfoResponse(
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "metrics_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["Metrics".into()],
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
if garage
|
||||
.config
|
||||
.admin
|
||||
.admin_token
|
||||
.as_ref()
|
||||
.is_some_and(|s| s == &self.admin_token)
|
||||
{
|
||||
return Ok(GetCurrentAdminTokenInfoResponse(
|
||||
GetAdminTokenInfoResponse {
|
||||
id: None,
|
||||
created: None,
|
||||
name: "admin_token (from daemon configuration)".into(),
|
||||
expiration: None,
|
||||
expired: false,
|
||||
scope: vec!["*".into()],
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (prefix, _) = self.admin_token.split_once('.').unwrap();
|
||||
let token = get_existing_admin_token(&garage, &prefix.to_string()).await?;
|
||||
|
||||
Ok(GetCurrentAdminTokenInfoResponse(admin_token_info_results(
|
||||
&token, now,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
// ---- helpers ----
|
||||
|
||||
fn admin_token_info_results(token: &AdminApiToken, now: u64) -> GetAdminTokenInfoResponse {
|
||||
let params = token.params().unwrap();
|
||||
|
||||
GetAdminTokenInfoResponse {
|
||||
id: Some(token.prefix.clone()),
|
||||
created: Some(
|
||||
DateTime::from_timestamp_millis(params.created as i64)
|
||||
.expect("invalid timestamp stored in db"),
|
||||
),
|
||||
name: params.name.get().to_string(),
|
||||
expiration: params.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: params.is_expired(now),
|
||||
scope: params.scope.get().0.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_existing_admin_token(garage: &Garage, id: &String) -> Result<AdminApiToken, Error> {
|
||||
garage
|
||||
.admin_token_table
|
||||
.get(&EmptyKey, id)
|
||||
.await?
|
||||
.filter(|k| !k.state.is_deleted())
|
||||
.ok_or_else(|| Error::NoSuchAdminToken(id.to_string()))
|
||||
}
|
||||
|
||||
fn apply_token_updates(
|
||||
token: &mut AdminApiToken,
|
||||
updates: UpdateAdminTokenRequestBody,
|
||||
) -> Result<(), Error> {
|
||||
if updates.never_expires && updates.expiration.is_some() {
|
||||
return Err(Error::bad_request(
|
||||
"cannot specify `expiration` and `never_expires`",
|
||||
));
|
||||
}
|
||||
|
||||
let params = token.params_mut().unwrap();
|
||||
|
||||
if let Some(name) = updates.name {
|
||||
params.name.update(name);
|
||||
}
|
||||
if let Some(expiration) = updates.expiration {
|
||||
params
|
||||
.expiration
|
||||
.update(Some(expiration.timestamp_millis() as u64));
|
||||
}
|
||||
if updates.never_expires {
|
||||
params.expiration.update(None);
|
||||
}
|
||||
if let Some(scope) = updates.scope {
|
||||
params.scope.update(AdminApiTokenScope(scope));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1352
src/api/admin/api.rs
Normal file
|
|
@ -1,333 +1,237 @@
|
|||
use std::collections::HashMap;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use argon2::password_hash::PasswordHash;
|
||||
|
||||
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
|
||||
use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::watch;
|
||||
|
||||
use opentelemetry::trace::SpanRef;
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
use opentelemetry_prometheus::PrometheusExporter;
|
||||
#[cfg(feature = "metrics")]
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
use garage_rpc::{Endpoint as RpcEndpoint, *};
|
||||
use garage_table::EmptyKey;
|
||||
use garage_util::background::BackgroundRunner;
|
||||
use garage_util::data::Uuid;
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::socket_address::UnixOrTCPSocketAddress;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_api_common::generic_server::*;
|
||||
use garage_api_common::helpers::*;
|
||||
|
||||
use crate::bucket::*;
|
||||
use crate::cluster::*;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::key::*;
|
||||
use crate::router_v0;
|
||||
use crate::router_v1::{Authorization, Endpoint};
|
||||
use crate::router_v1;
|
||||
use crate::Authorization;
|
||||
use crate::RequestHandler;
|
||||
|
||||
// ---- FOR RPC ----
|
||||
|
||||
pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc";
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum AdminRpc {
|
||||
Proxy(AdminApiRequest),
|
||||
Internal(LocalAdminApiRequest),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub enum AdminRpcResponse {
|
||||
ProxyApiOkResponse(TaggedAdminApiResponse),
|
||||
InternalApiOkResponse(LocalAdminApiResponse),
|
||||
ApiErrorResponse {
|
||||
http_code: u16,
|
||||
error_code: String,
|
||||
message: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl Rpc for AdminRpc {
|
||||
type Response = Result<AdminRpcResponse, GarageError>;
|
||||
}
|
||||
|
||||
impl EndpointHandler<AdminRpc> for AdminApiServer {
|
||||
async fn handle(
|
||||
self: &Arc<Self>,
|
||||
message: &AdminRpc,
|
||||
_from: NodeID,
|
||||
) -> Result<AdminRpcResponse, GarageError> {
|
||||
match message {
|
||||
AdminRpc::Proxy(req) => {
|
||||
info!("Proxied admin API request: {}", req.name());
|
||||
let res = req.clone().handle(&self.garage, &self).await;
|
||||
match res {
|
||||
Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())),
|
||||
Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
|
||||
http_code: e.http_status_code().as_u16(),
|
||||
error_code: e.code().to_string(),
|
||||
message: e.to_string(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
AdminRpc::Internal(req) => {
|
||||
info!("Internal admin API request: {}", req.name());
|
||||
let res = req.clone().handle(&self.garage, &self).await;
|
||||
match res {
|
||||
Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)),
|
||||
Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
|
||||
http_code: e.http_status_code().as_u16(),
|
||||
error_code: e.code().to_string(),
|
||||
message: e.to_string(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---- FOR HTTP ----
|
||||
|
||||
pub type ResBody = BoxBody<Error>;
|
||||
|
||||
pub struct AdminApiServer {
|
||||
garage: Arc<Garage>,
|
||||
#[cfg(feature = "metrics")]
|
||||
exporter: PrometheusExporter,
|
||||
pub(crate) exporter: PrometheusExporter,
|
||||
metrics_token: Option<String>,
|
||||
metrics_require_token: bool,
|
||||
admin_token: Option<String>,
|
||||
pub(crate) background: Arc<BackgroundRunner>,
|
||||
pub(crate) endpoint: Arc<RpcEndpoint<AdminRpc, Self>>,
|
||||
}
|
||||
|
||||
pub enum HttpEndpoint {
|
||||
Old(router_v1::Endpoint),
|
||||
New(String),
|
||||
}
|
||||
|
||||
impl AdminApiServer {
|
||||
pub fn new(
|
||||
garage: Arc<Garage>,
|
||||
background: Arc<BackgroundRunner>,
|
||||
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
|
||||
) -> Self {
|
||||
) -> Arc<Self> {
|
||||
let cfg = &garage.config.admin;
|
||||
let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
|
||||
let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
|
||||
Self {
|
||||
let metrics_require_token = cfg.metrics_require_token;
|
||||
|
||||
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
|
||||
let admin = Arc::new(Self {
|
||||
garage,
|
||||
#[cfg(feature = "metrics")]
|
||||
exporter,
|
||||
metrics_token,
|
||||
metrics_require_token,
|
||||
admin_token,
|
||||
}
|
||||
background,
|
||||
endpoint,
|
||||
});
|
||||
admin.endpoint.set_handler(admin.clone());
|
||||
admin
|
||||
}
|
||||
|
||||
pub async fn run(
|
||||
self,
|
||||
self: Arc<Self>,
|
||||
bind_addr: UnixOrTCPSocketAddress,
|
||||
must_exit: watch::Receiver<bool>,
|
||||
) -> Result<(), GarageError> {
|
||||
let region = self.garage.config.s3_api.s3_region.clone();
|
||||
ApiServer::new(region, self)
|
||||
ApiServer::new(region, ArcAdminApiServer(self))
|
||||
.run_server(bind_addr, Some(0o220), must_exit)
|
||||
.await
|
||||
}
|
||||
|
||||
fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NO_CONTENT)
|
||||
.header(ALLOW, "OPTIONS, GET, POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
|
||||
.body(empty_body())?)
|
||||
}
|
||||
|
||||
async fn handle_check_domain(
|
||||
async fn handle_http_api(
|
||||
&self,
|
||||
req: Request<IncomingBody>,
|
||||
endpoint: HttpEndpoint,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let query_params: HashMap<String, String> = req
|
||||
.uri()
|
||||
.query()
|
||||
.map(|v| {
|
||||
url::form_urlencoded::parse(v.as_bytes())
|
||||
.into_owned()
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_else(HashMap::new);
|
||||
let auth_header = req.headers().get(AUTHORIZATION).cloned();
|
||||
|
||||
let has_domain_key = query_params.contains_key("domain");
|
||||
|
||||
if !has_domain_key {
|
||||
return Err(Error::bad_request("No domain query string found"));
|
||||
}
|
||||
|
||||
let domain = query_params
|
||||
.get("domain")
|
||||
.ok_or_internal_error("Could not parse domain query string")?;
|
||||
|
||||
if self.check_domain(domain).await? {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(string_body(format!(
|
||||
"Domain '{domain}' is managed by Garage"
|
||||
)))?)
|
||||
} else {
|
||||
Err(Error::bad_request(format!(
|
||||
"Domain '{domain}' is not managed by Garage"
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
|
||||
// Resolve bucket from domain name, inferring if the website must be activated for the
|
||||
// domain to be valid.
|
||||
let (bucket_name, must_check_website) = if let Some(bname) = self
|
||||
.garage
|
||||
.config
|
||||
.s3_api
|
||||
.root_domain
|
||||
.as_ref()
|
||||
.and_then(|rd| host_to_bucket(domain, rd))
|
||||
{
|
||||
(bname.to_string(), false)
|
||||
} else if let Some(bname) = self
|
||||
.garage
|
||||
.config
|
||||
.s3_web
|
||||
.as_ref()
|
||||
.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
|
||||
{
|
||||
(bname.to_string(), true)
|
||||
} else {
|
||||
(domain.to_string(), true)
|
||||
let request = match endpoint {
|
||||
HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
|
||||
HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?,
|
||||
};
|
||||
|
||||
let bucket_id = match self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&bucket_name)
|
||||
.await?
|
||||
{
|
||||
Some(bucket_id) => bucket_id,
|
||||
None => return Ok(false),
|
||||
};
|
||||
|
||||
if !must_check_website {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let bucket = self
|
||||
.garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
|
||||
let bucket_state = bucket.state.as_option().unwrap();
|
||||
let bucket_website_config = bucket_state.website_config.get();
|
||||
|
||||
match bucket_website_config {
|
||||
Some(_v) => Ok(true),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_health(&self) -> Result<Response<ResBody>, Error> {
|
||||
let health = self.garage.system.health();
|
||||
|
||||
let (status, status_str) = match health.status {
|
||||
ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
|
||||
ClusterHealthStatus::Degraded => (
|
||||
StatusCode::OK,
|
||||
"Garage is operational but some storage nodes are unavailable",
|
||||
),
|
||||
ClusterHealthStatus::Unavailable => (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Quorum is not available for some/all partitions, reads and writes will fail",
|
||||
let (global_token_hash, token_required) = match request.authorization_type() {
|
||||
Authorization::None => (None, false),
|
||||
Authorization::MetricsToken => (
|
||||
self.metrics_token.as_deref(),
|
||||
self.metrics_token.is_some() || self.metrics_require_token,
|
||||
),
|
||||
Authorization::AdminToken => (self.admin_token.as_deref(), true),
|
||||
};
|
||||
let status_str = format!(
|
||||
"{}\nConsult the full health check API endpoint at /v1/health for more details\n",
|
||||
status_str
|
||||
);
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(status)
|
||||
.header(http::header::CONTENT_TYPE, "text/plain")
|
||||
.body(string_body(status_str))?)
|
||||
if token_required {
|
||||
verify_authorization(&self.garage, global_token_hash, auth_header, request.name())?;
|
||||
}
|
||||
|
||||
fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
|
||||
#[cfg(feature = "metrics")]
|
||||
{
|
||||
use opentelemetry::trace::Tracer;
|
||||
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
|
||||
let tracer = opentelemetry::global::tracer("garage");
|
||||
let metric_families = tracer.in_span("admin/gather_metrics", |_| {
|
||||
self.exporter.registry().gather()
|
||||
});
|
||||
|
||||
encoder
|
||||
.encode(&metric_families, &mut buffer)
|
||||
.ok_or_internal_error("Could not serialize metrics")?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(http::header::CONTENT_TYPE, encoder.format_type())
|
||||
.body(bytes_body(buffer.into()))?)
|
||||
match request {
|
||||
AdminApiRequest::Options(req) => req.handle(&self.garage, &self).await,
|
||||
AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, &self).await,
|
||||
AdminApiRequest::Health(req) => req.handle(&self.garage, &self).await,
|
||||
AdminApiRequest::Metrics(req) => req.handle(&self.garage, &self).await,
|
||||
req => {
|
||||
let res = req.handle(&self.garage, &self).await?;
|
||||
let mut res = json_ok_response(&res)?;
|
||||
res.headers_mut()
|
||||
.insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
Err(Error::bad_request(
|
||||
"Garage was built without the metrics feature".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiHandler for AdminApiServer {
|
||||
struct ArcAdminApiServer(Arc<AdminApiServer>);
|
||||
|
||||
impl ApiHandler for ArcAdminApiServer {
|
||||
const API_NAME: &'static str = "admin";
|
||||
const API_NAME_DISPLAY: &'static str = "Admin";
|
||||
|
||||
type Endpoint = Endpoint;
|
||||
type Endpoint = HttpEndpoint;
|
||||
type Error = Error;
|
||||
|
||||
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
|
||||
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<HttpEndpoint, Error> {
|
||||
if req.uri().path().starts_with("/v0/") {
|
||||
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
|
||||
Endpoint::from_v0(endpoint_v0)
|
||||
let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
|
||||
Ok(HttpEndpoint::Old(endpoint_v1))
|
||||
} else if req.uri().path().starts_with("/v1/") {
|
||||
let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
|
||||
Ok(HttpEndpoint::Old(endpoint_v1))
|
||||
} else {
|
||||
Endpoint::from_request(req)
|
||||
Ok(HttpEndpoint::New(req.uri().path().to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle(
|
||||
&self,
|
||||
req: Request<IncomingBody>,
|
||||
endpoint: Endpoint,
|
||||
endpoint: HttpEndpoint,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let required_auth_hash =
|
||||
match endpoint.authorization_type() {
|
||||
Authorization::None => None,
|
||||
Authorization::MetricsToken => self.metrics_token.as_deref(),
|
||||
Authorization::AdminToken => match self.admin_token.as_deref() {
|
||||
None => return Err(Error::forbidden(
|
||||
"Admin token isn't configured, admin API access is disabled for security.",
|
||||
)),
|
||||
Some(t) => Some(t),
|
||||
},
|
||||
};
|
||||
|
||||
if let Some(password_hash) = required_auth_hash {
|
||||
match req.headers().get("Authorization") {
|
||||
None => return Err(Error::forbidden("Authorization token must be provided")),
|
||||
Some(authorization) => {
|
||||
verify_bearer_token(&authorization, password_hash)?;
|
||||
self.0.handle_http_api(req, endpoint).await
|
||||
}
|
||||
|
||||
fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
|
||||
let auth_header = req.headers().get(AUTHORIZATION)?;
|
||||
let token = parse_authorization(auth_header).ok()?;
|
||||
let key_id = token.split_once('.')?.0;
|
||||
Some(key_id.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
match endpoint {
|
||||
Endpoint::Options => self.handle_options(&req),
|
||||
Endpoint::CheckDomain => self.handle_check_domain(req).await,
|
||||
Endpoint::Health => self.handle_health(),
|
||||
Endpoint::Metrics => self.handle_metrics(),
|
||||
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
||||
Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
|
||||
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
||||
// Layout
|
||||
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
|
||||
// Keys
|
||||
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
|
||||
Endpoint::GetKeyInfo {
|
||||
id,
|
||||
search,
|
||||
show_secret_key,
|
||||
} => {
|
||||
let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
|
||||
handle_get_key_info(&self.garage, id, search, show_secret_key).await
|
||||
impl ApiEndpoint for HttpEndpoint {
|
||||
fn name(&self) -> Cow<'static, str> {
|
||||
match self {
|
||||
Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
|
||||
Self::New(path) => Cow::Owned(path.clone()),
|
||||
}
|
||||
Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
|
||||
Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
|
||||
Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
|
||||
Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
|
||||
// Buckets
|
||||
Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
|
||||
Endpoint::GetBucketInfo { id, global_alias } => {
|
||||
handle_get_bucket_info(&self.garage, id, global_alias).await
|
||||
}
|
||||
Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
|
||||
Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
|
||||
Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
|
||||
// Bucket-key permissions
|
||||
Endpoint::BucketAllowKey => {
|
||||
handle_bucket_change_key_perm(&self.garage, req, true).await
|
||||
}
|
||||
Endpoint::BucketDenyKey => {
|
||||
handle_bucket_change_key_perm(&self.garage, req, false).await
|
||||
}
|
||||
// Bucket aliasing
|
||||
Endpoint::GlobalAliasBucket { id, alias } => {
|
||||
handle_global_alias_bucket(&self.garage, id, alias).await
|
||||
}
|
||||
Endpoint::GlobalUnaliasBucket { id, alias } => {
|
||||
handle_global_unalias_bucket(&self.garage, id, alias).await
|
||||
}
|
||||
Endpoint::LocalAliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||
Endpoint::LocalUnaliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiEndpoint for Endpoint {
|
||||
fn name(&self) -> &'static str {
|
||||
Endpoint::name(self)
|
||||
}
|
||||
|
||||
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
|
||||
|
|
@ -347,20 +251,91 @@ fn hash_bearer_token(token: &str) -> String {
|
|||
.to_string()
|
||||
}
|
||||
|
||||
fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
|
||||
use argon2::{password_hash::PasswordVerifier, Argon2};
|
||||
|
||||
let parsed_hash = PasswordHash::new(&password_hash).unwrap();
|
||||
|
||||
token
|
||||
fn parse_authorization(auth_header: &hyper::http::HeaderValue) -> Result<&str, Error> {
|
||||
let token = auth_header
|
||||
.to_str()?
|
||||
.strip_prefix("Bearer ")
|
||||
.and_then(|token| {
|
||||
.ok_or_else(|| Error::forbidden("Invalid Authorization header"))?
|
||||
.trim();
|
||||
Ok(token)
|
||||
}
|
||||
|
||||
fn verify_authorization(
|
||||
garage: &Garage,
|
||||
global_token_hash: Option<&str>,
|
||||
auth_header: Option<hyper::http::HeaderValue>,
|
||||
endpoint_name: &str,
|
||||
) -> Result<(), Error> {
|
||||
use argon2::{password_hash::PasswordHash, password_hash::PasswordVerifier, Argon2};
|
||||
|
||||
let invalid_msg = "Invalid bearer token";
|
||||
|
||||
let token = match &auth_header {
|
||||
None => {
|
||||
return Err(Error::forbidden(
|
||||
"Bearer token must be provided in Authorization header",
|
||||
))
|
||||
}
|
||||
Some(authorization) => parse_authorization(authorization)?,
|
||||
};
|
||||
|
||||
let token_hash_string = if let Some((prefix, _)) = token.split_once('.') {
|
||||
garage
|
||||
.admin_token_table
|
||||
.get_local(&EmptyKey, &prefix.to_string())?
|
||||
.and_then(|k| k.state.into_option())
|
||||
.filter(|p| !p.is_expired(now_msec()))
|
||||
// GetCurrentAdminTokenInfo endpoint must be accessible even if it is not in the token scopes
|
||||
.filter(|p| p.has_scope(endpoint_name) || endpoint_name == "GetCurrentAdminTokenInfo")
|
||||
.ok_or_else(|| Error::forbidden(invalid_msg))?
|
||||
.token_hash
|
||||
} else {
|
||||
global_token_hash
|
||||
.ok_or_else(|| Error::forbidden(invalid_msg))?
|
||||
.to_string()
|
||||
};
|
||||
|
||||
let token_hash =
|
||||
PasswordHash::new(&token_hash_string).ok_or_internal_error("Could not parse token hash")?;
|
||||
|
||||
Argon2::default()
|
||||
.verify_password(token.trim().as_bytes(), &parsed_hash)
|
||||
.ok()
|
||||
})
|
||||
.ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
|
||||
.verify_password(token.as_bytes(), &token_hash)
|
||||
.map_err(|_| Error::forbidden(invalid_msg))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn find_matching_nodes(garage: &Garage, spec: &str) -> Result<Vec<Uuid>, Error> {
|
||||
if spec == "self" {
|
||||
Ok(vec![garage.system.id])
|
||||
} else {
|
||||
// Collect all nodes currently up and/or in cluster layout
|
||||
let mut res = vec![];
|
||||
if let Ok(all_nodes) = garage.system.cluster_layout().all_nodes() {
|
||||
res = all_nodes.to_vec();
|
||||
}
|
||||
for node in garage.system.get_known_nodes() {
|
||||
if node.is_up && !res.contains(&node.id) {
|
||||
res.push(node.id);
|
||||
}
|
||||
}
|
||||
|
||||
if spec == "*" {
|
||||
// match all nodes
|
||||
Ok(res)
|
||||
} else {
|
||||
// filter nodes that match spec
|
||||
res.retain(|node| hex::encode(node).starts_with(spec));
|
||||
if res.is_empty() {
|
||||
Err(Error::bad_request(format!("No nodes matching {}", spec)))
|
||||
} else if res.len() > 1 {
|
||||
Err(Error::bad_request(format!(
|
||||
"Multiple nodes matching {}: {:?}",
|
||||
spec, res
|
||||
)))
|
||||
} else {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
284
src/api/admin/block.rs
Normal file
|
|
@ -0,0 +1,284 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_table::EmptyKey;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::s3::object_table::*;
|
||||
use garage_model::s3::version_table::*;
|
||||
|
||||
use garage_api_common::common_error::CommonErrorDerivative;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for LocalListBlockErrorsRequest {
|
||||
type Response = LocalListBlockErrorsResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalListBlockErrorsResponse, Error> {
|
||||
let errors = garage.block_manager.list_resync_errors()?;
|
||||
let now = now_msec();
|
||||
let errors = errors
|
||||
.into_iter()
|
||||
.map(|e| BlockError {
|
||||
block_hash: hex::encode(&e.hash),
|
||||
refcount: e.refcount,
|
||||
error_count: e.error_count,
|
||||
last_try_secs_ago: now.saturating_sub(e.last_try) / 1000,
|
||||
next_try_in_secs: e.next_try.saturating_sub(now) / 1000,
|
||||
})
|
||||
.collect();
|
||||
Ok(LocalListBlockErrorsResponse(errors))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalGetBlockInfoRequest {
|
||||
type Response = LocalGetBlockInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalGetBlockInfoResponse, Error> {
|
||||
let hash = find_block_hash_by_prefix(garage, &self.block_hash)?;
|
||||
let refcount = garage.block_manager.get_block_rc(&hash)?;
|
||||
let block_refs = garage
|
||||
.block_ref_table
|
||||
.get_range(&hash, None, None, 10000, Default::default())
|
||||
.await?;
|
||||
let mut versions = vec![];
|
||||
for br in block_refs {
|
||||
if let Some(v) = garage.version_table.get(&br.version, &EmptyKey).await? {
|
||||
let bl = match &v.backlink {
|
||||
VersionBacklink::MultipartUpload { upload_id } => {
|
||||
if let Some(u) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
|
||||
BlockVersionBacklink::Upload {
|
||||
upload_id: hex::encode(&upload_id),
|
||||
upload_deleted: u.deleted.get(),
|
||||
upload_garbage_collected: false,
|
||||
bucket_id: Some(hex::encode(&u.bucket_id)),
|
||||
key: Some(u.key.to_string()),
|
||||
}
|
||||
} else {
|
||||
BlockVersionBacklink::Upload {
|
||||
upload_id: hex::encode(&upload_id),
|
||||
upload_deleted: true,
|
||||
upload_garbage_collected: true,
|
||||
bucket_id: None,
|
||||
key: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
VersionBacklink::Object { bucket_id, key } => BlockVersionBacklink::Object {
|
||||
bucket_id: hex::encode(&bucket_id),
|
||||
key: key.to_string(),
|
||||
},
|
||||
};
|
||||
versions.push(BlockVersion {
|
||||
version_id: hex::encode(&br.version),
|
||||
ref_deleted: br.deleted.get(),
|
||||
version_deleted: v.deleted.get(),
|
||||
garbage_collected: false,
|
||||
backlink: Some(bl),
|
||||
});
|
||||
} else {
|
||||
versions.push(BlockVersion {
|
||||
version_id: hex::encode(&br.version),
|
||||
ref_deleted: br.deleted.get(),
|
||||
version_deleted: true,
|
||||
garbage_collected: true,
|
||||
backlink: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
Ok(LocalGetBlockInfoResponse {
|
||||
block_hash: hex::encode(&hash),
|
||||
refcount,
|
||||
versions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalRetryBlockResyncRequest {
|
||||
type Response = LocalRetryBlockResyncResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalRetryBlockResyncResponse, Error> {
|
||||
match self {
|
||||
Self::All { all: true } => {
|
||||
let blocks = garage.block_manager.list_resync_errors()?;
|
||||
for b in blocks.iter() {
|
||||
garage.block_manager.resync.clear_backoff(&b.hash)?;
|
||||
}
|
||||
Ok(LocalRetryBlockResyncResponse {
|
||||
count: blocks.len() as u64,
|
||||
})
|
||||
}
|
||||
Self::All { all: false } => Err(Error::bad_request("nonsense")),
|
||||
Self::Blocks { block_hashes } => {
|
||||
for hash in block_hashes.iter() {
|
||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||
garage.block_manager.resync.clear_backoff(&hash)?;
|
||||
}
|
||||
Ok(LocalRetryBlockResyncResponse {
|
||||
count: block_hashes.len() as u64,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalPurgeBlocksRequest {
|
||||
type Response = LocalPurgeBlocksResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalPurgeBlocksResponse, Error> {
|
||||
let mut obj_dels = 0;
|
||||
let mut mpu_dels = 0;
|
||||
let mut ver_dels = 0;
|
||||
let mut br_dels = 0;
|
||||
|
||||
for hash in self.0.iter() {
|
||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||
let block_refs = garage
|
||||
.block_ref_table
|
||||
.get_range(&hash, None, None, 10000, Default::default())
|
||||
.await?;
|
||||
|
||||
for br in block_refs {
|
||||
if let Some(version) = garage.version_table.get(&br.version, &EmptyKey).await? {
|
||||
handle_block_purge_version_backlink(
|
||||
garage,
|
||||
&version,
|
||||
&mut obj_dels,
|
||||
&mut mpu_dels,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !version.deleted.get() {
|
||||
let deleted_version = Version::new(version.uuid, version.backlink, true);
|
||||
garage.version_table.insert(&deleted_version).await?;
|
||||
ver_dels += 1;
|
||||
}
|
||||
}
|
||||
if !br.deleted.get() {
|
||||
let mut br = br;
|
||||
br.deleted.set();
|
||||
garage.block_ref_table.insert(&br).await?;
|
||||
br_dels += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(LocalPurgeBlocksResponse {
|
||||
blocks_purged: self.0.len() as u64,
|
||||
block_refs_purged: br_dels,
|
||||
versions_deleted: ver_dels,
|
||||
objects_deleted: obj_dels,
|
||||
uploads_deleted: mpu_dels,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn find_block_hash_by_prefix(garage: &Arc<Garage>, prefix: &str) -> Result<Hash, Error> {
|
||||
if prefix.len() < 4 {
|
||||
return Err(Error::bad_request(
|
||||
"Please specify at least 4 characters of the block hash",
|
||||
));
|
||||
}
|
||||
|
||||
let prefix_bin = hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?;
|
||||
|
||||
let iter = garage
|
||||
.block_ref_table
|
||||
.data
|
||||
.store
|
||||
.range(&prefix_bin[..]..)
|
||||
.map_err(GarageError::from)?;
|
||||
let mut found = None;
|
||||
for item in iter {
|
||||
let (k, _v) = item.map_err(GarageError::from)?;
|
||||
let hash = Hash::try_from(&k[..32]).unwrap();
|
||||
if &hash.as_slice()[..prefix_bin.len()] != prefix_bin {
|
||||
break;
|
||||
}
|
||||
if hex::encode(hash.as_slice()).starts_with(prefix) {
|
||||
match &found {
|
||||
Some(x) if *x == hash => (),
|
||||
Some(_) => {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Several blocks match prefix `{}`",
|
||||
prefix
|
||||
)));
|
||||
}
|
||||
None => {
|
||||
found = Some(hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
found.ok_or_else(|| Error::NoSuchBlock(prefix.to_string()))
|
||||
}
|
||||
|
||||
async fn handle_block_purge_version_backlink(
|
||||
garage: &Arc<Garage>,
|
||||
version: &Version,
|
||||
obj_dels: &mut u64,
|
||||
mpu_dels: &mut u64,
|
||||
) -> Result<(), Error> {
|
||||
let (bucket_id, key, ov_id) = match &version.backlink {
|
||||
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
|
||||
VersionBacklink::MultipartUpload { upload_id } => {
|
||||
if let Some(mut mpu) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
|
||||
if !mpu.deleted.get() {
|
||||
mpu.parts.clear();
|
||||
mpu.deleted.set();
|
||||
garage.mpu_table.insert(&mpu).await?;
|
||||
*mpu_dels += 1;
|
||||
}
|
||||
(mpu.bucket_id, mpu.key.clone(), *upload_id)
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(object) = garage.object_table.get(&bucket_id, &key).await? {
|
||||
let ov = object.versions().iter().rev().find(|v| v.is_complete());
|
||||
if let Some(ov) = ov {
|
||||
if ov.uuid == ov_id {
|
||||
let del_uuid = gen_uuid();
|
||||
let deleted_object = Object::new(
|
||||
bucket_id,
|
||||
key,
|
||||
vec![ObjectVersion {
|
||||
uuid: del_uuid,
|
||||
timestamp: ov.timestamp + 1,
|
||||
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
|
||||
}],
|
||||
);
|
||||
garage.object_table.insert(&deleted_object).await?;
|
||||
*obj_dels += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -1,23 +1,28 @@
|
|||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::fmt::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use format_table::format_table_to_string;
|
||||
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
|
||||
use garage_rpc::layout;
|
||||
use garage_rpc::layout::PARTITION_BITS;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use garage_api_common::helpers::{json_ok_response, parse_json_body};
|
||||
|
||||
use crate::api_server::ResBody;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
impl RequestHandler for GetClusterStatusRequest {
|
||||
type Response = GetClusterStatusResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterStatusResponse, Error> {
|
||||
let layout = garage.system.cluster_layout();
|
||||
let mut nodes = garage
|
||||
.system
|
||||
|
|
@ -28,16 +33,16 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
|||
i.id,
|
||||
NodeResp {
|
||||
id: hex::encode(i.id),
|
||||
garage_version: i.status.garage_version,
|
||||
addr: i.addr,
|
||||
hostname: i.status.hostname,
|
||||
is_up: i.is_up,
|
||||
last_seen_secs_ago: i.last_seen_secs_ago,
|
||||
data_partition: i
|
||||
.status
|
||||
.data_disk_avail
|
||||
.map(|(avail, total)| FreeSpaceResp {
|
||||
data_partition: i.status.data_disk_avail.map(|(avail, total)| {
|
||||
FreeSpaceResp {
|
||||
available: avail,
|
||||
total,
|
||||
}
|
||||
}),
|
||||
metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
|
||||
FreeSpaceResp {
|
||||
|
|
@ -51,10 +56,10 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
|||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for (id, _, role) in layout.current().roles.items().iter() {
|
||||
if let Ok(current_layout) = layout.current() {
|
||||
for (id, _, role) in current_layout.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
let role = NodeRoleResp {
|
||||
id: hex::encode(id),
|
||||
let role = NodeAssignedRole {
|
||||
zone: r.zone.to_string(),
|
||||
capacity: r.capacity,
|
||||
tags: r.tags.clone(),
|
||||
|
|
@ -76,8 +81,10 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ver in layout.versions().iter().rev().skip(1) {
|
||||
if let Ok(layout_versions) = layout.versions() {
|
||||
for ver in layout_versions.iter().rev().skip(1) {
|
||||
for (id, _, role) in ver.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
if r.capacity.is_some() {
|
||||
|
|
@ -99,313 +106,183 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut nodes = nodes.into_values().collect::<Vec<_>>();
|
||||
nodes.sort_by(|x, y| x.id.cmp(&y.id));
|
||||
|
||||
let res = GetClusterStatusResponse {
|
||||
node: hex::encode(garage.system.id),
|
||||
garage_version: garage_util::version::garage_version(),
|
||||
garage_features: garage_util::version::garage_features(),
|
||||
rust_version: garage_util::version::rust_version(),
|
||||
db_engine: garage.db.engine(),
|
||||
layout_version: layout.current().version,
|
||||
Ok(GetClusterStatusResponse {
|
||||
layout_version: layout.inner().current().version,
|
||||
nodes,
|
||||
};
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
impl RequestHandler for GetClusterHealthRequest {
|
||||
type Response = GetClusterHealthResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterHealthResponse, Error> {
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
let health = garage.system.health();
|
||||
let health = ClusterHealth {
|
||||
let health = GetClusterHealthResponse {
|
||||
status: match health.status {
|
||||
ClusterHealthStatus::Healthy => "healthy",
|
||||
ClusterHealthStatus::Degraded => "degraded",
|
||||
ClusterHealthStatus::Unavailable => "unavailable",
|
||||
},
|
||||
}
|
||||
.to_string(),
|
||||
known_nodes: health.known_nodes,
|
||||
connected_nodes: health.connected_nodes,
|
||||
storage_nodes: health.storage_nodes,
|
||||
storage_nodes_ok: health.storage_nodes_ok,
|
||||
// Translating storage_nodes_up (admin API context) to storage_nodes_ok (metrics context)
|
||||
// TODO: when releasing major release, consider renaming all the fields in the metrics to storage_nodes_up
|
||||
storage_nodes_up: health.storage_nodes_ok,
|
||||
partitions: health.partitions,
|
||||
partitions_quorum: health.partitions_quorum,
|
||||
partitions_all_ok: health.partitions_all_ok,
|
||||
};
|
||||
Ok(json_ok_response(&health)?)
|
||||
Ok(health)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_connect_cluster_nodes(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<Vec<String>, _, Error>(req).await?;
|
||||
impl RequestHandler for GetClusterStatisticsRequest {
|
||||
type Response = GetClusterStatisticsResponse;
|
||||
|
||||
let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
|
||||
// FIXME: return this as a JSON struct instead of text
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterStatisticsResponse, Error> {
|
||||
let mut ret = String::new();
|
||||
|
||||
// Gather storage node and free space statistics for current nodes
|
||||
let layout = &garage.system.cluster_layout();
|
||||
let mut node_partition_count = HashMap::<Uuid, u64>::new();
|
||||
if let Ok(current_layout) = layout.current() {
|
||||
for short_id in current_layout.ring_assignment_data.iter() {
|
||||
let id = current_layout.node_id_vec[*short_id as usize];
|
||||
*node_partition_count.entry(id).or_default() += 1;
|
||||
}
|
||||
}
|
||||
let node_info = garage
|
||||
.system
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|n| (n.id, n))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
|
||||
for (id, parts) in node_partition_count.iter() {
|
||||
let info = node_info.get(id);
|
||||
let status = info.map(|x| &x.status);
|
||||
let role = layout
|
||||
.current()
|
||||
.ok()
|
||||
.and_then(|l| l.roles.get(id))
|
||||
.and_then(|x| x.0.as_ref());
|
||||
let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?");
|
||||
let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
|
||||
let capacity = role
|
||||
.map(|x| x.capacity_string())
|
||||
.unwrap_or_else(|| "?".into());
|
||||
let avail_str = |x| match x {
|
||||
Some((avail, total)) => {
|
||||
let pct = (avail as f64) / (total as f64) * 100.;
|
||||
let avail = bytesize::ByteSize::b(avail);
|
||||
let total = bytesize::ByteSize::b(total);
|
||||
format!("{}/{} ({:.1}%)", avail, total, pct)
|
||||
}
|
||||
None => "?".into(),
|
||||
};
|
||||
let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
|
||||
let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
|
||||
table.push(format!(
|
||||
" {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
|
||||
id, hostname, zone, capacity, parts, data_avail, meta_avail
|
||||
));
|
||||
}
|
||||
write!(
|
||||
&mut ret,
|
||||
"Storage nodes:\n{}",
|
||||
format_table_to_string(table)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let meta_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.meta_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let data_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.data_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
|
||||
let meta_avail =
|
||||
bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
let data_avail =
|
||||
bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
writeln!(
|
||||
&mut ret,
|
||||
"\nEstimated available storage space cluster-wide (might be lower in practice):"
|
||||
)
|
||||
.unwrap();
|
||||
if meta_part_avail.len() < node_partition_count.len()
|
||||
|| data_part_avail.len() < node_partition_count.len()
|
||||
{
|
||||
ret += &format_table_to_string(vec![
|
||||
format!(" data: < {}", data_avail),
|
||||
format!(" metadata: < {}", meta_avail),
|
||||
]);
|
||||
writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
|
||||
} else {
|
||||
ret += &format_table_to_string(vec![
|
||||
format!(" data: {}", data_avail),
|
||||
format!(" metadata: {}", meta_avail),
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetClusterStatisticsResponse { freeform: ret })
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for ConnectClusterNodesRequest {
|
||||
type Response = ConnectClusterNodesResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ConnectClusterNodesResponse, Error> {
|
||||
let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node)))
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|r| match r {
|
||||
Ok(()) => ConnectClusterNodesResponse {
|
||||
Ok(()) => ConnectNodeResponse {
|
||||
success: true,
|
||||
error: None,
|
||||
},
|
||||
Err(e) => ConnectClusterNodesResponse {
|
||||
Err(e) => ConnectNodeResponse {
|
||||
success: false,
|
||||
error: Some(format!("{}", e)),
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let res = format_cluster_layout(garage.system.cluster_layout().inner());
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
|
||||
let roles = layout
|
||||
.current()
|
||||
.roles
|
||||
.items()
|
||||
.iter()
|
||||
.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
|
||||
.map(|(k, v)| NodeRoleResp {
|
||||
id: hex::encode(k),
|
||||
zone: v.zone.clone(),
|
||||
capacity: v.capacity,
|
||||
tags: v.tags.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let staged_role_changes = layout
|
||||
.staging
|
||||
.get()
|
||||
.roles
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
|
||||
.map(|(k, _, v)| match &v.0 {
|
||||
None => NodeRoleChange {
|
||||
id: hex::encode(k),
|
||||
action: NodeRoleChangeEnum::Remove { remove: true },
|
||||
},
|
||||
Some(r) => NodeRoleChange {
|
||||
id: hex::encode(k),
|
||||
action: NodeRoleChangeEnum::Update {
|
||||
zone: r.zone.clone(),
|
||||
capacity: r.capacity,
|
||||
tags: r.tags.clone(),
|
||||
},
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
GetClusterLayoutResponse {
|
||||
version: layout.current().version,
|
||||
roles,
|
||||
staged_role_changes,
|
||||
Ok(ConnectClusterNodesResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ClusterHealth {
|
||||
status: &'static str,
|
||||
known_nodes: usize,
|
||||
connected_nodes: usize,
|
||||
storage_nodes: usize,
|
||||
storage_nodes_ok: usize,
|
||||
partitions: usize,
|
||||
partitions_quorum: usize,
|
||||
partitions_all_ok: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterStatusResponse {
|
||||
node: String,
|
||||
garage_version: &'static str,
|
||||
garage_features: Option<&'static [&'static str]>,
|
||||
rust_version: &'static str,
|
||||
db_engine: String,
|
||||
layout_version: u64,
|
||||
nodes: Vec<NodeResp>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyClusterLayoutResponse {
|
||||
message: Vec<String>,
|
||||
layout: GetClusterLayoutResponse,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ConnectClusterNodesResponse {
|
||||
success: bool,
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterLayoutResponse {
|
||||
version: u64,
|
||||
roles: Vec<NodeRoleResp>,
|
||||
staged_role_changes: Vec<NodeRoleChange>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleResp {
|
||||
id: String,
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct FreeSpaceResp {
|
||||
available: u64,
|
||||
total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeResp {
|
||||
id: String,
|
||||
role: Option<NodeRoleResp>,
|
||||
addr: Option<SocketAddr>,
|
||||
hostname: Option<String>,
|
||||
is_up: bool,
|
||||
last_seen_secs_ago: Option<u64>,
|
||||
draining: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
data_partition: Option<FreeSpaceResp>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
metadata_partition: Option<FreeSpaceResp>,
|
||||
}
|
||||
|
||||
// ---- update functions ----
|
||||
|
||||
pub async fn handle_update_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
|
||||
|
||||
let mut layout = garage.system.cluster_layout().inner().clone();
|
||||
|
||||
let mut roles = layout.current().roles.clone();
|
||||
roles.merge(&layout.staging.get().roles);
|
||||
|
||||
for change in updates {
|
||||
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
|
||||
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
|
||||
|
||||
let new_role = match change.action {
|
||||
NodeRoleChangeEnum::Remove { remove: true } => None,
|
||||
NodeRoleChangeEnum::Update {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
} => Some(layout::NodeRole {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
}),
|
||||
_ => return Err(Error::bad_request("Invalid layout change")),
|
||||
};
|
||||
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
|
||||
}
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_apply_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
|
||||
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = ApplyClusterLayoutResponse {
|
||||
message: msg,
|
||||
layout: format_cluster_layout(&layout),
|
||||
};
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_revert_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyLayoutRequest {
|
||||
version: u64,
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleChange {
|
||||
id: String,
|
||||
#[serde(flatten)]
|
||||
action: NodeRoleChangeEnum,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum NodeRoleChangeEnum {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Remove { remove: bool },
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Update {
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,10 +21,26 @@ pub enum Error {
|
|||
Common(#[from] CommonError),
|
||||
|
||||
// Category: cannot process
|
||||
/// The admin API token does not exist
|
||||
#[error("Admin token not found: {0}")]
|
||||
NoSuchAdminToken(String),
|
||||
|
||||
/// The API access key does not exist
|
||||
#[error("Access key not found: {0}")]
|
||||
NoSuchAccessKey(String),
|
||||
|
||||
/// The requested block does not exist
|
||||
#[error("Block not found: {0}")]
|
||||
NoSuchBlock(String),
|
||||
|
||||
/// The requested worker does not exist
|
||||
#[error("Worker not found: {0}")]
|
||||
NoSuchWorker(u64),
|
||||
|
||||
/// The object requested don't exists
|
||||
#[error("Key not found")]
|
||||
NoSuchKey,
|
||||
|
||||
/// In Import key, the key already exists
|
||||
#[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
|
||||
KeyAlreadyExists(String),
|
||||
|
|
@ -46,11 +62,15 @@ impl From<HelperError> for Error {
|
|||
}
|
||||
|
||||
impl Error {
|
||||
fn code(&self) -> &'static str {
|
||||
pub fn code(&self) -> &'static str {
|
||||
match self {
|
||||
Error::Common(c) => c.aws_code(),
|
||||
Error::NoSuchAdminToken(_) => "NoSuchAdminToken",
|
||||
Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
|
||||
Error::NoSuchWorker(_) => "NoSuchWorker",
|
||||
Error::NoSuchBlock(_) => "NoSuchBlock",
|
||||
Error::KeyAlreadyExists(_) => "KeyAlreadyExists",
|
||||
Error::NoSuchKey => "NoSuchKey",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -60,7 +80,11 @@ impl ApiError for Error {
|
|||
fn http_status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
Error::Common(c) => c.http_status_code(),
|
||||
Error::NoSuchAccessKey(_) => StatusCode::NOT_FOUND,
|
||||
Error::NoSuchAdminToken(_)
|
||||
| Error::NoSuchAccessKey(_)
|
||||
| Error::NoSuchWorker(_)
|
||||
| Error::NoSuchBlock(_)
|
||||
| Error::NoSuchKey => StatusCode::NOT_FOUND,
|
||||
Error::KeyAlreadyExists(_) => StatusCode::CONFLICT,
|
||||
}
|
||||
}
|
||||
|
|
@ -68,6 +92,7 @@ impl ApiError for Error {
|
|||
fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
||||
use hyper::header;
|
||||
header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap());
|
||||
}
|
||||
|
||||
fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
|
||||
|
|
|
|||
|
|
@ -1,20 +1,24 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::DateTime;
|
||||
|
||||
use garage_table::*;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::*;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
|
||||
use crate::api_server::ResBody;
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for ListKeysRequest {
|
||||
type Response = ListKeysResponse;
|
||||
|
||||
async fn handle(self, garage: &Arc<Garage>, _admin: &Admin) -> Result<ListKeysResponse, Error> {
|
||||
let now = now_msec();
|
||||
|
||||
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let res = garage
|
||||
.key_table
|
||||
.get_range(
|
||||
|
|
@ -26,148 +30,161 @@ pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>,
|
|||
)
|
||||
.await?
|
||||
.iter()
|
||||
.map(|k| ListKeyResultItem {
|
||||
.map(|k| {
|
||||
let p = k.params().unwrap();
|
||||
|
||||
ListKeysResponseItem {
|
||||
id: k.key_id.to_string(),
|
||||
name: k.params().unwrap().name.get().clone(),
|
||||
name: p.name.get().clone(),
|
||||
created: p.created.map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64)
|
||||
.expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expiration: p.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64)
|
||||
.expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: p.is_expired(now),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
Ok(ListKeysResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ListKeyResultItem {
|
||||
id: String,
|
||||
name: String,
|
||||
}
|
||||
impl RequestHandler for GetKeyInfoRequest {
|
||||
type Response = GetKeyInfoResponse;
|
||||
|
||||
pub async fn handle_get_key_info(
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
id: Option<String>,
|
||||
search: Option<String>,
|
||||
show_secret_key: bool,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let key = if let Some(id) = id {
|
||||
garage.key_helper().get_existing_key(&id).await?
|
||||
} else if let Some(search) = search {
|
||||
garage
|
||||
.key_helper()
|
||||
.get_existing_matching_key(&search)
|
||||
_admin: &Admin,
|
||||
) -> Result<GetKeyInfoResponse, Error> {
|
||||
let key = match (self.id, self.search) {
|
||||
(Some(id), None) => garage.key_helper().get_existing_key(&id).await?,
|
||||
(None, Some(search)) => {
|
||||
let candidates = garage
|
||||
.key_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(KeyFilter::MatchesAndNotDeleted(search.to_string())),
|
||||
10,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
unreachable!();
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
if candidates.len() != 1 {
|
||||
return Err(Error::bad_request(format!(
|
||||
"{} matching keys",
|
||||
candidates.len()
|
||||
)));
|
||||
}
|
||||
candidates.into_iter().next().unwrap()
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id or search must be provided (but not both)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
key_info_results(garage, key, show_secret_key).await
|
||||
Ok(key_info_results(garage, key, self.show_secret_key).await?)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_create_key(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
|
||||
impl RequestHandler for CreateKeyRequest {
|
||||
type Response = CreateKeyResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<CreateKeyResponse, Error> {
|
||||
let mut key = Key::new("Unnamed key");
|
||||
|
||||
apply_key_updates(&mut key, self.0)?;
|
||||
|
||||
let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
|
||||
garage.key_table.insert(&key).await?;
|
||||
|
||||
key_info_results(garage, key, true).await
|
||||
Ok(CreateKeyResponse(
|
||||
key_info_results(garage, key, true).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct CreateKeyRequest {
|
||||
name: Option<String>,
|
||||
}
|
||||
impl RequestHandler for ImportKeyRequest {
|
||||
type Response = ImportKeyResponse;
|
||||
|
||||
pub async fn handle_import_key(
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
|
||||
|
||||
let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
|
||||
_admin: &Admin,
|
||||
) -> Result<ImportKeyResponse, Error> {
|
||||
let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?;
|
||||
if prev_key.is_some() {
|
||||
return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
|
||||
return Err(Error::KeyAlreadyExists(self.access_key_id.to_string()));
|
||||
}
|
||||
|
||||
let imported_key = Key::import(
|
||||
&req.access_key_id,
|
||||
&req.secret_access_key,
|
||||
req.name.as_deref().unwrap_or("Imported key"),
|
||||
&self.access_key_id,
|
||||
&self.secret_access_key,
|
||||
self.name.as_deref().unwrap_or("Imported key"),
|
||||
)
|
||||
.ok_or_bad_request("Invalid key format")?;
|
||||
garage.key_table.insert(&imported_key).await?;
|
||||
|
||||
key_info_results(garage, imported_key, false).await
|
||||
Ok(ImportKeyResponse(
|
||||
key_info_results(garage, imported_key, false).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ImportKeyRequest {
|
||||
access_key_id: String,
|
||||
secret_access_key: String,
|
||||
name: Option<String>,
|
||||
}
|
||||
impl RequestHandler for UpdateKeyRequest {
|
||||
type Response = UpdateKeyResponse;
|
||||
|
||||
pub async fn handle_update_key(
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
id: String,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;
|
||||
_admin: &Admin,
|
||||
) -> Result<UpdateKeyResponse, Error> {
|
||||
let mut key = garage.key_helper().get_existing_key(&self.id).await?;
|
||||
|
||||
let mut key = garage.key_helper().get_existing_key(&id).await?;
|
||||
|
||||
let key_state = key.state.as_option_mut().unwrap();
|
||||
|
||||
if let Some(new_name) = req.name {
|
||||
key_state.name.update(new_name);
|
||||
}
|
||||
if let Some(allow) = req.allow {
|
||||
if allow.create_bucket {
|
||||
key_state.allow_create_bucket.update(true);
|
||||
}
|
||||
}
|
||||
if let Some(deny) = req.deny {
|
||||
if deny.create_bucket {
|
||||
key_state.allow_create_bucket.update(false);
|
||||
}
|
||||
}
|
||||
apply_key_updates(&mut key, self.body)?;
|
||||
|
||||
garage.key_table.insert(&key).await?;
|
||||
|
||||
key_info_results(garage, key, false).await
|
||||
Ok(UpdateKeyResponse(
|
||||
key_info_results(garage, key, false).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct UpdateKeyRequest {
|
||||
name: Option<String>,
|
||||
allow: Option<KeyPerm>,
|
||||
deny: Option<KeyPerm>,
|
||||
}
|
||||
impl RequestHandler for DeleteKeyRequest {
|
||||
type Response = DeleteKeyResponse;
|
||||
|
||||
pub async fn handle_delete_key(
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
id: String,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
_admin: &Admin,
|
||||
) -> Result<DeleteKeyResponse, Error> {
|
||||
let helper = garage.locked_helper().await;
|
||||
|
||||
let mut key = helper.key().get_existing_key(&id).await?;
|
||||
let mut key = helper.key().get_existing_key(&self.id).await?;
|
||||
|
||||
helper.delete_key(&mut key).await?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NO_CONTENT)
|
||||
.body(empty_body())?)
|
||||
Ok(DeleteKeyResponse)
|
||||
}
|
||||
}
|
||||
|
||||
async fn key_info_results(
|
||||
garage: &Arc<Garage>,
|
||||
key: Key,
|
||||
show_secret: bool,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
) -> Result<GetKeyInfoResponse, Error> {
|
||||
let mut relevant_buckets = HashMap::new();
|
||||
|
||||
let key_state = key.state.as_option().unwrap();
|
||||
|
|
@ -193,8 +210,15 @@ async fn key_info_results(
|
|||
}
|
||||
}
|
||||
|
||||
let res = GetKeyInfoResult {
|
||||
let res = GetKeyInfoResponse {
|
||||
name: key_state.name.get().clone(),
|
||||
created: key_state.created.map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expiration: key_state.expiration.get().map(|x| {
|
||||
DateTime::from_timestamp_millis(x as i64).expect("invalid timestamp stored in db")
|
||||
}),
|
||||
expired: key_state.is_expired(now_msec()),
|
||||
access_key_id: key.key_id.clone(),
|
||||
secret_access_key: if show_secret {
|
||||
Some(key_state.secret_key.clone())
|
||||
|
|
@ -206,9 +230,18 @@ async fn key_info_results(
|
|||
},
|
||||
buckets: relevant_buckets
|
||||
.into_values()
|
||||
.map(|bucket| {
|
||||
.filter_map(|bucket| {
|
||||
let state = bucket.state.as_option().unwrap();
|
||||
KeyInfoBucketResult {
|
||||
let permissions = key_state
|
||||
.authorized_buckets
|
||||
.get(&bucket.id)
|
||||
.filter(|p| p.is_any())
|
||||
.map(|p| ApiBucketKeyPerm {
|
||||
read: p.allow_read,
|
||||
write: p.allow_write,
|
||||
owner: p.allow_owner,
|
||||
})?;
|
||||
Some(KeyInfoBucketResponse {
|
||||
id: hex::encode(bucket.id),
|
||||
global_aliases: state
|
||||
.aliases
|
||||
|
|
@ -224,57 +257,45 @@ async fn key_info_results(
|
|||
.filter(|((k, _), _, a)| *a && *k == key.key_id)
|
||||
.map(|((_, n), _, _)| n.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
permissions: key_state
|
||||
.authorized_buckets
|
||||
.get(&bucket.id)
|
||||
.map(|p| ApiBucketKeyPerm {
|
||||
read: p.allow_read,
|
||||
write: p.allow_write,
|
||||
owner: p.allow_owner,
|
||||
permissions,
|
||||
})
|
||||
.unwrap_or_default(),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
};
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetKeyInfoResult {
|
||||
name: String,
|
||||
access_key_id: String,
|
||||
#[serde(skip_serializing_if = "is_default")]
|
||||
secret_access_key: Option<String>,
|
||||
permissions: KeyPerm,
|
||||
buckets: Vec<KeyInfoBucketResult>,
|
||||
fn apply_key_updates(key: &mut Key, updates: UpdateKeyRequestBody) -> Result<(), Error> {
|
||||
if updates.never_expires && updates.expiration.is_some() {
|
||||
return Err(Error::bad_request(
|
||||
"cannot specify `expiration` and `never_expires`",
|
||||
));
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct KeyPerm {
|
||||
#[serde(default)]
|
||||
create_bucket: bool,
|
||||
let key_state = key.state.as_option_mut().unwrap();
|
||||
|
||||
if let Some(new_name) = updates.name {
|
||||
key_state.name.update(new_name);
|
||||
}
|
||||
if let Some(expiration) = updates.expiration {
|
||||
key_state
|
||||
.expiration
|
||||
.update(Some(expiration.timestamp_millis() as u64));
|
||||
}
|
||||
if updates.never_expires {
|
||||
key_state.expiration.update(None);
|
||||
}
|
||||
if let Some(allow) = updates.allow {
|
||||
if allow.create_bucket {
|
||||
key_state.allow_create_bucket.update(true);
|
||||
}
|
||||
}
|
||||
if let Some(deny) = updates.deny {
|
||||
if deny.create_bucket {
|
||||
key_state.allow_create_bucket.update(false);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct KeyInfoBucketResult {
|
||||
id: String,
|
||||
global_aliases: Vec<String>,
|
||||
local_aliases: Vec<String>,
|
||||
permissions: ApiBucketKeyPerm,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(crate) struct ApiBucketKeyPerm {
|
||||
#[serde(default)]
|
||||
pub(crate) read: bool,
|
||||
#[serde(default)]
|
||||
pub(crate) write: bool,
|
||||
#[serde(default)]
|
||||
pub(crate) owner: bool,
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
406
src/api/admin/layout.rs
Normal file
|
|
@ -0,0 +1,406 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error as GarageError;
|
||||
|
||||
use garage_rpc::layout;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for GetClusterLayoutRequest {
|
||||
type Response = GetClusterLayoutResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterLayoutResponse, Error> {
|
||||
Ok(format_cluster_layout(
|
||||
garage.system.cluster_layout().inner(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a `GetClusterLayoutResponse` from the raw layout history:
/// current node roles with usage figures, plus staged (not yet applied)
/// role and parameter changes.
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
	let current = layout.current();

	// Nodes with an assigned role in the current layout version.
	// NodeRoleV wraps an Option: entries whose inner role is None are skipped.
	let roles = current
		.roles
		.items()
		.iter()
		.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
		.map(|(k, v)| {
			// Number of partitions this node stores; errors are mapped to
			// None (NOTE(review): presumably the gateway-node case — confirm)
			let stored_partitions = current.get_node_usage(k).ok().map(|x| x as u64);
			LayoutNodeRole {
				id: hex::encode(k),
				zone: v.zone.clone(),
				capacity: v.capacity,
				stored_partitions,
				// Bytes effectively usable: partitions held × partition size
				usable_capacity: stored_partitions.map(|x| x * current.partition_size),
				tags: v.tags.clone(),
			}
		})
		.collect::<Vec<_>>();

	// Staged role entries that differ from the current role table:
	// an inner None means the node is scheduled for removal, Some(r)
	// means its role will be created or updated.
	let staged_role_changes = layout
		.staging
		.get()
		.roles
		.items()
		.iter()
		.filter(|(k, _, v)| current.roles.get(k) != Some(v))
		.map(|(k, _, v)| match &v.0 {
			None => NodeRoleChange {
				id: hex::encode(k),
				action: NodeRoleChangeEnum::Remove { remove: true },
			},
			Some(r) => NodeRoleChange {
				id: hex::encode(k),
				action: NodeRoleChangeEnum::Update(NodeAssignedRole {
					zone: r.zone.clone(),
					capacity: r.capacity,
					tags: r.tags.clone(),
				}),
			},
		})
		.collect::<Vec<_>>();

	// Only report staged parameters if they differ from the active ones.
	let staged_parameters = if *layout.staging.get().parameters.get() != current.parameters {
		Some((*layout.staging.get().parameters.get()).into())
	} else {
		None
	};

	GetClusterLayoutResponse {
		version: current.version,
		roles,
		partition_size: current.partition_size,
		parameters: current.parameters.into(),
		staged_role_changes,
		staged_parameters,
	}
}
|
||||
|
||||
impl RequestHandler for GetClusterLayoutHistoryRequest {
	type Response = GetClusterLayoutHistoryResponse;

	/// Return the history of cluster layout versions, newest first, plus
	/// per-node update trackers while a layout transition is in progress.
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetClusterLayoutHistoryResponse, Error> {
		let layout_helper = garage.system.cluster_layout();
		let layout = layout_helper.inner();
		// Oldest layout version still referenced by the update trackers
		let min_stored = layout.min_stored();

		// Iterate newest-first: live versions, then archived old versions.
		let versions = layout
			.versions
			.iter()
			.rev()
			.chain(layout.old_versions.iter().rev())
			.map(|ver| {
				// Classify each version:
				// - Current: the active layout version
				// - Draining: older, but >= min_stored (still live)
				// - Historical: fully superseded
				let status = if ver.version == layout.current().version {
					ClusterLayoutVersionStatus::Current
				} else if ver.version >= min_stored {
					ClusterLayoutVersionStatus::Draining
				} else {
					ClusterLayoutVersionStatus::Historical
				};
				ClusterLayoutVersion {
					version: ver.version,
					status,
					// Roles with a capacity are storage nodes...
					storage_nodes: ver
						.roles
						.items()
						.iter()
						.filter(
							|(_, _, x)| matches!(x, layout::NodeRoleV(Some(c)) if c.capacity.is_some()),
						)
						.count() as u64,
					// ...roles without a capacity are gateway nodes.
					gateway_nodes: ver
						.roles
						.items()
						.iter()
						.filter(
							|(_, _, x)| matches!(x, layout::NodeRoleV(Some(c)) if c.capacity.is_none()),
						)
						.count() as u64,
				}
			})
			.collect::<Vec<_>>();

		let all_nodes = layout.get_all_nodes();
		let min_ack = layout_helper.ack_map_min();

		// Per-node tracker values are only reported while more than one
		// layout version is live, i.e. a transition is ongoing.
		let update_trackers = if layout.versions.len() > 1 {
			Some(
				all_nodes
					.iter()
					.map(|node| {
						(
							hex::encode(&node),
							NodeUpdateTrackers {
								ack: layout.update_trackers.ack_map.get(node, min_stored),
								sync: layout.update_trackers.sync_map.get(node, min_stored),
								sync_ack: layout.update_trackers.sync_ack_map.get(node, min_stored),
							},
						)
					})
					.collect(),
			)
		} else {
			None
		};

		Ok(GetClusterLayoutHistoryResponse {
			current_version: layout.current().version,
			min_ack,
			versions,
			update_trackers,
		})
	}
}
|
||||
|
||||
// ----
|
||||
|
||||
// ---- update functions ----
|
||||
|
||||
impl RequestHandler for UpdateClusterLayoutRequest {
	type Response = UpdateClusterLayoutResponse;

	/// Stage role and/or parameter changes to the cluster layout.
	/// Changes are only staged here; they take effect through a subsequent
	/// ApplyClusterLayout request.
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<UpdateClusterLayoutResponse, Error> {
		// Work on a local copy; it is pushed back to the cluster at the end.
		let mut layout = garage.system.cluster_layout().inner().clone();

		// Effective role table = current roles + already-staged changes,
		// so that successive update requests compose.
		let mut roles = layout.current().roles.clone();
		roles.merge(&layout.staging.get().roles);

		for change in self.roles {
			// Node identifiers are transmitted as hex strings
			let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
			let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;

			let new_role = match change.action {
				// `remove: true` un-assigns the node's role
				NodeRoleChangeEnum::Remove { remove: true } => None,
				NodeRoleChangeEnum::Update(NodeAssignedRole {
					zone,
					capacity,
					tags,
				}) => {
					// Capacity is expressed in bytes; values below 1024 are
					// rejected (almost certainly a unit mistake).
					if matches!(capacity, Some(cap) if cap < 1024) {
						return Err(Error::bad_request("Capacity should be at least 1K (1024)"));
					}
					Some(layout::NodeRole {
						zone,
						capacity,
						tags,
					})
				}
				// e.g. `remove: false`, which carries no meaning
				_ => return Err(Error::bad_request("Invalid layout change")),
			};

			// Record the change in the staging CRDT through an update
			// mutator so it merges deterministically cluster-wide.
			layout
				.staging
				.get_mut()
				.roles
				.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
		}

		if let Some(param) = self.parameters {
			// Zone redundancy must lie within [1, replication_factor].
			if let ZoneRedundancy::AtLeast(r_int) = param.zone_redundancy {
				if r_int > layout.current().replication_factor {
					return Err(Error::bad_request(format!(
						"The zone redundancy must be smaller or equal to the replication factor ({}).",
						layout.current().replication_factor
					)));
				} else if r_int < 1 {
					return Err(Error::bad_request(
						"The zone redundancy must be at least 1.",
					));
				}
			}
			layout.staging.get_mut().parameters.update(param.into());
		}

		// Propagate the layout (with its new staged changes) to the cluster.
		garage
			.system
			.layout_manager
			.update_cluster_layout(&layout)
			.await?;

		let res = format_cluster_layout(&layout);
		Ok(UpdateClusterLayoutResponse(res))
	}
}
|
||||
|
||||
impl RequestHandler for PreviewClusterLayoutChangesRequest {
|
||||
type Response = PreviewClusterLayoutChangesResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<PreviewClusterLayoutChangesResponse, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let new_ver = layout.current().version + 1;
|
||||
match layout.apply_staged_changes(new_ver) {
|
||||
Err(GarageError::Message(error)) => {
|
||||
Ok(PreviewClusterLayoutChangesResponse::Error { error })
|
||||
}
|
||||
Err(e) => Err(e.into()),
|
||||
Ok((new_layout, msg)) => Ok(PreviewClusterLayoutChangesResponse::Success {
|
||||
message: msg,
|
||||
new_layout: format_cluster_layout(&new_layout),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for ApplyClusterLayoutRequest {
|
||||
type Response = ApplyClusterLayoutResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ApplyClusterLayoutResponse, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let (layout, msg) = layout.apply_staged_changes(self.version)?;
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
Ok(ApplyClusterLayoutResponse {
|
||||
message: msg,
|
||||
layout: format_cluster_layout(&layout),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for RevertClusterLayoutRequest {
|
||||
type Response = RevertClusterLayoutResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<RevertClusterLayoutResponse, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(RevertClusterLayoutResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for ClusterLayoutSkipDeadNodesRequest {
	type Response = ClusterLayoutSkipDeadNodesResponse;

	/// Force the ACK (and, with `allow_missing_data`, SYNC) update trackers
	/// forward to the given layout version, so that a layout transition
	/// blocked on unresponsive nodes can complete.
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<ClusterLayoutSkipDeadNodesResponse, Error> {
		// Liveness information for all known nodes
		let status = garage.system.get_known_nodes();

		let mut layout = garage.system.cluster_layout().inner().clone();
		let mut ack_updated = vec![];
		let mut sync_updated = vec![];

		// With a single live layout version there is no transition in
		// progress, hence nothing to skip.
		if layout.versions.len() == 1 {
			return Err(Error::bad_request(
				"This command cannot be called when there is only one live cluster layout version",
			));
		}

		// Target version must be newer than the oldest stored version and
		// no newer than the current one.
		let min_v = layout.min_stored();
		if self.version <= min_v || self.version > layout.current().version {
			return Err(Error::bad_request(format!(
				"Invalid version, you may use the following version numbers: {}",
				(min_v + 1..=layout.current().version)
					.map(|x| x.to_string())
					.collect::<Vec<_>>()
					.join(" ")
			)));
		}

		let all_nodes = layout.get_all_nodes();
		for node in all_nodes.iter() {
			// Update ACK tracker for dead nodes or for all nodes if --allow-missing-data
			if self.allow_missing_data || !status.iter().any(|x| x.id == *node && x.is_up) {
				// set_max's boolean result drives the report list
				// (presumably true iff the tracker value advanced — confirm)
				if layout.update_trackers.ack_map.set_max(*node, self.version) {
					ack_updated.push(hex::encode(node));
				}
			}

			// If --allow-missing-data, update SYNC tracker for all nodes.
			if self.allow_missing_data {
				if layout.update_trackers.sync_map.set_max(*node, self.version) {
					sync_updated.push(hex::encode(node));
				}
			}
		}

		// Broadcast the modified trackers to the cluster.
		garage
			.system
			.layout_manager
			.update_cluster_layout(&layout)
			.await?;

		Ok(ClusterLayoutSkipDeadNodesResponse {
			ack_updated,
			sync_updated,
		})
	}
}
|
||||
|
||||
// ----
|
||||
|
||||
/// Convert the internal zone-redundancy representation into its API form.
impl From<layout::ZoneRedundancy> for ZoneRedundancy {
	fn from(value: layout::ZoneRedundancy) -> Self {
		match value {
			layout::ZoneRedundancy::Maximum => Self::Maximum,
			layout::ZoneRedundancy::AtLeast(n) => Self::AtLeast(n),
		}
	}
}
|
||||
|
||||
impl Into<layout::ZoneRedundancy> for ZoneRedundancy {
|
||||
fn into(self) -> layout::ZoneRedundancy {
|
||||
match self {
|
||||
ZoneRedundancy::Maximum => layout::ZoneRedundancy::Maximum,
|
||||
ZoneRedundancy::AtLeast(x) => layout::ZoneRedundancy::AtLeast(x),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the internal layout parameters into their API representation.
impl From<layout::LayoutParameters> for LayoutParameters {
	fn from(value: layout::LayoutParameters) -> Self {
		Self {
			zone_redundancy: value.zone_redundancy.into(),
		}
	}
}
|
||||
|
||||
/// Convert the API layout parameters into the internal form.
///
/// Implemented as `From` rather than a hand-written `Into`
/// (clippy `from_over_into`): the standard blanket impl derives the
/// matching `Into`, so existing `.into()` call sites keep working.
impl From<LayoutParameters> for layout::LayoutParameters {
	fn from(value: LayoutParameters) -> Self {
		layout::LayoutParameters {
			zone_redundancy: value.zone_redundancy.into(),
		}
	}
}
|
||||
|
|
@ -3,9 +3,44 @@ extern crate tracing;
|
|||
|
||||
pub mod api_server;
|
||||
mod error;
|
||||
mod macros;
|
||||
|
||||
pub mod api;
|
||||
pub mod openapi;
|
||||
mod router_v0;
|
||||
mod router_v1;
|
||||
mod router_v2;
|
||||
|
||||
mod admin_token;
|
||||
mod bucket;
|
||||
mod cluster;
|
||||
mod key;
|
||||
mod layout;
|
||||
mod special;
|
||||
|
||||
mod block;
|
||||
mod node;
|
||||
mod repair;
|
||||
mod worker;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
pub use api_server::AdminApiServer as Admin;
|
||||
|
||||
/// Level of authorization required to call an admin API endpoint.
/// (Enforcement happens in the API server, not visible in this module.)
pub enum Authorization {
	// No token required
	None,
	// Accessible with the metrics bearer token
	MetricsToken,
	// Requires the admin bearer token
	AdminToken,
}
|
||||
|
||||
/// Trait implemented by every admin API request type: turns the request
/// into its response, given access to the node's Garage state and to the
/// admin API server.
pub trait RequestHandler {
	// Response type produced when this request is handled
	type Response;

	// Handle the request. Declared as a plain fn returning a `Send` future
	// (rather than `async fn`) so the future can be awaited from
	// multi-threaded executors.
	fn handle(
		self,
		garage: &Arc<Garage>,
		admin: &Admin,
	) -> impl std::future::Future<Output = Result<Self::Response, error::Error>> + Send;
}
|
||||
|
|
|
|||
208
src/api/admin/macros.rs
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
/// Generate the unified request/response enums and dispatch plumbing for
/// all admin API endpoints.
///
/// Endpoints marked `@special` get a variant in `AdminApiRequest` but no
/// response variant and no handler dispatch: calling one through
/// `AdminApiRequest::handle` is rejected with a BadRequest error.
macro_rules! admin_endpoints {
	[
		$(@special $special_endpoint:ident,)*
		$($endpoint:ident,)*
	] => {
		paste! {
			// One variant per endpoint, wrapping the endpoint's request type
			// (named `<Endpoint>Request` by convention, built with paste!).
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum AdminApiRequest {
				$(
					$special_endpoint( [<$special_endpoint Request>] ),
				)*
				$(
					$endpoint( [<$endpoint Request>] ),
				)*
			}

			// Untagged response enum: serializes as the bare response body.
			#[derive(Debug, Clone, Serialize)]
			#[serde(untagged)]
			pub enum AdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			// Tagged counterpart of AdminApiResponse; being tagged it can
			// also derive Deserialize, which the untagged enum does not.
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum TaggedAdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			impl AdminApiRequest {
				/// Name of the endpoint this request belongs to
				/// (the bare endpoint identifier as a static string).
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$special_endpoint(_) => stringify!($special_endpoint),
						)*
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			impl AdminApiResponse {
				/// Convert an untagged response into its tagged counterpart.
				pub fn tagged(self) -> TaggedAdminApiResponse {
					match self {
						$(
							Self::$endpoint(res) => TaggedAdminApiResponse::$endpoint(res),
						)*
					}
				}
			}

			$(
				// Wrap a plain request value into the unified request enum.
				impl From< [< $endpoint Request >] > for AdminApiRequest {
					fn from(req: [< $endpoint Request >]) -> AdminApiRequest {
						AdminApiRequest::$endpoint(req)
					}
				}

				// Extract a specific response type from a tagged response;
				// on variant mismatch the original value is handed back.
				impl TryFrom<TaggedAdminApiResponse> for [< $endpoint Response >] {
					type Error = TaggedAdminApiResponse;
					fn try_from(resp: TaggedAdminApiResponse) -> Result< [< $endpoint Response >], TaggedAdminApiResponse> {
						match resp {
							TaggedAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}
			)*

			impl RequestHandler for AdminApiRequest {
				type Response = AdminApiResponse;

				/// Dispatch to the per-endpoint handler. Special endpoints
				/// are rejected: they are only served by the HTTP Admin API.
				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<AdminApiResponse, Error> {
					match self {
						$(
							AdminApiRequest::$special_endpoint(_) => Err(
								Error::Common(CommonError::BadRequest(
									concat!(stringify!($special_endpoint), " cannot be used outside of the HTTP Admin API").into()
								))
							),
						)*
						$(
							AdminApiRequest::$endpoint(req) => Ok(AdminApiResponse::$endpoint(req.handle(garage, admin).await?)),
						)*
					}
				}
			}
		}
	};
}
|
||||
|
||||
/// Generate the request/response machinery for "local" admin endpoints,
/// i.e. endpoints executed on one or several specific cluster nodes.
///
/// For each endpoint `X`, per-node types `LocalXRequest`/`LocalXResponse`
/// must exist elsewhere; this macro derives the cluster-wide `XRequest` /
/// `XResponse` types (`MultiRequest`/`MultiResponse` wrappers) and a
/// `RequestHandler` impl that fans the request out over RPC and collects
/// per-node results.
macro_rules! local_admin_endpoints {
	[
		$($endpoint:ident,)*
	] => {
		paste! {
			// Union of all per-node request types
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiRequest {
				$(
					$endpoint( [<Local $endpoint Request>] ),
				)*
			}

			// Union of all per-node response types
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiResponse {
				$(
					$endpoint( [<Local $endpoint Response>] ),
				)*
			}

			$(
				// Cluster-wide request: node selector + per-node request body
				pub type [< $endpoint Request >] = MultiRequest< [< Local $endpoint Request >] >;

				// Convenience alias for the per-node request body type
				pub type [< $endpoint RequestBody >] = [< Local $endpoint Request >];

				// Cluster-wide response: per-node success/error maps
				pub type [< $endpoint Response >] = MultiResponse< [< Local $endpoint Response >] >;

				impl From< [< Local $endpoint Request >] > for LocalAdminApiRequest {
					fn from(req: [< Local $endpoint Request >]) -> LocalAdminApiRequest {
						LocalAdminApiRequest::$endpoint(req)
					}
				}

				// Recover the typed per-node response; on variant mismatch
				// the original value is handed back.
				impl TryFrom<LocalAdminApiResponse> for [< Local $endpoint Response >] {
					type Error = LocalAdminApiResponse;
					fn try_from(resp: LocalAdminApiResponse) -> Result< [< Local $endpoint Response >], LocalAdminApiResponse> {
						match resp {
							LocalAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}

				impl RequestHandler for [< $endpoint Request >] {
					type Response = [< $endpoint Response >];

					/// Fan the request out to every node matching `self.node`
					/// and gather each node's answer into success/error maps
					/// keyed by hex-encoded node id.
					async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<Self::Response, Error> {
						// Resolve the node selector string into node ids
						let to = find_matching_nodes(garage, self.node.as_str())?;

						// Send the local request body to all targets over RPC
						let resps = garage.system.rpc_helper().call_many(&admin.endpoint,
							&to,
							AdminRpc::Internal(self.body.into()),
							RequestStrategy::with_priority(PRIO_NORMAL),
						).await?;

						let mut ret = [< $endpoint Response >] {
							success: HashMap::new(),
							error: HashMap::new(),
						};
						for (node, resp) in resps {
							match resp {
								// Node answered with an internal API response:
								// downcast it to this endpoint's response type.
								Ok(AdminRpcResponse::InternalApiOkResponse(r)) => {
									match [< Local $endpoint Response >]::try_from(r) {
										Ok(r) => {
											ret.success.insert(hex::encode(node), r);
										}
										Err(_) => {
											ret.error.insert(hex::encode(node), "returned invalid value".to_string());
										}
									}
								}
								// Node answered with a structured API error
								Ok(AdminRpcResponse::ApiErrorResponse{error_code, http_code, message}) => {
									ret.error.insert(hex::encode(node), format!("{} ({}): {}", error_code, http_code, message));
								}
								// Any other RPC response variant is unexpected
								Ok(_) => {
									ret.error.insert(hex::encode(node), "returned invalid value".to_string());
								}
								// RPC-level failure (e.g. node unreachable)
								Err(e) => {
									ret.error.insert(hex::encode(node), e.to_string());
								}
							}
						}

						Ok(ret)
					}
				}
			)*

			impl LocalAdminApiRequest {
				/// Name of the endpoint this local request belongs to.
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			impl RequestHandler for LocalAdminApiRequest {
				type Response = LocalAdminApiResponse;

				/// Dispatch a per-node request to its local handler.
				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<LocalAdminApiResponse, Error> {
					Ok(match self {
						$(
							LocalAdminApiRequest::$endpoint(req) => LocalAdminApiResponse::$endpoint(req.handle(garage, admin).await?),
						)*
					})
				}
			}
		}
	};
}
|
||||
|
||||
pub(crate) use admin_endpoints;
|
||||
pub(crate) use local_admin_endpoints;
|
||||
149
src/api/admin/node.rs
Normal file
|
|
@ -0,0 +1,149 @@
|
|||
use std::fmt::Write;
|
||||
use std::sync::Arc;
|
||||
|
||||
use format_table::format_table_to_string;
|
||||
|
||||
use garage_util::error::Error as GarageError;
|
||||
|
||||
use garage_table::replication::*;
|
||||
use garage_table::*;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::Error;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for LocalGetNodeInfoRequest {
|
||||
type Response = LocalGetNodeInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalGetNodeInfoResponse, Error> {
|
||||
Ok(LocalGetNodeInfoResponse {
|
||||
node_id: hex::encode(garage.system.id),
|
||||
garage_version: garage_util::version::garage_version().to_string(),
|
||||
garage_features: garage_util::version::garage_features()
|
||||
.map(|features| features.iter().map(ToString::to_string).collect()),
|
||||
rust_version: garage_util::version::rust_version().to_string(),
|
||||
db_engine: garage.db.engine(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalCreateMetadataSnapshotRequest {
	type Response = LocalCreateMetadataSnapshotResponse;

	/// Trigger a snapshot of this node's metadata database.
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalCreateMetadataSnapshotResponse, Error> {
		// Delegate to the async snapshot routine; errors propagate to the caller.
		garage_model::snapshot::async_snapshot_metadata(garage).await?;
		Ok(LocalCreateMetadataSnapshotResponse)
	}
}
|
||||
|
||||
impl RequestHandler for LocalGetNodeStatisticsRequest {
	type Response = LocalGetNodeStatisticsResponse;

	// FIXME: return this as a JSON struct instead of text
	/// Assemble a free-form, human-readable statistics report for this
	/// node: identity/version info, per-table counters, and block-manager
	/// counters. Callers must not parse the returned text (format unstable).
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalGetNodeStatisticsResponse, Error> {
		let sys_status = garage.system.local_status();

		// General node information, one tab-aligned "label: value" line each
		let mut ret = format_table_to_string(vec![
			format!("Node ID:\t{:?}", garage.system.id),
			format!("Hostname:\t{}", sys_status.hostname.unwrap_or_default(),),
			format!(
				"Garage version:\t{}",
				garage_util::version::garage_version(),
			),
			format!(
				"Garage features:\t{}",
				garage_util::version::garage_features()
					.map(|list| list.join(", "))
					.unwrap_or_else(|| "(unknown)".into()),
			),
			format!(
				"Rust compiler version:\t{}",
				garage_util::version::rust_version(),
			),
			format!("Database engine:\t{}", garage.db.engine()),
		]);

		// Gather table statistics
		let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tInsQueue\tGcTodo".into()];
		table.push(gather_table_stats(&garage.admin_token_table)?);
		table.push(gather_table_stats(&garage.bucket_table)?);
		table.push(gather_table_stats(&garage.bucket_alias_table)?);
		table.push(gather_table_stats(&garage.key_table)?);

		table.push(gather_table_stats(&garage.object_table)?);
		table.push(gather_table_stats(&garage.object_counter_table.table)?);
		table.push(gather_table_stats(&garage.mpu_table)?);
		table.push(gather_table_stats(&garage.mpu_counter_table.table)?);
		table.push(gather_table_stats(&garage.version_table)?);
		table.push(gather_table_stats(&garage.block_ref_table)?);

		// K2V tables only exist when the k2v feature is compiled in
		#[cfg(feature = "k2v")]
		{
			table.push(gather_table_stats(&garage.k2v.item_table)?);
			table.push(gather_table_stats(&garage.k2v.counter_table.table)?);
		}

		// Writing into a String cannot fail, hence the unwraps below
		write!(
			&mut ret,
			"\nTable stats:\n{}",
			format_table_to_string(table)
		)
		.unwrap();

		// Gather block manager statistics
		writeln!(&mut ret, "\nBlock manager stats:").unwrap();
		let rc_len = garage.block_manager.rc_approximate_len()?.to_string();

		ret += &format_table_to_string(vec![
			format!(" number of RC entries:\t{} (~= number of blocks)", rc_len),
			format!(
				" resync queue length:\t{}",
				garage.block_manager.resync.queue_approximate_len()?
			),
			format!(
				" blocks with resync errors:\t{}",
				garage.block_manager.resync.errors_approximate_len()?
			),
		]);

		Ok(LocalGetNodeStatisticsResponse { freeform: ret })
	}
}
|
||||
|
||||
fn gather_table_stats<F, R>(t: &Arc<Table<F, R>>) -> Result<String, Error>
|
||||
where
|
||||
F: TableSchema + 'static,
|
||||
R: TableReplication + 'static,
|
||||
{
|
||||
let data_len = t
|
||||
.data
|
||||
.store
|
||||
.approximate_len()
|
||||
.map_err(GarageError::from)?
|
||||
.to_string();
|
||||
let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string();
|
||||
|
||||
Ok(format!(
|
||||
" {}\t{}\t{}\t{}\t{}\t{}",
|
||||
F::TABLE_NAME,
|
||||
data_len,
|
||||
mkl_len,
|
||||
t.merkle_updater.todo_approximate_len()?,
|
||||
t.data.insert_queue_approximate_len()?,
|
||||
t.data.gc_todo_approximate_len()?
|
||||
))
|
||||
}
|
||||
961
src/api/admin/openapi.rs
Normal file
|
|
@ -0,0 +1,961 @@
|
|||
#![allow(dead_code)]
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::{Modify, OpenApi, ToSchema};
|
||||
|
||||
use crate::api::*;
|
||||
|
||||
// **********************************************
|
||||
// Special endpoints
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/metrics",
|
||||
tag = "Special endpoints",
|
||||
description = "Prometheus metrics endpoint",
|
||||
security((), ("bearerAuth" = [])),
|
||||
responses(
|
||||
(status = 200, description = "Garage daemon metrics exported in Prometheus format"),
|
||||
),
|
||||
)]
|
||||
fn Metrics() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/health",
|
||||
tag = "Special endpoints",
|
||||
description = "
|
||||
Check cluster health. The status code returned by this function indicates
|
||||
whether this Garage daemon can answer API requests.
|
||||
Garage will return `200 OK` even if some storage nodes are disconnected,
|
||||
as long as it is able to have a quorum of nodes for read and write operations.
|
||||
",
|
||||
security(()),
|
||||
responses(
|
||||
(status = 200, description = "Garage is able to answer requests"),
|
||||
(status = 503, description = "This Garage daemon is not able to handle requests")
|
||||
),
|
||||
)]
|
||||
fn Health() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/check",
|
||||
tag = "Special endpoints",
|
||||
description = "
|
||||
Static website domain name check. Checks whether a bucket is configured to serve
|
||||
a static website for the requested domain. This is used by reverse proxies such
|
||||
as Caddy or Tricot, to avoid requesting TLS certificates for domain names that
|
||||
do not correspond to an actual website.
|
||||
",
|
||||
params(CheckDomainRequest),
|
||||
security(()),
|
||||
responses(
|
||||
(status = 200, description = "The domain name redirects to a static website bucket"),
|
||||
(status = 400, description = "No static website bucket exists for this domain")
|
||||
),
|
||||
)]
|
||||
fn CheckDomain() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Cluster operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetClusterStatus",
|
||||
tag = "Cluster",
|
||||
description = "
|
||||
Returns the cluster's current status, including:
|
||||
|
||||
- ID of the node being queried and its version of the Garage daemon
|
||||
- Live nodes
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
|
||||
*Capacity is given in bytes*
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Cluster status report", body = GetClusterStatusResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetClusterStatus() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetClusterHealth",
|
||||
tag = "Cluster",
|
||||
description = "Returns the global status of the cluster, the number of connected nodes (over the number of known ones), the number of healthy storage nodes (over the declared ones), and the number of healthy partitions (over the total).",
|
||||
responses(
|
||||
(status = 200, description = "Cluster health report", body = GetClusterHealthResponse),
|
||||
),
|
||||
)]
|
||||
fn GetClusterHealth() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetClusterStatistics",
|
||||
tag = "Cluster",
|
||||
description = "
|
||||
Fetch global cluster statistics.
|
||||
|
||||
*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Global cluster statistics", body = GetClusterStatisticsResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetClusterStatistics() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/ConnectClusterNodes",
|
||||
tag = "Cluster",
|
||||
description = "Instructs this Garage node to connect to other Garage nodes at specified `<node_id>@<net_address>`. `node_id` is generated automatically on node start.",
|
||||
request_body=ConnectClusterNodesRequest,
|
||||
responses(
|
||||
(status = 200, description = "The request has been handled correctly but it does not mean that all connection requests succeeded; some might have fail, you need to check the body!", body = ConnectClusterNodesResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ConnectClusterNodes() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Admin API token operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/ListAdminTokens",
|
||||
tag = "Admin API token",
|
||||
description = "Returns all admin API tokens in the cluster.",
|
||||
responses(
|
||||
(status = 200, description = "Returns info about all admin API tokens", body = ListAdminTokensResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ListAdminTokens() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetAdminTokenInfo",
|
||||
tag = "Admin API token",
|
||||
description = "
|
||||
Return information about a specific admin API token.
|
||||
You can search by specifying the exact token identifier (`id`) or by specifying a pattern (`search`).
|
||||
",
|
||||
params(GetAdminTokenInfoRequest),
|
||||
responses(
|
||||
(status = 200, description = "Information about the admin token", body = GetAdminTokenInfoResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetAdminTokenInfo() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/CreateAdminToken",
|
||||
tag = "Admin API token",
|
||||
description = "Creates a new admin API token",
|
||||
request_body = UpdateAdminTokenRequestBody,
|
||||
responses(
|
||||
(status = 200, description = "Admin token has been created", body = CreateAdminTokenResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn CreateAdminToken() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/UpdateAdminToken",
|
||||
tag = "Admin API token",
|
||||
description = "
|
||||
Updates information about the specified admin API token.
|
||||
",
|
||||
request_body = UpdateAdminTokenRequestBody,
|
||||
params(UpdateAdminTokenRequest),
|
||||
responses(
|
||||
(status = 200, description = "Admin token has been updated", body = UpdateAdminTokenResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn UpdateAdminToken() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/DeleteAdminToken",
|
||||
tag = "Admin API token",
|
||||
description = "Delete an admin API token from the cluster, revoking all its permissions.",
|
||||
params(DeleteAdminTokenRequest),
|
||||
responses(
|
||||
(status = 200, description = "Admin token has been deleted"),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn DeleteAdminToken() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetCurrentAdminTokenInfo",
|
||||
tag = "Admin API token",
|
||||
description = "
|
||||
Return information about the calling admin API token.
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Information about the admin token", body = GetCurrentAdminTokenInfoResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetCurrentAdminTokenInfo() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Layout operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetClusterLayout",
|
||||
tag = "Cluster layout",
|
||||
description = "
|
||||
Returns the cluster's current layout, including:
|
||||
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
|
||||
*Capacity is given in bytes*
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Current cluster layout", body = GetClusterLayoutResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetClusterLayout() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetClusterLayoutHistory",
|
||||
tag = "Cluster layout",
|
||||
description = "
|
||||
Returns the history of layouts in the cluster
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Cluster layout history", body = GetClusterLayoutHistoryResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetClusterLayoutHistory() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/UpdateClusterLayout",
|
||||
tag = "Cluster layout",
|
||||
description = "
|
||||
Send modifications to the cluster layout. These modifications will be included in the staged role changes, visible in subsequent calls of `GET /GetClusterHealth`. Once the set of staged changes is satisfactory, the user may call `POST /ApplyClusterLayout` to apply the changed changes, or `POST /RevertClusterLayout` to clear all of the staged changes in the layout.
|
||||
|
||||
Setting the capacity to `null` will configure the node as a gateway.
|
||||
Otherwise, capacity must be now set in bytes (before Garage 0.9 it was arbitrary weights).
|
||||
For example to declare 100GB, you must set `capacity: 100000000000`.
|
||||
|
||||
Garage uses internally the International System of Units (SI), it assumes that 1kB = 1000 bytes, and displays storage as kB, MB, GB (and not KiB, MiB, GiB that assume 1KiB = 1024 bytes).
|
||||
",
|
||||
request_body(
|
||||
content=UpdateClusterLayoutRequestOpenapi,
|
||||
description="
|
||||
To add a new node to the layout or to change the configuration of an existing node, simply set the values you want (`zone`, `capacity`, and `tags`).
|
||||
To remove a node, simply pass the `remove: true` field.
|
||||
This logic is represented in OpenAPI with a 'One Of' object.
|
||||
|
||||
Contrary to the CLI that may update only a subset of the fields capacity, zone and tags, when calling this API all of these values must be specified.
|
||||
"
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Proposed changes have been added to the list of pending changes", body = UpdateClusterLayoutResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn UpdateClusterLayout() -> () {}
|
||||
|
||||
// Hack: we cannot use the UpdateClusterLayoutRequest from api.rs,
|
||||
// as it contains (via NodeRoleChange) an untagged enum flattenned into
|
||||
// a struct, which breaks the openapi generator.
|
||||
// See issue #1249.
|
||||
// Instead, we use a rewritten version of the NodeRoleChange struct where
|
||||
// the struct fields are distributed into the enum variants (this is an equivalent
|
||||
// representation, but this way we avoid having to rewrite all uses of the original
|
||||
// struct in the Garage codebase).
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[schema(as = UpdateClusterLayoutRequest)]
|
||||
pub struct UpdateClusterLayoutRequestOpenapi {
|
||||
/// New node roles to assign or remove in the cluster layout
|
||||
#[serde(default)]
|
||||
pub roles: Vec<NodeRoleChangeOpenapi>,
|
||||
/// New layout computation parameters to use
|
||||
#[serde(default)]
|
||||
pub parameters: Option<LayoutParameters>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[schema(as = NodeRoleChangeRequest)]
|
||||
#[serde(untagged)]
|
||||
pub enum NodeRoleChangeOpenapi {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Remove {
|
||||
/// ID of the node for which this change applies
|
||||
id: String,
|
||||
/// Set `remove` to `true` to remove the node from the layout
|
||||
remove: bool,
|
||||
},
|
||||
Update(NodeRoleUpdate),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeRoleUpdate {
|
||||
/// ID of the node for which this change applies
|
||||
id: String,
|
||||
#[serde(flatten)]
|
||||
role: NodeAssignedRole,
|
||||
}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/PreviewClusterLayoutChanges",
|
||||
tag = "Cluster layout",
|
||||
description = "
|
||||
Computes a new layout taking into account the staged parameters, and returns it with detailed statistics. The new layout is not applied in the cluster.
|
||||
|
||||
*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*
|
||||
",
|
||||
responses(
|
||||
(status = 200, description = "Information about the new layout", body = PreviewClusterLayoutChangesResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn PreviewClusterLayoutChanges() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/ApplyClusterLayout",
|
||||
tag = "Cluster layout",
|
||||
description = "
|
||||
Applies to the cluster the layout changes currently registered as staged layout changes.
|
||||
|
||||
*Note: do not try to parse the `message` field of the response, it is given as an array of string specifically because its format is not stable.*
|
||||
",
|
||||
request_body=ApplyClusterLayoutRequest,
|
||||
responses(
|
||||
(status = 200, description = "The updated cluster layout has been applied in the cluster", body = ApplyClusterLayoutResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ApplyClusterLayout() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/RevertClusterLayout",
|
||||
tag = "Cluster layout",
|
||||
description = "Clear staged layout changes",
|
||||
responses(
|
||||
(status = 200, description = "All pending changes to the cluster layout have been erased", body = RevertClusterLayoutResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn RevertClusterLayout() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/ClusterLayoutSkipDeadNodes",
|
||||
tag = "Cluster layout",
|
||||
description = "Force progress in layout update trackers",
|
||||
request_body = ClusterLayoutSkipDeadNodesRequest,
|
||||
responses(
|
||||
(status = 200, description = "Request has been taken into account", body = ClusterLayoutSkipDeadNodesResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ClusterLayoutSkipDeadNodes() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Access key operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/ListKeys",
|
||||
tag = "Access key",
|
||||
description = "Returns all API access keys in the cluster.",
|
||||
responses(
|
||||
(status = 200, description = "Returns the key identifier (aka `AWS_ACCESS_KEY_ID`) and its associated, human friendly, name if any (otherwise return an empty string)", body = ListKeysResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ListKeys() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetKeyInfo",
|
||||
tag = "Access key",
|
||||
description = "
|
||||
Return information about a specific key like its identifiers, its permissions and buckets on which it has permissions.
|
||||
You can search by specifying the exact key identifier (`id`) or by specifying a pattern (`search`).
|
||||
|
||||
For confidentiality reasons, the secret key is not returned by default: you must pass the `showSecretKey` query parameter to get it.
|
||||
",
|
||||
params(GetKeyInfoRequest),
|
||||
responses(
|
||||
(status = 200, description = "Information about the access key", body = GetKeyInfoResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetKeyInfo() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/CreateKey",
|
||||
tag = "Access key",
|
||||
description = "Creates a new API access key.",
|
||||
request_body = CreateKeyRequest,
|
||||
responses(
|
||||
(status = 200, description = "Access key has been created", body = CreateKeyResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn CreateKey() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/ImportKey",
|
||||
tag = "Access key",
|
||||
description = "
|
||||
Imports an existing API key. This feature must only be used for migrations and backup restore.
|
||||
|
||||
**Do not use it to generate custom key identifiers or you will break your Garage cluster.**
|
||||
",
|
||||
request_body = ImportKeyRequest,
|
||||
responses(
|
||||
(status = 200, description = "Access key has been imported", body = ImportKeyResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ImportKey() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/UpdateKey",
|
||||
tag = "Access key",
|
||||
description = "
|
||||
Updates information about the specified API access key.
|
||||
|
||||
*Note: the secret key is not returned in the response, `null` is sent instead.*
|
||||
",
|
||||
request_body = UpdateKeyRequestBody,
|
||||
params(UpdateKeyRequest),
|
||||
responses(
|
||||
(status = 200, description = "Access key has been updated", body = UpdateKeyResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn UpdateKey() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/DeleteKey",
|
||||
tag = "Access key",
|
||||
description = "Delete a key from the cluster. Its access will be removed from all the buckets. Buckets are not automatically deleted and can be dangling. You should manually delete them before. ",
|
||||
params(DeleteKeyRequest),
|
||||
responses(
|
||||
(status = 200, description = "Access key has been deleted"),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn DeleteKey() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Bucket operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/ListBuckets",
|
||||
tag = "Bucket",
|
||||
description = "List all the buckets on the cluster with their UUID and their global and local aliases.",
|
||||
responses(
|
||||
(status = 200, description = "Returns the UUID of all the buckets and all their aliases", body = ListBucketsResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ListBuckets() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetBucketInfo",
|
||||
tag = "Bucket",
|
||||
description = "
|
||||
Given a bucket identifier (`id`) or a global alias (`alias`), get its information.
|
||||
It includes its aliases, its web configuration, keys that have some permissions
|
||||
on it, some statistics (number of objects, size), number of dangling multipart uploads,
|
||||
and its quotas (if any).
|
||||
",
|
||||
params(GetBucketInfoRequest),
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = GetBucketInfoResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetBucketInfo() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/CreateBucket",
|
||||
tag = "Bucket",
|
||||
description = "
|
||||
Creates a new bucket, either with a global alias, a local one, or no alias at all.
|
||||
Technically, you can also specify both `globalAlias` and `localAlias` and that would create two aliases.
|
||||
",
|
||||
request_body = CreateBucketRequest,
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = CreateBucketResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn CreateBucket() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/UpdateBucket",
|
||||
tag = "Bucket",
|
||||
description = "
|
||||
All fields (`websiteAccess` and `quotas`) are optional.
|
||||
If they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed.
|
||||
|
||||
In `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified.
|
||||
The field `errorDocument` is optional, if no error document is set a generic
|
||||
error message is displayed when errors happen. Conversely, if `enabled` is
|
||||
`false`, neither `indexDocument` nor `errorDocument` must be specified.
|
||||
|
||||
In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`
|
||||
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
|
||||
to change only one of the two quotas.
|
||||
",
|
||||
params(UpdateBucketRequest),
|
||||
request_body = UpdateBucketRequestBody,
|
||||
responses(
|
||||
(status = 200, description = "Bucket has been updated", body = UpdateBucketResponse),
|
||||
(status = 404, description = "Bucket not found"),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn UpdateBucket() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/DeleteBucket",
|
||||
tag = "Bucket",
|
||||
description = "
|
||||
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||
|
||||
**Warning:** this will delete all aliases associated with the bucket!
|
||||
",
|
||||
params(DeleteBucketRequest),
|
||||
responses(
|
||||
(status = 200, description = "Bucket has been deleted"),
|
||||
(status = 400, description = "Bucket is not empty"),
|
||||
(status = 404, description = "Bucket not found"),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn DeleteBucket() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/CleanupIncompleteUploads",
|
||||
tag = "Bucket",
|
||||
description = "Removes all incomplete multipart uploads that are older than the specified number of seconds.",
|
||||
request_body = CleanupIncompleteUploadsRequest,
|
||||
responses(
|
||||
(status = 200, description = "The bucket was cleaned up successfully", body = CleanupIncompleteUploadsResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn CleanupIncompleteUploads() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/InspectObject",
|
||||
tag = "Bucket",
|
||||
description = "
|
||||
Returns detailed information about an object in a bucket, including its internal state in Garage.
|
||||
|
||||
This API call can be used to list the data blocks referenced by an object,
|
||||
as well as to view metadata associated to the object.
|
||||
|
||||
This call may return a list of more than one version for the object, for instance in the
|
||||
case where there is a currently stored version of the object, and a newer version whose
|
||||
upload is in progress and not yet finished.
|
||||
",
|
||||
params(InspectObjectRequest),
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the object", body = InspectObjectResponse),
|
||||
(status = 404, description = "Object not found"),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn InspectObject() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Operations on permissions for keys on buckets
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/AllowBucketKey",
|
||||
tag = "Permission",
|
||||
description = "
|
||||
⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.
|
||||
|
||||
Allows a key to do read/write/owner operations on a bucket.
|
||||
|
||||
Flags in permissions which have the value true will be activated. Other flags will remain unchanged (ie. they will keep their internal value).
|
||||
|
||||
For example, if you set read to true, the key will be allowed to read the bucket.
|
||||
If you set it to false, the key will keeps its previous read permission.
|
||||
If you want to disallow read for the key, check the DenyBucketKey operation.
|
||||
",
|
||||
request_body = AllowBucketKeyRequest,
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = AllowBucketKeyResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn AllowBucketKey() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/DenyBucketKey",
|
||||
tag = "Permission",
|
||||
description = "
|
||||
⚠️ **DISCLAIMER**: Garage's developers are aware that this endpoint has an unconventional semantic. Be extra careful when implementing it, its behavior is not obvious.
|
||||
|
||||
Denies a key from doing read/write/owner operations on a bucket.
|
||||
|
||||
Flags in permissions which have the value true will be deactivated. Other flags will remain unchanged.
|
||||
|
||||
For example, if you set read to true, the key will be denied from reading.
|
||||
If you set read to false, the key will keep its previous permissions.
|
||||
If you want the key to have the reading permission, check the AllowBucketKey operation.
|
||||
",
|
||||
request_body = DenyBucketKeyRequest,
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = DenyBucketKeyResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn DenyBucketKey() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Operations on bucket aliases
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/AddBucketAlias",
|
||||
tag = "Bucket alias",
|
||||
description = "Add an alias for the target bucket. This can be either a global or a local alias, depending on which fields are specified.",
|
||||
request_body = BucketAliasEnumOpenapi,
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = AddBucketAliasResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn AddBucketAlias() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/RemoveBucketAlias",
|
||||
tag = "Bucket alias",
|
||||
description = "Remove an alias for the target bucket. This can be either a global or a local alias, depending on which fields are specified.",
|
||||
request_body = BucketAliasEnumOpenapi,
|
||||
responses(
|
||||
(status = 200, description = "Returns exhaustive information about the bucket", body = RemoveBucketAliasResponse),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn RemoveBucketAlias() -> () {}
|
||||
|
||||
// Hack for issue #1249 (see UpdateClusterLayout)
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
|
||||
#[serde(untagged)]
|
||||
#[schema(as = BucketAliasEnum)]
|
||||
pub enum BucketAliasEnumOpenapi {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Global {
|
||||
bucket_id: String,
|
||||
global_alias: String,
|
||||
},
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Local {
|
||||
bucket_id: String,
|
||||
local_alias: String,
|
||||
access_key_id: String,
|
||||
},
|
||||
}
|
||||
|
||||
// **********************************************
|
||||
// Node operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetNodeInfo",
|
||||
tag = "Node",
|
||||
description = "
|
||||
Return information about the Garage daemon running on one or several nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalGetNodeInfoResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetNodeInfo() -> () {}
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/GetNodeStatistics",
|
||||
tag = "Node",
|
||||
description = "
|
||||
Fetch statistics for one or several Garage nodes.
|
||||
|
||||
*Note: do not try to parse the `freeform` field of the response, it is given as a string specifically because its format is not stable.*
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalGetNodeStatisticsResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetNodeStatistics() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/CreateMetadataSnapshot",
|
||||
tag = "Node",
|
||||
description = "
|
||||
Instruct one or several nodes to take a snapshot of their metadata databases.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalCreateMetadataSnapshotResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn CreateMetadataSnapshot() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/LaunchRepairOperation",
|
||||
tag = "Node",
|
||||
description = "
|
||||
Launch a repair operation on one or several cluster nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalLaunchRepairOperationRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalLaunchRepairOperationResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn LaunchRepairOperation() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Worker operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/ListWorkers",
|
||||
tag = "Worker",
|
||||
description = "
|
||||
List background workers currently running on one or several cluster nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalListWorkersRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalListWorkersResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ListWorkers() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/GetWorkerInfo",
|
||||
tag = "Worker",
|
||||
description = "
|
||||
Get information about the specified background worker on one or several cluster nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalGetWorkerInfoRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalGetWorkerInfoResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetWorkerInfo() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/GetWorkerVariable",
|
||||
tag = "Worker",
|
||||
description = "
|
||||
Fetch values of one or several worker variables, from one or several cluster nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalGetWorkerVariableRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalGetWorkerVariableResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetWorkerVariable() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/SetWorkerVariable",
|
||||
tag = "Worker",
|
||||
description = "
|
||||
Set the value for a worker variable, on one or several cluster nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalSetWorkerVariableRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalSetWorkerVariableResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn SetWorkerVariable() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// Block operations
|
||||
// **********************************************
|
||||
|
||||
#[utoipa::path(get,
|
||||
path = "/v2/ListBlockErrors",
|
||||
tag = "Block",
|
||||
description = "
|
||||
List data blocks that are currently in an errored state on one or several Garage nodes.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalListBlockErrorsResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn ListBlockErrors() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/GetBlockInfo",
|
||||
tag = "Block",
|
||||
description = "
|
||||
Get detailed information about a data block stored on a Garage node, including all object versions and in-progress multipart uploads that contain a reference to this block.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalGetBlockInfoRequest,
|
||||
responses(
|
||||
(status = 200, description = "Detailed block information", body = MultiResponse<LocalGetBlockInfoResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn GetBlockInfo() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/RetryBlockResync",
|
||||
tag = "Block",
|
||||
description = "
|
||||
Instruct Garage node(s) to retry the resynchronization of one or several missing data block(s).
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalRetryBlockResyncRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalRetryBlockResyncResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn RetryBlockResync() -> () {}
|
||||
|
||||
#[utoipa::path(post,
|
||||
path = "/v2/PurgeBlocks",
|
||||
tag = "Block",
|
||||
description = "
|
||||
Purge references to one or several missing data blocks.
|
||||
|
||||
This will remove all objects and in-progress multipart uploads that contain the specified data block(s). The objects will be permanently deleted from the buckets in which they appear. Use with caution.
|
||||
",
|
||||
params(MultiRequestQueryParams),
|
||||
request_body = LocalPurgeBlocksRequest,
|
||||
responses(
|
||||
(status = 200, description = "Responses from individual cluster nodes", body = MultiResponse<LocalPurgeBlocksResponse>),
|
||||
(status = 500, description = "Internal server error")
|
||||
),
|
||||
)]
|
||||
fn PurgeBlocks() -> () {}
|
||||
|
||||
// **********************************************
|
||||
// **********************************************
|
||||
// **********************************************
|
||||
|
||||
struct SecurityAddon;
|
||||
|
||||
impl Modify for SecurityAddon {
|
||||
fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
|
||||
use utoipa::openapi::security::*;
|
||||
let components = openapi.components.as_mut().unwrap(); // we can unwrap safely since there already is components registered.
|
||||
components.add_security_scheme(
|
||||
"bearerAuth",
|
||||
SecurityScheme::Http(Http::builder().scheme(HttpAuthScheme::Bearer).build()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
info(
|
||||
version = "v2.2.0",
|
||||
title = "Garage administration API",
|
||||
description = "Administrate your Garage cluster programatically, including status, layout, keys, buckets, and maintainance tasks.
|
||||
|
||||
*Disclaimer: This API may change in future Garage versions. Read the changelog and upgrade your scripts before upgrading. Additionnaly, this specification is early stage and can contain bugs, so be careful and please report any issues on our issue tracker.*",
|
||||
contact(
|
||||
name = "The Garage team",
|
||||
email = "garagehq@deuxfleurs.fr",
|
||||
url = "https://garagehq.deuxfleurs.fr/",
|
||||
),
|
||||
),
|
||||
modifiers(&SecurityAddon),
|
||||
security(("bearerAuth" = [])),
|
||||
paths(
|
||||
// Special ops
|
||||
Metrics,
|
||||
Health,
|
||||
CheckDomain,
|
||||
// Cluster operations
|
||||
GetClusterHealth,
|
||||
GetClusterStatus,
|
||||
GetClusterStatistics,
|
||||
ConnectClusterNodes,
|
||||
// Admin token operations
|
||||
ListAdminTokens,
|
||||
GetAdminTokenInfo,
|
||||
CreateAdminToken,
|
||||
UpdateAdminToken,
|
||||
DeleteAdminToken,
|
||||
GetCurrentAdminTokenInfo,
|
||||
// Layout operations
|
||||
GetClusterLayout,
|
||||
GetClusterLayoutHistory,
|
||||
UpdateClusterLayout,
|
||||
PreviewClusterLayoutChanges,
|
||||
ApplyClusterLayout,
|
||||
RevertClusterLayout,
|
||||
ClusterLayoutSkipDeadNodes,
|
||||
// Key operations
|
||||
ListKeys,
|
||||
GetKeyInfo,
|
||||
CreateKey,
|
||||
ImportKey,
|
||||
UpdateKey,
|
||||
DeleteKey,
|
||||
// Bucket operations
|
||||
ListBuckets,
|
||||
GetBucketInfo,
|
||||
CreateBucket,
|
||||
UpdateBucket,
|
||||
DeleteBucket,
|
||||
CleanupIncompleteUploads,
|
||||
InspectObject,
|
||||
// Operations on permissions
|
||||
AllowBucketKey,
|
||||
DenyBucketKey,
|
||||
// Operations on aliases
|
||||
AddBucketAlias,
|
||||
RemoveBucketAlias,
|
||||
// Node operations
|
||||
GetNodeInfo,
|
||||
GetNodeStatistics,
|
||||
CreateMetadataSnapshot,
|
||||
LaunchRepairOperation,
|
||||
// Worker operations
|
||||
ListWorkers,
|
||||
GetWorkerInfo,
|
||||
GetWorkerVariable,
|
||||
SetWorkerVariable,
|
||||
// Block operations
|
||||
ListBlockErrors,
|
||||
GetBlockInfo,
|
||||
RetryBlockResync,
|
||||
PurgeBlocks,
|
||||
),
|
||||
servers(
|
||||
(url = "http://localhost:3903/", description = "A local server")
|
||||
),
|
||||
)]
|
||||
pub struct ApiDoc;
|
||||
|
|
@ -5,6 +5,14 @@ use std::time::Duration;
|
|||
use async_trait::async_trait;
|
||||
use tokio::sync::watch;
|
||||
|
||||
use garage_util::background::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::{Error as GarageError, OkOrMessage};
|
||||
use garage_util::migrate::Migrate;
|
||||
|
||||
use garage_table::replication::*;
|
||||
use garage_table::*;
|
||||
|
||||
use garage_block::manager::BlockManager;
|
||||
use garage_block::repair::ScrubWorkerCommand;
|
||||
|
||||
|
|
@ -14,25 +22,23 @@ use garage_model::s3::mpu_table::*;
|
|||
use garage_model::s3::object_table::*;
|
||||
use garage_model::s3::version_table::*;
|
||||
|
||||
use garage_table::replication::*;
|
||||
use garage_table::*;
|
||||
|
||||
use garage_util::background::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::Error;
|
||||
use garage_util::migrate::Migrate;
|
||||
|
||||
use crate::*;
|
||||
use crate::api::*;
|
||||
use crate::error::Error;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
const RC_REPAIR_ITER_COUNT: usize = 64;
|
||||
|
||||
pub async fn launch_online_repair(
|
||||
impl RequestHandler for LocalLaunchRepairOperationRequest {
|
||||
type Response = LocalLaunchRepairOperationResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
bg: &BackgroundRunner,
|
||||
opt: RepairOpt,
|
||||
) -> Result<(), Error> {
|
||||
match opt.what {
|
||||
RepairWhat::Tables => {
|
||||
admin: &Admin,
|
||||
) -> Result<LocalLaunchRepairOperationResponse, Error> {
|
||||
let bg = &admin.background;
|
||||
match self.repair_type {
|
||||
RepairType::Tables => {
|
||||
info!("Launching a full sync of tables");
|
||||
garage.bucket_table.syncer.add_full_sync()?;
|
||||
garage.object_table.syncer.add_full_sync()?;
|
||||
|
|
@ -40,65 +46,65 @@ pub async fn launch_online_repair(
|
|||
garage.block_ref_table.syncer.add_full_sync()?;
|
||||
garage.key_table.syncer.add_full_sync()?;
|
||||
}
|
||||
RepairWhat::Versions => {
|
||||
RepairType::Versions => {
|
||||
info!("Repairing the versions table");
|
||||
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions));
|
||||
}
|
||||
RepairWhat::MultipartUploads => {
|
||||
RepairType::MultipartUploads => {
|
||||
info!("Repairing the multipart uploads table");
|
||||
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
|
||||
}
|
||||
RepairWhat::BlockRefs => {
|
||||
RepairType::BlockRefs => {
|
||||
info!("Repairing the block refs table");
|
||||
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
|
||||
}
|
||||
RepairWhat::BlockRc => {
|
||||
RepairType::BlockRc => {
|
||||
info!("Repairing the block reference counters");
|
||||
bg.spawn_worker(BlockRcRepair::new(
|
||||
garage.block_manager.clone(),
|
||||
garage.block_ref_table.clone(),
|
||||
));
|
||||
}
|
||||
RepairWhat::Blocks => {
|
||||
RepairType::Blocks => {
|
||||
info!("Repairing the stored blocks");
|
||||
bg.spawn_worker(garage_block::repair::RepairWorker::new(
|
||||
garage.block_manager.clone(),
|
||||
));
|
||||
}
|
||||
RepairWhat::Scrub { cmd } => {
|
||||
RepairType::Scrub(cmd) => {
|
||||
let cmd = match cmd {
|
||||
ScrubCmd::Start => ScrubWorkerCommand::Start,
|
||||
ScrubCmd::Pause => ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24)),
|
||||
ScrubCmd::Resume => ScrubWorkerCommand::Resume,
|
||||
ScrubCmd::Cancel => ScrubWorkerCommand::Cancel,
|
||||
ScrubCmd::SetTranquility { tranquility } => {
|
||||
garage
|
||||
.block_manager
|
||||
.scrub_persister
|
||||
.set_with(|x| x.tranquility = tranquility)?;
|
||||
return Ok(());
|
||||
ScrubCommand::Start => ScrubWorkerCommand::Start,
|
||||
ScrubCommand::Pause => {
|
||||
ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24))
|
||||
}
|
||||
ScrubCommand::Resume => ScrubWorkerCommand::Resume,
|
||||
ScrubCommand::Cancel => ScrubWorkerCommand::Cancel,
|
||||
};
|
||||
info!("Sending command to scrub worker: {:?}", cmd);
|
||||
garage.block_manager.send_scrub_command(cmd).await?;
|
||||
}
|
||||
RepairWhat::Rebalance => {
|
||||
RepairType::Rebalance => {
|
||||
info!("Rebalancing the stored blocks among storage locations");
|
||||
bg.spawn_worker(garage_block::repair::RebalanceWorker::new(
|
||||
garage.block_manager.clone(),
|
||||
));
|
||||
}
|
||||
RepairWhat::Aliases => {
|
||||
RepairType::Aliases => {
|
||||
info!("Repairing bucket aliases (foreground)");
|
||||
garage.locked_helper().await.repair_aliases().await?;
|
||||
}
|
||||
RepairWhat::ClearResyncQueue => {
|
||||
RepairType::ClearResyncQueue => {
|
||||
info!("Clearing resync queue (foreground)");
|
||||
let garage = garage.clone();
|
||||
tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
|
||||
.await??
|
||||
tokio::task::spawn_blocking(move || {
|
||||
garage.block_manager.resync.clear_resync_queue()
|
||||
})
|
||||
.await
|
||||
.map_err(garage_util::error::Error::from)??;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
Ok(LocalLaunchRepairOperationResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// ----
|
||||
|
|
@ -112,7 +118,7 @@ trait TableRepair: Send + Sync + 'static {
|
|||
&mut self,
|
||||
garage: &Garage,
|
||||
entry: <<Self as TableRepair>::T as TableSchema>::E,
|
||||
) -> impl Future<Output = Result<bool, Error>> + Send;
|
||||
) -> impl Future<Output = Result<bool, GarageError>> + Send;
|
||||
}
|
||||
|
||||
struct TableRepairWorker<T: TableRepair> {
|
||||
|
|
@ -148,7 +154,10 @@ impl<R: TableRepair> Worker for TableRepairWorker<R> {
|
|||
}
|
||||
}
|
||||
|
||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||
async fn work(
|
||||
&mut self,
|
||||
_must_exit: &mut watch::Receiver<bool>,
|
||||
) -> Result<WorkerState, GarageError> {
|
||||
let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? {
|
||||
Some((k, v)) => (v, k),
|
||||
None => {
|
||||
|
|
@ -190,7 +199,7 @@ impl TableRepair for RepairVersions {
|
|||
&garage.version_table
|
||||
}
|
||||
|
||||
async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> {
|
||||
async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, GarageError> {
|
||||
if !version.deleted.get() {
|
||||
let ref_exists = match &version.backlink {
|
||||
VersionBacklink::Object { bucket_id, key } => garage
|
||||
|
|
@ -236,7 +245,11 @@ impl TableRepair for RepairBlockRefs {
|
|||
&garage.block_ref_table
|
||||
}
|
||||
|
||||
async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> {
|
||||
async fn process(
|
||||
&mut self,
|
||||
garage: &Garage,
|
||||
mut block_ref: BlockRef,
|
||||
) -> Result<bool, GarageError> {
|
||||
if !block_ref.deleted.get() {
|
||||
let ref_exists = garage
|
||||
.version_table
|
||||
|
|
@ -271,7 +284,11 @@ impl TableRepair for RepairMpu {
|
|||
&garage.mpu_table
|
||||
}
|
||||
|
||||
async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> {
|
||||
async fn process(
|
||||
&mut self,
|
||||
garage: &Garage,
|
||||
mut mpu: MultipartUpload,
|
||||
) -> Result<bool, GarageError> {
|
||||
if !mpu.deleted.get() {
|
||||
let ref_exists = garage
|
||||
.object_table
|
||||
|
|
@ -338,7 +355,10 @@ impl Worker for BlockRcRepair {
|
|||
}
|
||||
}
|
||||
|
||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||
async fn work(
|
||||
&mut self,
|
||||
_must_exit: &mut watch::Receiver<bool>,
|
||||
) -> Result<WorkerState, GarageError> {
|
||||
for _i in 0..RC_REPAIR_ITER_COUNT {
|
||||
let next1 = self
|
||||
.block_manager
|
||||
|
|
@ -7,12 +7,6 @@ use garage_api_common::router_macros::*;
|
|||
use crate::error::*;
|
||||
use crate::router_v0;
|
||||
|
||||
pub enum Authorization {
|
||||
None,
|
||||
MetricsToken,
|
||||
AdminToken,
|
||||
}
|
||||
|
||||
router_match! {@func
|
||||
|
||||
/// List of all Admin API endpoints.
|
||||
|
|
@ -211,15 +205,6 @@ impl Endpoint {
|
|||
))),
|
||||
}
|
||||
}
|
||||
/// Get the kind of authorization which is required to perform the operation.
|
||||
pub fn authorization_type(&self) -> Authorization {
|
||||
match self {
|
||||
Self::Health => Authorization::None,
|
||||
Self::CheckDomain => Authorization::None,
|
||||
Self::Metrics => Authorization::MetricsToken,
|
||||
_ => Authorization::AdminToken,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
generateQueryParameters! {
|
||||
|
|
|
|||
276
src/api/admin/router_v2.rs
Normal file
|
|
@ -0,0 +1,276 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use hyper::body::Incoming as IncomingBody;
|
||||
use hyper::{Method, Request};
|
||||
use paste::paste;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
use garage_api_common::router_macros::*;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::router_v1;
|
||||
use crate::Authorization;
|
||||
|
||||
impl AdminApiRequest {
|
||||
/// Determine which S3 endpoint a request is for using the request, and a bucket which was
|
||||
/// possibly extracted from the Host header.
|
||||
/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
|
||||
pub async fn from_request(req: Request<IncomingBody>) -> Result<Self, Error> {
|
||||
let uri = req.uri().clone();
|
||||
let path = uri.path();
|
||||
let query = uri.query();
|
||||
|
||||
let method = req.method().clone();
|
||||
|
||||
let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
|
||||
|
||||
let res = router_match!(@gen_path_parser_v2 (&method, path, "/v2/", query, req) [
|
||||
@special OPTIONS _ => Options (),
|
||||
@special GET "/check" => CheckDomain (query::domain),
|
||||
@special GET "/health" => Health (),
|
||||
@special GET "/metrics" => Metrics (),
|
||||
// Cluster endpoints
|
||||
GET GetClusterStatus (),
|
||||
GET GetClusterHealth (),
|
||||
POST ConnectClusterNodes (body),
|
||||
// Admin token endpoints
|
||||
GET ListAdminTokens (),
|
||||
GET GetAdminTokenInfo (query_opt::id, query_opt::search),
|
||||
POST CreateAdminToken (body),
|
||||
POST UpdateAdminToken (body_field, query::id),
|
||||
POST DeleteAdminToken (query::id),
|
||||
GET GetCurrentAdminTokenInfo (admin_token),
|
||||
// Layout endpoints
|
||||
GET GetClusterLayout (),
|
||||
GET GetClusterLayoutHistory (),
|
||||
POST UpdateClusterLayout (body),
|
||||
POST PreviewClusterLayoutChanges (),
|
||||
POST ApplyClusterLayout (body),
|
||||
POST RevertClusterLayout (),
|
||||
POST ClusterLayoutSkipDeadNodes (body),
|
||||
// API key endpoints
|
||||
GET GetKeyInfo (query_opt::id, query_opt::search, parse_default(false)::show_secret_key),
|
||||
POST UpdateKey (body_field, query::id),
|
||||
POST CreateKey (body),
|
||||
POST ImportKey (body),
|
||||
POST DeleteKey (query::id),
|
||||
GET ListKeys (),
|
||||
// Bucket endpoints
|
||||
GET GetBucketInfo (query_opt::id, query_opt::global_alias, query_opt::search),
|
||||
GET ListBuckets (),
|
||||
POST CreateBucket (body),
|
||||
POST DeleteBucket (query::id),
|
||||
POST UpdateBucket (body_field, query::id),
|
||||
POST CleanupIncompleteUploads (body),
|
||||
GET InspectObject (query::bucket_id, query::key),
|
||||
// Bucket-key permissions
|
||||
POST AllowBucketKey (body),
|
||||
POST DenyBucketKey (body),
|
||||
// Bucket aliases
|
||||
POST AddBucketAlias (body),
|
||||
POST RemoveBucketAlias (body),
|
||||
// Node APIs
|
||||
GET GetNodeInfo (default::body, query::node),
|
||||
POST CreateMetadataSnapshot (default::body, query::node),
|
||||
GET GetNodeStatistics (default::body, query::node),
|
||||
GET GetClusterStatistics (),
|
||||
POST LaunchRepairOperation (body_field, query::node),
|
||||
// Worker APIs
|
||||
POST ListWorkers (body_field, query::node),
|
||||
POST GetWorkerInfo (body_field, query::node),
|
||||
POST GetWorkerVariable (body_field, query::node),
|
||||
POST SetWorkerVariable (body_field, query::node),
|
||||
// Block APIs
|
||||
GET ListBlockErrors (default::body, query::node),
|
||||
POST GetBlockInfo (body_field, query::node),
|
||||
POST RetryBlockResync (body_field, query::node),
|
||||
POST PurgeBlocks (body_field, query::node),
|
||||
]);
|
||||
|
||||
if let Some(message) = query.nonempty_message() {
|
||||
debug!("Unused query parameter: {}", message)
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Some endpoints work exactly the same in their v2/ version as they did in their v1/ version.
|
||||
/// For these endpoints, we can convert a v1/ call to its equivalent as if it was made using
|
||||
/// its v2/ URL.
|
||||
pub async fn from_v1(
|
||||
v1_endpoint: router_v1::Endpoint,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Self, Error> {
|
||||
use router_v1::Endpoint;
|
||||
|
||||
match v1_endpoint {
|
||||
// GetClusterStatus semantics changed:
|
||||
// info about local node is no longer returned
|
||||
Endpoint::GetClusterHealth => {
|
||||
Ok(AdminApiRequest::GetClusterHealth(GetClusterHealthRequest))
|
||||
}
|
||||
Endpoint::ConnectClusterNodes => {
|
||||
let req = parse_json_body::<ConnectClusterNodesRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::ConnectClusterNodes(req))
|
||||
}
|
||||
|
||||
// Layout
|
||||
Endpoint::GetClusterLayout => {
|
||||
Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest))
|
||||
}
|
||||
// UpdateClusterLayout semantics changed
|
||||
Endpoint::ApplyClusterLayout => {
|
||||
let param = parse_json_body::<ApplyClusterLayoutRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::ApplyClusterLayout(param))
|
||||
}
|
||||
Endpoint::RevertClusterLayout => Ok(AdminApiRequest::RevertClusterLayout(
|
||||
RevertClusterLayoutRequest,
|
||||
)),
|
||||
|
||||
// Keys
|
||||
Endpoint::ListKeys => Ok(AdminApiRequest::ListKeys(ListKeysRequest)),
|
||||
Endpoint::GetKeyInfo {
|
||||
id,
|
||||
search,
|
||||
show_secret_key,
|
||||
} => {
|
||||
let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
|
||||
Ok(AdminApiRequest::GetKeyInfo(GetKeyInfoRequest {
|
||||
id,
|
||||
search,
|
||||
show_secret_key,
|
||||
}))
|
||||
}
|
||||
Endpoint::CreateKey => {
|
||||
let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::CreateKey(req))
|
||||
}
|
||||
Endpoint::ImportKey => {
|
||||
let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::ImportKey(req))
|
||||
}
|
||||
Endpoint::UpdateKey { id } => {
|
||||
let body = parse_json_body::<UpdateKeyRequestBody, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::UpdateKey(UpdateKeyRequest { id, body }))
|
||||
}
|
||||
|
||||
// DeleteKey semantics changed:
|
||||
// - in v1/ : HTTP DELETE => HTTP 204 No Content
|
||||
// - in v2/ : HTTP POST => HTTP 200 Ok
|
||||
// Endpoint::DeleteKey { id } => Ok(AdminApiRequest::DeleteKey(DeleteKeyRequest { id })),
|
||||
|
||||
// Buckets
|
||||
Endpoint::ListBuckets => Ok(AdminApiRequest::ListBuckets(ListBucketsRequest)),
|
||||
Endpoint::GetBucketInfo { id, global_alias } => {
|
||||
Ok(AdminApiRequest::GetBucketInfo(GetBucketInfoRequest {
|
||||
id,
|
||||
global_alias,
|
||||
search: None,
|
||||
}))
|
||||
}
|
||||
Endpoint::CreateBucket => {
|
||||
let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::CreateBucket(req))
|
||||
}
|
||||
|
||||
// DeleteBucket semantics changed::
|
||||
// - in v1/ : HTTP DELETE => HTTP 204 No Content
|
||||
// - in v2/ : HTTP POST => HTTP 200 Ok
|
||||
// Endpoint::DeleteBucket { id } => {
|
||||
// Ok(AdminApiRequest::DeleteBucket(DeleteBucketRequest { id }))
|
||||
// }
|
||||
Endpoint::UpdateBucket { id } => {
|
||||
let body = parse_json_body::<UpdateBucketRequestBody, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::UpdateBucket(UpdateBucketRequest {
|
||||
id,
|
||||
body,
|
||||
}))
|
||||
}
|
||||
|
||||
// Bucket-key permissions
|
||||
Endpoint::BucketAllowKey => {
|
||||
let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::AllowBucketKey(AllowBucketKeyRequest(req)))
|
||||
}
|
||||
Endpoint::BucketDenyKey => {
|
||||
let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
|
||||
Ok(AdminApiRequest::DenyBucketKey(DenyBucketKeyRequest(req)))
|
||||
}
|
||||
// Bucket aliasing
|
||||
Endpoint::GlobalAliasBucket { id, alias } => {
|
||||
Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
|
||||
bucket_id: id,
|
||||
alias: BucketAliasEnum::Global {
|
||||
global_alias: alias,
|
||||
},
|
||||
}))
|
||||
}
|
||||
Endpoint::GlobalUnaliasBucket { id, alias } => Ok(AdminApiRequest::RemoveBucketAlias(
|
||||
RemoveBucketAliasRequest {
|
||||
bucket_id: id,
|
||||
alias: BucketAliasEnum::Global {
|
||||
global_alias: alias,
|
||||
},
|
||||
},
|
||||
)),
|
||||
Endpoint::LocalAliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
|
||||
bucket_id: id,
|
||||
alias: BucketAliasEnum::Local {
|
||||
local_alias: alias,
|
||||
access_key_id,
|
||||
},
|
||||
})),
|
||||
Endpoint::LocalUnaliasBucket {
|
||||
id,
|
||||
access_key_id,
|
||||
alias,
|
||||
} => Ok(AdminApiRequest::RemoveBucketAlias(
|
||||
RemoveBucketAliasRequest {
|
||||
bucket_id: id,
|
||||
alias: BucketAliasEnum::Local {
|
||||
local_alias: alias,
|
||||
access_key_id,
|
||||
},
|
||||
},
|
||||
)),
|
||||
|
||||
// For endpoints that have different body content syntax, issue
|
||||
// deprecation warning
|
||||
_ => Err(Error::bad_request(format!(
|
||||
"v1/ endpoint is no longer supported: {}",
|
||||
v1_endpoint.name()
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the kind of authorization which is required to perform the operation.
|
||||
pub fn authorization_type(&self) -> Authorization {
|
||||
match self {
|
||||
Self::Options(_) | Self::Health(_) | Self::CheckDomain(_) => Authorization::None,
|
||||
Self::Metrics(_) => Authorization::MetricsToken,
|
||||
_ => Authorization::AdminToken,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
generateQueryParameters! {
|
||||
keywords: [],
|
||||
fields: [
|
||||
"node" => node,
|
||||
"domain" => domain,
|
||||
"format" => format,
|
||||
"id" => id,
|
||||
"search" => search,
|
||||
"globalAlias" => global_alias,
|
||||
"alias" => alias,
|
||||
"accessKeyId" => access_key_id,
|
||||
"showSecretKey" => show_secret_key,
|
||||
"bucketId" => bucket_id,
|
||||
"key" => key
|
||||
]
|
||||
}
|
||||
173
src/api/admin/special.rs
Normal file
|
|
@ -0,0 +1,173 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use http::header::{
|
||||
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW,
|
||||
};
|
||||
use hyper::{Response, StatusCode};
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
|
||||
use crate::api::{CheckDomainRequest, HealthRequest, MetricsRequest, OptionsRequest};
|
||||
use crate::api_server::ResBody;
|
||||
use crate::error::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for OptionsRequest {
|
||||
type Response = Response<ResBody>;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
_garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(ALLOW, "OPTIONS,GET,POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS,GET,POST")
|
||||
.header(ACCESS_CONTROL_ALLOW_HEADERS, "authorization,content-type")
|
||||
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
|
||||
.body(empty_body())?)
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for MetricsRequest {
|
||||
type Response = Response<ResBody>;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
_garage: &Arc<Garage>,
|
||||
admin: &Admin,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
#[cfg(feature = "metrics")]
|
||||
{
|
||||
use opentelemetry::trace::Tracer;
|
||||
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
|
||||
let tracer = opentelemetry::global::tracer("garage");
|
||||
let metric_families = tracer.in_span("admin/gather_metrics", |_| {
|
||||
admin.exporter.registry().gather()
|
||||
});
|
||||
|
||||
encoder
|
||||
.encode(&metric_families, &mut buffer)
|
||||
.ok_or_internal_error("Could not serialize metrics")?;
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(http::header::CONTENT_TYPE, encoder.format_type())
|
||||
.body(bytes_body(buffer.into()))?)
|
||||
}
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
Err(Error::bad_request(
|
||||
"Garage was built without the metrics feature".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for HealthRequest {
|
||||
type Response = Response<ResBody>;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let health = garage.system.health();
|
||||
|
||||
let (status, status_str) = match health.status {
|
||||
ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
|
||||
ClusterHealthStatus::Degraded => (
|
||||
StatusCode::OK,
|
||||
"Garage is operational but some storage nodes are unavailable",
|
||||
),
|
||||
ClusterHealthStatus::Unavailable => (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Quorum is not available for some/all partitions, reads and writes will fail",
|
||||
),
|
||||
};
|
||||
let status_str = format!(
|
||||
"{}\nConsult the full health check API endpoint at /v2/GetClusterHealth for more details\n",
|
||||
status_str
|
||||
);
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(status)
|
||||
.header(http::header::CONTENT_TYPE, "text/plain")
|
||||
.body(string_body(status_str))?)
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for CheckDomainRequest {
|
||||
type Response = Response<ResBody>;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
if check_domain(garage, &self.domain).await? {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(string_body(format!(
|
||||
"Domain '{}' is managed by Garage",
|
||||
self.domain
|
||||
)))?)
|
||||
} else {
|
||||
Err(Error::bad_request(format!(
|
||||
"Domain '{}' is not managed by Garage",
|
||||
self.domain
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_domain(garage: &Arc<Garage>, domain: &str) -> Result<bool, Error> {
|
||||
// Resolve bucket from domain name, inferring if the website must be activated for the
|
||||
// domain to be valid.
|
||||
let (bucket_name, must_check_website) = if let Some(bname) = garage
|
||||
.config
|
||||
.s3_api
|
||||
.root_domain
|
||||
.as_ref()
|
||||
.and_then(|rd| host_to_bucket(domain, rd))
|
||||
{
|
||||
(bname.to_string(), false)
|
||||
} else if let Some(bname) = garage
|
||||
.config
|
||||
.s3_web
|
||||
.as_ref()
|
||||
.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
|
||||
{
|
||||
(bname.to_string(), true)
|
||||
} else {
|
||||
(domain.to_string(), true)
|
||||
};
|
||||
|
||||
let bucket = match garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_fast(&bucket_name)?
|
||||
{
|
||||
Some(b) => b,
|
||||
None => return Ok(false),
|
||||
};
|
||||
|
||||
if !must_check_website {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
let bucket_state = bucket.state.as_option().unwrap();
|
||||
let bucket_website_config = bucket_state.website_config.get();
|
||||
|
||||
match bucket_website_config {
|
||||
Some(_v) => Ok(true),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
118
src/api/admin/worker.rs
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use garage_util::background::*;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::Error;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
impl RequestHandler for LocalListWorkersRequest {
|
||||
type Response = LocalListWorkersResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
_garage: &Arc<Garage>,
|
||||
admin: &Admin,
|
||||
) -> Result<LocalListWorkersResponse, Error> {
|
||||
let workers = admin.background.get_worker_info();
|
||||
let info = workers
|
||||
.into_iter()
|
||||
.filter(|(_, w)| {
|
||||
(!self.busy_only
|
||||
|| matches!(w.state, WorkerState::Busy | WorkerState::Throttled(_)))
|
||||
&& (!self.error_only || w.errors > 0)
|
||||
})
|
||||
.map(|(id, w)| worker_info_to_api(id as u64, w))
|
||||
.collect::<Vec<_>>();
|
||||
Ok(LocalListWorkersResponse(info))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalGetWorkerInfoRequest {
|
||||
type Response = LocalGetWorkerInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
_garage: &Arc<Garage>,
|
||||
admin: &Admin,
|
||||
) -> Result<LocalGetWorkerInfoResponse, Error> {
|
||||
let info = admin
|
||||
.background
|
||||
.get_worker_info()
|
||||
.get(&(self.id as usize))
|
||||
.ok_or(Error::NoSuchWorker(self.id))?
|
||||
.clone();
|
||||
Ok(LocalGetWorkerInfoResponse(worker_info_to_api(
|
||||
self.id, info,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalGetWorkerVariableRequest {
|
||||
type Response = LocalGetWorkerVariableResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalGetWorkerVariableResponse, Error> {
|
||||
let mut res = HashMap::new();
|
||||
if let Some(k) = self.variable {
|
||||
res.insert(k.clone(), garage.bg_vars.get(&k)?);
|
||||
} else {
|
||||
let vars = garage.bg_vars.get_all();
|
||||
for (k, v) in vars.iter() {
|
||||
res.insert(k.to_string(), v.to_string());
|
||||
}
|
||||
}
|
||||
Ok(LocalGetWorkerVariableResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for LocalSetWorkerVariableRequest {
|
||||
type Response = LocalSetWorkerVariableResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<LocalSetWorkerVariableResponse, Error> {
|
||||
garage.bg_vars.set(&self.variable, &self.value)?;
|
||||
|
||||
Ok(LocalSetWorkerVariableResponse {
|
||||
variable: self.variable,
|
||||
value: self.value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ---- helper functions ----
|
||||
|
||||
fn worker_info_to_api(id: u64, info: WorkerInfo) -> WorkerInfoResp {
|
||||
WorkerInfoResp {
|
||||
id,
|
||||
name: info.name,
|
||||
state: match info.state {
|
||||
WorkerState::Busy => WorkerStateResp::Busy,
|
||||
WorkerState::Throttled(t) => WorkerStateResp::Throttled { duration_secs: t },
|
||||
WorkerState::Idle => WorkerStateResp::Idle,
|
||||
WorkerState::Done => WorkerStateResp::Done,
|
||||
},
|
||||
errors: info.errors as u64,
|
||||
consecutive_errors: info.consecutive_errors as u64,
|
||||
last_error: info.last_error.map(|(message, t)| WorkerLastError {
|
||||
message,
|
||||
secs_ago: now_msec().saturating_sub(t) / 1000,
|
||||
}),
|
||||
|
||||
tranquility: info.status.tranquility,
|
||||
progress: info.status.progress,
|
||||
queue_length: info.status.queue_length,
|
||||
persistent_errors: info.status.persistent_errors,
|
||||
freeform: info.status.freeform,
|
||||
}
|
||||
}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_api_common"
|
||||
version = "1.3.0"
|
||||
version = "2.2.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
@ -21,8 +21,7 @@ garage_util.workspace = true
|
|||
base64.workspace = true
|
||||
bytes.workspace = true
|
||||
chrono.workspace = true
|
||||
crc32fast.workspace = true
|
||||
crc32c.workspace = true
|
||||
crc-fast.workspace = true
|
||||
crypto-common.workspace = true
|
||||
thiserror.workspace = true
|
||||
hex.workspace = true
|
||||
|
|
|
|||
|
|
@ -9,9 +9,7 @@ use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, Statu
|
|||
use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use crate::common_error::{
|
||||
helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
|
||||
};
|
||||
use crate::common_error::{CommonError, OkOrBadRequest, OkOrInternalError};
|
||||
use crate::helpers::*;
|
||||
|
||||
pub fn find_matching_cors_rule<'a, B>(
|
||||
|
|
@ -76,7 +74,7 @@ pub fn add_cors_headers(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn handle_options_api(
|
||||
pub fn handle_options_api(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<IncomingBody>,
|
||||
bucket_name: Option<String>,
|
||||
|
|
@ -93,16 +91,8 @@ pub async fn handle_options_api(
|
|||
// OPTIONS calls are not auhtenticated).
|
||||
if let Some(bn) = bucket_name {
|
||||
let helper = garage.bucket_helper();
|
||||
let bucket_id = helper
|
||||
.resolve_global_bucket_name(&bn)
|
||||
.await
|
||||
.map_err(helper_error_as_internal)?;
|
||||
if let Some(id) = bucket_id {
|
||||
let bucket = garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(id)
|
||||
.await
|
||||
.map_err(helper_error_as_internal)?;
|
||||
let bucket_opt = helper.resolve_global_bucket_fast(&bn)?;
|
||||
if let Some(bucket) = bucket_opt {
|
||||
let bucket_params = bucket.state.into_option().unwrap();
|
||||
handle_options_for_bucket(req, &bucket_params)
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
use std::borrow::Cow;
|
||||
use std::convert::Infallible;
|
||||
use std::fs::{self, Permissions};
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
|
|
@ -35,7 +36,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
|
|||
use crate::helpers::{BoxBody, ErrorBody};
|
||||
|
||||
pub trait ApiEndpoint: Send + Sync + 'static {
|
||||
fn name(&self) -> &'static str;
|
||||
fn name(&self) -> Cow<'static, str>;
|
||||
fn add_span_attributes(&self, span: SpanRef<'_>);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -45,6 +45,83 @@ macro_rules! router_match {
|
|||
}
|
||||
}
|
||||
}};
|
||||
(@gen_path_parser_v2 ($method:expr, $reqpath:expr, $pathprefix:literal, $query:expr, $req:expr)
|
||||
[
|
||||
$(@special $spec_meth:ident $spec_path:pat => $spec_api:ident $spec_params:tt,)*
|
||||
$($meth:ident $api:ident $params:tt,)*
|
||||
]) => {{
|
||||
{
|
||||
#[allow(unused_parens)]
|
||||
match ($method, $reqpath) {
|
||||
$(
|
||||
(&Method::$spec_meth, $spec_path) => AdminApiRequest::$spec_api (
|
||||
router_match!(@@gen_parse_request $spec_api, $spec_params, $query, $req)
|
||||
),
|
||||
)*
|
||||
$(
|
||||
(&Method::$meth, concat!($pathprefix, stringify!($api)))
|
||||
=> AdminApiRequest::$api (
|
||||
router_match!(@@gen_parse_request $api, $params, $query, $req)
|
||||
),
|
||||
)*
|
||||
(m, p) => {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Unknown API endpoint: {} {}",
|
||||
m, p
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
(@@gen_parse_request $api:ident, (), $query: expr, $req:expr) => {{
|
||||
paste!(
|
||||
[< $api Request >]
|
||||
)
|
||||
}};
|
||||
(@@gen_parse_request $api:ident, (body), $query: expr, $req:expr) => {{
|
||||
paste!({
|
||||
parse_json_body::< [<$api Request>], _, Error>($req).await?
|
||||
})
|
||||
}};
|
||||
(@@gen_parse_request $api:ident, (admin_token), $query: expr, $req:expr) => {{
|
||||
paste!({
|
||||
let auth_header = $req.headers()
|
||||
.get(hyper::header::AUTHORIZATION)
|
||||
.ok_or_else(|| Error::bad_request("Missing Authorization header"))?
|
||||
.to_str()
|
||||
.map_err(|_| Error::bad_request("Invalid Authorization header"))?;
|
||||
|
||||
let admin_token = auth_header.strip_prefix("Bearer ")
|
||||
.ok_or_else(|| Error::bad_request("Authorization header must be Bearer token"))?
|
||||
.to_string();
|
||||
|
||||
[< $api Request >] { admin_token }
|
||||
})
|
||||
}};
|
||||
(@@gen_parse_request $api:ident, (body_field, $($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
|
||||
=>
|
||||
{{
|
||||
paste!({
|
||||
let body = parse_json_body::< [<$api RequestBody>], _, Error>($req).await?;
|
||||
[< $api Request >] {
|
||||
body,
|
||||
$(
|
||||
$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
|
||||
)+
|
||||
}
|
||||
})
|
||||
}};
|
||||
(@@gen_parse_request $api:ident, ($($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
|
||||
=>
|
||||
{{
|
||||
paste!({
|
||||
[< $api Request >] {
|
||||
$(
|
||||
$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
|
||||
)+
|
||||
}
|
||||
})
|
||||
}};
|
||||
(@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
|
||||
key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
|
||||
no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
|
||||
|
|
@ -79,13 +156,19 @@ macro_rules! router_match {
|
|||
}
|
||||
}};
|
||||
|
||||
(@@parse_param $query:expr, default, $param:ident) => {{
|
||||
Default::default()
|
||||
}};
|
||||
(@@parse_param $query:expr, query_opt, $param:ident) => {{
|
||||
// extract optional query parameter
|
||||
$query.$param.take().map(|param| param.into_owned())
|
||||
}};
|
||||
(@@parse_param $query:expr, query, $param:ident) => {{
|
||||
// extract mendatory query parameter
|
||||
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
|
||||
$query.$param.take()
|
||||
.ok_or_bad_request(
|
||||
format!("Missing argument `{}` for endpoint", stringify!($param))
|
||||
)?.into_owned()
|
||||
}};
|
||||
(@@parse_param $query:expr, opt_parse, $param:ident) => {{
|
||||
// extract and parse optional query parameter
|
||||
|
|
@ -99,10 +182,22 @@ macro_rules! router_match {
|
|||
(@@parse_param $query:expr, parse, $param:ident) => {{
|
||||
// extract and parse mandatory query parameter
|
||||
// both missing and un-parseable parameters are reported as errors
|
||||
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
|
||||
$query.$param.take()
|
||||
.ok_or_bad_request(
|
||||
format!("Missing argument `{}` for endpoint", stringify!($param))
|
||||
)?
|
||||
.parse()
|
||||
.map_err(|_| Error::bad_request("Failed to parse query parameter"))?
|
||||
}};
|
||||
(@@parse_param $query:expr, parse_default($default:expr), $param:ident) => {{
|
||||
// extract and parse optional query parameter
|
||||
// using provided value as default if paramter is missing
|
||||
$query.$param.take().map(|x| x
|
||||
.parse()
|
||||
.map_err(|_| Error::bad_request("Failed to parse query parameter")))
|
||||
.transpose()?
|
||||
.unwrap_or($default)
|
||||
}};
|
||||
(@func
|
||||
$(#[$doc:meta])*
|
||||
pub enum Endpoint {
|
||||
|
|
@ -187,6 +282,7 @@ macro_rules! generateQueryParameters {
|
|||
},
|
||||
)*
|
||||
$(
|
||||
// FIXME: remove if !v.is_empty() ?
|
||||
$f_param => if !v.is_empty() {
|
||||
if res.$f_name.replace(v).is_some() {
|
||||
return Err(Error::bad_request(format!(
|
||||
|
|
|
|||
|
|
@ -1,9 +1,7 @@
|
|||
use std::convert::{TryFrom, TryInto};
|
||||
use std::hash::Hasher;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use base64::prelude::*;
|
||||
use crc32c::Crc32cHasher as Crc32c;
|
||||
use crc32fast::Hasher as Crc32;
|
||||
use crc_fast::{CrcAlgorithm, Digest as CrcDigest};
|
||||
use md5::{Digest, Md5};
|
||||
use sha1::Sha1;
|
||||
use sha2::Sha256;
|
||||
|
|
@ -21,17 +19,40 @@ pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");
|
|||
pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
|
||||
HeaderName::from_static("x-amz-checksum-algorithm");
|
||||
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
|
||||
pub const X_AMZ_CHECKSUM_TYPE: HeaderName = HeaderName::from_static("x-amz-checksum-type");
|
||||
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
|
||||
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
|
||||
pub const X_AMZ_CHECKSUM_CRC64NVME: HeaderName =
|
||||
HeaderName::from_static("x-amz-checksum-crc64nvme");
|
||||
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
|
||||
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");
|
||||
|
||||
// Values for x-amz-checksum-type
|
||||
pub const COMPOSITE: &str = "COMPOSITE";
|
||||
pub const FULL_OBJECT: &str = "FULL_OBJECT";
|
||||
|
||||
pub type Crc32Checksum = [u8; 4];
|
||||
pub type Crc32cChecksum = [u8; 4];
|
||||
pub type Crc64NvmeChecksum = [u8; 8];
|
||||
pub type Md5Checksum = [u8; 16];
|
||||
pub type Sha1Checksum = [u8; 20];
|
||||
pub type Sha256Checksum = [u8; 32];
|
||||
|
||||
// -- MAP OF CRC ALGORITHMS :
|
||||
// CRC32 -> CrcAlgorithm::Crc32IsoHdlc
|
||||
// CRC32C -> CrcAlgorithm::Crc32Iscsi
|
||||
// CRC64NVME -> CrcAlgorithm::Crc64Nvme
|
||||
|
||||
pub fn new_crc32() -> CrcDigest {
|
||||
CrcDigest::new(CrcAlgorithm::Crc32IsoHdlc)
|
||||
}
|
||||
pub fn new_crc32c() -> CrcDigest {
|
||||
CrcDigest::new(CrcAlgorithm::Crc32Iscsi)
|
||||
}
|
||||
pub fn new_crc64nvme() -> CrcDigest {
|
||||
CrcDigest::new(CrcAlgorithm::Crc64Nvme)
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ExpectedChecksums {
|
||||
// base64-encoded md5 (content-md5 header)
|
||||
|
|
@ -43,8 +64,9 @@ pub struct ExpectedChecksums {
|
|||
}
|
||||
|
||||
pub struct Checksummer {
|
||||
pub crc32: Option<Crc32>,
|
||||
pub crc32c: Option<Crc32c>,
|
||||
pub crc32: Option<CrcDigest>,
|
||||
pub crc32c: Option<CrcDigest>,
|
||||
pub crc64nvme: Option<CrcDigest>,
|
||||
pub md5: Option<Md5>,
|
||||
pub sha1: Option<Sha1>,
|
||||
pub sha256: Option<Sha256>,
|
||||
|
|
@ -54,6 +76,7 @@ pub struct Checksummer {
|
|||
pub struct Checksums {
|
||||
pub crc32: Option<Crc32Checksum>,
|
||||
pub crc32c: Option<Crc32cChecksum>,
|
||||
pub crc64nvme: Option<Crc64NvmeChecksum>,
|
||||
pub md5: Option<Md5Checksum>,
|
||||
pub sha1: Option<Sha1Checksum>,
|
||||
pub sha256: Option<Sha256Checksum>,
|
||||
|
|
@ -64,6 +87,7 @@ impl Checksummer {
|
|||
Self {
|
||||
crc32: None,
|
||||
crc32c: None,
|
||||
crc64nvme: None,
|
||||
md5: None,
|
||||
sha1: None,
|
||||
sha256: None,
|
||||
|
|
@ -91,10 +115,13 @@ impl Checksummer {
|
|||
self.sha256 = Some(Sha256::new());
|
||||
}
|
||||
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
|
||||
self.crc32 = Some(Crc32::new());
|
||||
self.crc32 = Some(new_crc32());
|
||||
}
|
||||
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
|
||||
self.crc32c = Some(Crc32c::default());
|
||||
self.crc32c = Some(new_crc32c());
|
||||
}
|
||||
if matches!(&expected.extra, Some(ChecksumValue::Crc64Nvme(_))) {
|
||||
self.crc64nvme = Some(new_crc64nvme());
|
||||
}
|
||||
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
|
||||
self.sha1 = Some(Sha1::new());
|
||||
|
|
@ -104,10 +131,13 @@ impl Checksummer {
|
|||
pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
|
||||
match algo {
|
||||
Some(ChecksumAlgorithm::Crc32) => {
|
||||
self.crc32 = Some(Crc32::new());
|
||||
self.crc32 = Some(new_crc32());
|
||||
}
|
||||
Some(ChecksumAlgorithm::Crc32c) => {
|
||||
self.crc32c = Some(Crc32c::default());
|
||||
self.crc32c = Some(new_crc32c());
|
||||
}
|
||||
Some(ChecksumAlgorithm::Crc64Nvme) => {
|
||||
self.crc64nvme = Some(new_crc64nvme());
|
||||
}
|
||||
Some(ChecksumAlgorithm::Sha1) => {
|
||||
self.sha1 = Some(Sha1::new());
|
||||
|
|
@ -125,7 +155,10 @@ impl Checksummer {
|
|||
crc32.update(bytes);
|
||||
}
|
||||
if let Some(crc32c) = &mut self.crc32c {
|
||||
crc32c.write(bytes);
|
||||
crc32c.update(bytes);
|
||||
}
|
||||
if let Some(crc64nvme) = &mut self.crc64nvme {
|
||||
crc64nvme.update(bytes);
|
||||
}
|
||||
if let Some(md5) = &mut self.md5 {
|
||||
md5.update(bytes);
|
||||
|
|
@ -140,10 +173,9 @@ impl Checksummer {
|
|||
|
||||
pub fn finalize(self) -> Checksums {
|
||||
Checksums {
|
||||
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
|
||||
crc32c: self
|
||||
.crc32c
|
||||
.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
|
||||
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize() as u32)),
|
||||
crc32c: self.crc32c.map(|x| u32::to_be_bytes(x.finalize() as u32)),
|
||||
crc64nvme: self.crc64nvme.map(|x| u64::to_be_bytes(x.finalize())),
|
||||
md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
|
||||
sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
|
||||
sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
|
||||
|
|
@ -175,10 +207,11 @@ impl Checksums {
|
|||
}
|
||||
if let Some(extra) = expected.extra {
|
||||
let algo = extra.algorithm();
|
||||
if self.extract(Some(algo)) != Some(extra) {
|
||||
let calculated = self.extract(Some(algo));
|
||||
if calculated != Some(extra) {
|
||||
return Err(Error::InvalidDigest(format!(
|
||||
"Failed to validate checksum for algorithm {:?}",
|
||||
algo
|
||||
"Failed to validate checksum for algorithm {:?}: calculated {:?}, expected {:?}",
|
||||
algo, calculated, extra
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
|
@ -190,6 +223,9 @@ impl Checksums {
|
|||
None => None,
|
||||
Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
|
||||
Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
|
||||
Some(ChecksumAlgorithm::Crc64Nvme) => {
|
||||
Some(ChecksumValue::Crc64Nvme(self.crc64nvme.unwrap()))
|
||||
}
|
||||
Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
|
||||
Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
|
||||
}
|
||||
|
|
@ -202,6 +238,7 @@ pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error>
|
|||
match algo {
|
||||
"CRC32" => Ok(ChecksumAlgorithm::Crc32),
|
||||
"CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
|
||||
"CRC64NVME" => Ok(ChecksumAlgorithm::Crc64Nvme),
|
||||
"SHA1" => Ok(ChecksumAlgorithm::Sha1),
|
||||
"SHA256" => Ok(ChecksumAlgorithm::Sha256),
|
||||
_ => Err(Error::bad_request("invalid checksum algorithm")),
|
||||
|
|
@ -225,6 +262,7 @@ pub fn request_trailer_checksum_algorithm(
|
|||
None => Ok(None),
|
||||
Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
|
||||
Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
|
||||
Some(x) if x == X_AMZ_CHECKSUM_CRC64NVME => Ok(Some(ChecksumAlgorithm::Crc64Nvme)),
|
||||
Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
|
||||
Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
|
||||
_ => Err(Error::bad_request("invalid checksum algorithm")),
|
||||
|
|
@ -243,6 +281,12 @@ pub fn request_checksum_value(
|
|||
if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
|
||||
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
|
||||
}
|
||||
if headers.contains_key(X_AMZ_CHECKSUM_CRC64NVME) {
|
||||
ret.push(extract_checksum_value(
|
||||
headers,
|
||||
ChecksumAlgorithm::Crc64Nvme,
|
||||
)?);
|
||||
}
|
||||
if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
|
||||
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
|
||||
}
|
||||
|
|
@ -281,6 +325,14 @@ pub fn extract_checksum_value(
|
|||
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
|
||||
Ok(ChecksumValue::Crc32c(crc32c))
|
||||
}
|
||||
ChecksumAlgorithm::Crc64Nvme => {
|
||||
let crc64nvme = headers
|
||||
.get(X_AMZ_CHECKSUM_CRC64NVME)
|
||||
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
|
||||
.and_then(|x| x.try_into().ok())
|
||||
.ok_or_bad_request("invalid x-amz-checksum-crc64nvme header")?;
|
||||
Ok(ChecksumValue::Crc64Nvme(crc64nvme))
|
||||
}
|
||||
ChecksumAlgorithm::Sha1 => {
|
||||
let sha1 = headers
|
||||
.get(X_AMZ_CHECKSUM_SHA1)
|
||||
|
|
@ -311,6 +363,9 @@ pub fn add_checksum_response_headers(
|
|||
Some(ChecksumValue::Crc32c(crc32c)) => {
|
||||
resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
|
||||
}
|
||||
Some(ChecksumValue::Crc64Nvme(crc64nvme)) => {
|
||||
resp = resp.header(X_AMZ_CHECKSUM_CRC64NVME, BASE64_STANDARD.encode(&crc64nvme));
|
||||
}
|
||||
Some(ChecksumValue::Sha1(sha1)) => {
|
||||
resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -64,12 +64,12 @@ pub struct VerifiedRequest {
|
|||
pub content_sha256_header: ContentSha256Header,
|
||||
}
|
||||
|
||||
pub async fn verify_request(
|
||||
pub fn verify_request(
|
||||
garage: &Garage,
|
||||
mut req: Request<IncomingBody>,
|
||||
service: &'static str,
|
||||
) -> Result<VerifiedRequest, Error> {
|
||||
let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;
|
||||
let checked_signature = payload::check_payload_signature(&garage, &mut req, service)?;
|
||||
|
||||
let request = streaming::parse_streaming_body(
|
||||
req,
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ use sha2::{Digest, Sha256};
|
|||
|
||||
use garage_table::*;
|
||||
use garage_util::data::Hash;
|
||||
use garage_util::time::now_msec;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::key_table::*;
|
||||
|
|
@ -32,7 +33,7 @@ pub struct CheckedSignature {
|
|||
pub signature_header: Option<String>,
|
||||
}
|
||||
|
||||
pub async fn check_payload_signature(
|
||||
pub fn check_payload_signature(
|
||||
garage: &Garage,
|
||||
request: &mut Request<IncomingBody>,
|
||||
service: &'static str,
|
||||
|
|
@ -43,9 +44,9 @@ pub async fn check_payload_signature(
|
|||
// We check for presigned-URL-style authentication first, because
|
||||
// the browser or something else could inject an Authorization header
|
||||
// that is totally unrelated to AWS signatures.
|
||||
check_presigned_signature(garage, service, request, query).await
|
||||
check_presigned_signature(garage, service, request, query)
|
||||
} else if request.headers().contains_key(AUTHORIZATION) {
|
||||
check_standard_signature(garage, service, request, query).await
|
||||
check_standard_signature(garage, service, request, query)
|
||||
} else {
|
||||
// Unsigned (anonymous) request
|
||||
let content_sha256 = request
|
||||
|
|
@ -93,7 +94,7 @@ fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Heade
|
|||
}
|
||||
}
|
||||
|
||||
async fn check_standard_signature(
|
||||
fn check_standard_signature(
|
||||
garage: &Garage,
|
||||
service: &'static str,
|
||||
request: &Request<IncomingBody>,
|
||||
|
|
@ -128,7 +129,7 @@ async fn check_standard_signature(
|
|||
trace!("canonical request:\n{}", canonical_request);
|
||||
trace!("string to sign:\n{}", string_to_sign);
|
||||
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes())?;
|
||||
|
||||
let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;
|
||||
|
||||
|
|
@ -139,7 +140,7 @@ async fn check_standard_signature(
|
|||
})
|
||||
}
|
||||
|
||||
async fn check_presigned_signature(
|
||||
fn check_presigned_signature(
|
||||
garage: &Garage,
|
||||
service: &'static str,
|
||||
request: &mut Request<IncomingBody>,
|
||||
|
|
@ -178,7 +179,7 @@ async fn check_presigned_signature(
|
|||
trace!("canonical request (presigned url):\n{}", canonical_request);
|
||||
trace!("string to sign (presigned url):\n{}", string_to_sign);
|
||||
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes())?;
|
||||
|
||||
// In the page on presigned URLs, AWS specifies that if a signed query
|
||||
// parameter and a signed header of the same name have different values,
|
||||
|
|
@ -380,7 +381,7 @@ pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
|
|||
Ok(Utc.from_utc_datetime(&date))
|
||||
}
|
||||
|
||||
pub async fn verify_v4(
|
||||
pub fn verify_v4(
|
||||
garage: &Garage,
|
||||
service: &str,
|
||||
auth: &Authorization,
|
||||
|
|
@ -393,12 +394,18 @@ pub async fn verify_v4(
|
|||
|
||||
let key = garage
|
||||
.key_table
|
||||
.get(&EmptyKey, &auth.key_id)
|
||||
.await?
|
||||
.get_local(&EmptyKey, &auth.key_id)?
|
||||
.filter(|k| !k.state.is_deleted())
|
||||
.ok_or_else(|| Error::forbidden(format!("No such key: {}", &auth.key_id)))?;
|
||||
let key_p = key.params().unwrap();
|
||||
|
||||
if key_p.is_expired(now_msec()) {
|
||||
return Err(Error::forbidden(format!(
|
||||
"Access key {} has expired",
|
||||
key.key_id
|
||||
)));
|
||||
}
|
||||
|
||||
let mut hmac = signing_hmac(
|
||||
&auth.date,
|
||||
&key_p.secret_key,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_api_k2v"
|
||||
version = "1.3.0"
|
||||
version = "2.2.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Method, Request, Response};
|
||||
|
|
@ -76,25 +77,19 @@ impl ApiHandler for K2VApiServer {
|
|||
// The OPTIONS method is processed early, before we even check for an API key
|
||||
if let Endpoint::Options = endpoint {
|
||||
let options_res = handle_options_api(garage, &req, Some(bucket_name))
|
||||
.await
|
||||
.ok_or_bad_request("Error handling OPTIONS")?;
|
||||
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
|
||||
}
|
||||
|
||||
let verified_request = verify_request(&garage, req, "k2v").await?;
|
||||
let verified_request = verify_request(&garage, req, "k2v")?;
|
||||
let req = verified_request.request;
|
||||
let api_key = verified_request.access_key;
|
||||
|
||||
let bucket_id = garage
|
||||
.bucket_helper()
|
||||
.resolve_bucket(&bucket_name, &api_key)
|
||||
.await
|
||||
.map_err(pass_helper_error)?;
|
||||
let bucket = garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await
|
||||
.map_err(helper_error_as_internal)?;
|
||||
.resolve_bucket_fast(&bucket_name, &api_key)
|
||||
.map_err(pass_helper_error)?;
|
||||
let bucket_id = bucket.id;
|
||||
let bucket_params = bucket.state.into_option().unwrap();
|
||||
|
||||
let allowed = match endpoint.authorization_type() {
|
||||
|
|
@ -185,8 +180,8 @@ impl ApiHandler for K2VApiServer {
|
|||
}
|
||||
|
||||
impl ApiEndpoint for K2VApiEndpoint {
|
||||
fn name(&self) -> &'static str {
|
||||
self.endpoint.name()
|
||||
fn name(&self) -> Cow<'static, str> {
|
||||
Cow::Borrowed(self.endpoint.name())
|
||||
}
|
||||
|
||||
fn add_span_attributes(&self, span: SpanRef<'_>) {
|
||||
|
|
|
|||
|
|
@ -2,8 +2,8 @@ use hyper::header::HeaderValue;
|
|||
use hyper::{HeaderMap, StatusCode};
|
||||
use thiserror::Error;
|
||||
|
||||
pub(crate) use garage_api_common::common_error::pass_helper_error;
|
||||
use garage_api_common::common_error::{commonErrorDerivative, CommonError};
|
||||
pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
|
||||
pub use garage_api_common::common_error::{
|
||||
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
|
||||
};
|
||||
|
|
@ -99,6 +99,7 @@ impl ApiError for Error {
|
|||
fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
||||
use hyper::header;
|
||||
header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header_map.append(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap());
|
||||
}
|
||||
|
||||
fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ pub async fn handle_read_index(
|
|||
let node_id_vec = garage
|
||||
.system
|
||||
.cluster_layout()
|
||||
.all_nongateway_nodes()
|
||||
.all_nongateway_nodes()?
|
||||
.to_vec();
|
||||
|
||||
let (partition_keys, more, next_start) = read_range(
|
||||
|
|
@ -66,7 +66,7 @@ pub async fn handle_read_index(
|
|||
bytes: *vals.get(&s_bytes).unwrap_or(&0),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
.collect(),
|
||||
more,
|
||||
next_start,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_api_s3"
|
||||
version = "1.3.0"
|
||||
version = "2.2.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
@ -27,10 +27,10 @@ async-compression.workspace = true
|
|||
base64.workspace = true
|
||||
bytes.workspace = true
|
||||
chrono.workspace = true
|
||||
crc32fast.workspace = true
|
||||
crc32c.workspace = true
|
||||
crc-fast.workspace = true
|
||||
thiserror.workspace = true
|
||||
hex.workspace = true
|
||||
hmac.workspace = true
|
||||
tracing.workspace = true
|
||||
md-5.workspace = true
|
||||
pin-project.workspace = true
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
use std::borrow::Cow;
|
||||
use std::sync::Arc;
|
||||
|
||||
use hyper::header;
|
||||
|
|
@ -117,11 +118,11 @@ impl ApiHandler for S3ApiServer {
|
|||
return handle_post_object(garage, req, bucket_name.unwrap()).await;
|
||||
}
|
||||
if let Endpoint::Options = endpoint {
|
||||
let options_res = handle_options_api(garage, &req, bucket_name).await?;
|
||||
let options_res = handle_options_api(garage, &req, bucket_name)?;
|
||||
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
|
||||
}
|
||||
|
||||
let verified_request = verify_request(&garage, req, "s3").await?;
|
||||
let verified_request = verify_request(&garage, req, "s3")?;
|
||||
let req = verified_request.request;
|
||||
let api_key = verified_request.access_key;
|
||||
|
||||
|
|
@ -139,15 +140,11 @@ impl ApiHandler for S3ApiServer {
|
|||
return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
|
||||
}
|
||||
|
||||
let bucket_id = garage
|
||||
.bucket_helper()
|
||||
.resolve_bucket(&bucket_name, &api_key)
|
||||
.await
|
||||
.map_err(pass_helper_error)?;
|
||||
let bucket = garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
.await?;
|
||||
.resolve_bucket_fast(&bucket_name, &api_key)
|
||||
.map_err(pass_helper_error)?;
|
||||
let bucket_id = bucket.id;
|
||||
let bucket_params = bucket.state.into_option().unwrap();
|
||||
|
||||
let allowed = match endpoint.authorization_type() {
|
||||
|
|
@ -352,8 +349,8 @@ impl ApiHandler for S3ApiServer {
|
|||
}
|
||||
|
||||
impl ApiEndpoint for S3ApiEndpoint {
|
||||
fn name(&self) -> &'static str {
|
||||
self.endpoint.name()
|
||||
fn name(&self) -> Cow<'static, str> {
|
||||
Cow::Borrowed(self.endpoint.name())
|
||||
}
|
||||
|
||||
fn add_span_attributes(&self, span: SpanRef<'_>) {
|
||||
|
|
|
|||
|
|
@ -192,21 +192,16 @@ pub async fn handle_create_bucket(
|
|||
let api_key = helper.key().get_existing_key(api_key_id).await?;
|
||||
let key_params = api_key.params().unwrap();
|
||||
|
||||
let existing_bucket = if let Some(Some(bucket_id)) = key_params.local_aliases.get(&bucket_name)
|
||||
{
|
||||
Some(*bucket_id)
|
||||
} else {
|
||||
helper
|
||||
let existing_bucket = helper
|
||||
.bucket()
|
||||
.resolve_global_bucket_name(&bucket_name)
|
||||
.await?
|
||||
};
|
||||
.resolve_bucket(&bucket_name, &api_key.key_id)
|
||||
.await?;
|
||||
|
||||
if let Some(bucket_id) = existing_bucket {
|
||||
if let Some(bucket) = existing_bucket {
|
||||
// Check we have write or owner permission on the bucket,
|
||||
// in that case it's fine, return 200 OK, bucket exists;
|
||||
// otherwise return a forbidden error.
|
||||
let kp = api_key.bucket_permissions(&bucket_id);
|
||||
let kp = api_key.bucket_permissions(&bucket.id);
|
||||
if !(kp.allow_write || kp.allow_owner) {
|
||||
return Err(CommonError::BucketAlreadyExists.into());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ use garage_api_common::helpers::*;
|
|||
use garage_api_common::signature::checksum::*;
|
||||
|
||||
use crate::api_server::{ReqBody, ResBody};
|
||||
use crate::encryption::EncryptionParams;
|
||||
use crate::encryption::{EncryptionParams, OekDerivationInfo};
|
||||
use crate::error::*;
|
||||
use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
|
||||
use crate::multipart;
|
||||
|
|
@ -66,11 +66,37 @@ pub async fn handle_copy(
|
|||
&ctx.garage,
|
||||
req.headers(),
|
||||
&source_version_meta.encryption,
|
||||
OekDerivationInfo::for_object(&source_object, source_version),
|
||||
)?;
|
||||
let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
|
||||
let dest_uuid = gen_uuid();
|
||||
let dest_encryption = EncryptionParams::new_from_headers(
|
||||
&ctx.garage,
|
||||
req.headers(),
|
||||
OekDerivationInfo {
|
||||
bucket_id: ctx.bucket_id,
|
||||
version_id: dest_uuid,
|
||||
object_key: dest_key,
|
||||
},
|
||||
)?;
|
||||
|
||||
let was_multipart = source_version_meta.etag.contains('-') // HACK
|
||||
|| source_object_meta_inner.checksum_type == Some(ChecksumType::Composite);
|
||||
|
||||
// Extract source checksum info before source_object_meta_inner is consumed
|
||||
let source_checksum = source_object_meta_inner.checksum;
|
||||
let source_checksum_type = match (source_object_meta_inner.checksum_type, source_checksum) {
|
||||
(Some(ct), _) => Some(ct),
|
||||
(None, Some(_)) => {
|
||||
// Migrated object from garage v1.x or older
|
||||
// determine checksum type depending if this is a multipart upload or not
|
||||
if was_multipart {
|
||||
Some(ChecksumType::Composite)
|
||||
} else {
|
||||
Some(ChecksumType::FullObject)
|
||||
}
|
||||
}
|
||||
(None, None) => None,
|
||||
};
|
||||
let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
|
||||
|
||||
// If source object has a checksum, the destination object must as well.
|
||||
|
|
@ -79,7 +105,6 @@ pub async fn handle_copy(
|
|||
let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
|
||||
|
||||
// Determine metadata of destination object
|
||||
let was_multipart = source_version_meta.etag.contains('-');
|
||||
let dest_object_meta = ObjectVersionMetaInner {
|
||||
headers: match req.headers().get("x-amz-metadata-directive") {
|
||||
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
|
||||
|
|
@ -99,6 +124,7 @@ pub async fn handle_copy(
|
|||
}
|
||||
},
|
||||
checksum: source_checksum,
|
||||
checksum_type: source_checksum_type,
|
||||
};
|
||||
|
||||
// Do actual object copying
|
||||
|
|
@ -118,8 +144,8 @@ pub async fn handle_copy(
|
|||
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||
|
||||
let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
|
||||
|| source_checksum_algorithm != checksum_algorithm
|
||||
|| (was_multipart && checksum_algorithm.is_some());
|
||||
|| (checksum_algorithm.is_some()
|
||||
&& (was_multipart || checksum_algorithm != source_checksum_algorithm));
|
||||
|
||||
let res = if !must_recopy {
|
||||
// In most cases, we can just copy the metadata and link blocks of the
|
||||
|
|
@ -127,6 +153,7 @@ pub async fn handle_copy(
|
|||
handle_copy_metaonly(
|
||||
ctx,
|
||||
dest_key,
|
||||
dest_uuid,
|
||||
dest_object_meta,
|
||||
dest_encryption,
|
||||
source_version,
|
||||
|
|
@ -135,21 +162,25 @@ pub async fn handle_copy(
|
|||
)
|
||||
.await?
|
||||
} else {
|
||||
let expected_checksum = ExpectedChecksums {
|
||||
md5: None,
|
||||
sha256: None,
|
||||
extra: source_checksum,
|
||||
};
|
||||
let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
|
||||
ChecksumMode::Calculate(checksum_algorithm)
|
||||
} else {
|
||||
ChecksumMode::Verify(&expected_checksum)
|
||||
ChecksumMode::Verify(ExpectedChecksums {
|
||||
md5: None,
|
||||
sha256: None,
|
||||
extra: source_checksum,
|
||||
})
|
||||
};
|
||||
// For multipart uploads that had a composite checksum, set checksum type
|
||||
// to full object as it will be recalculated.
|
||||
let dest_object_meta = ObjectVersionMetaInner {
|
||||
checksum_type: checksum_algorithm.map(|_| ChecksumType::FullObject),
|
||||
..dest_object_meta
|
||||
};
|
||||
// If source and dest encryption use different keys,
|
||||
// we must decrypt content and re-encrypt, so rewrite all data blocks.
|
||||
handle_copy_reencrypt(
|
||||
ctx,
|
||||
dest_key,
|
||||
dest_uuid,
|
||||
dest_object_meta,
|
||||
dest_encryption,
|
||||
source_version,
|
||||
|
|
@ -181,6 +212,7 @@ pub async fn handle_copy(
|
|||
async fn handle_copy_metaonly(
|
||||
ctx: ReqCtx,
|
||||
dest_key: &str,
|
||||
dest_uuid: Uuid,
|
||||
dest_object_meta: ObjectVersionMetaInner,
|
||||
dest_encryption: EncryptionParams,
|
||||
source_version: &ObjectVersion,
|
||||
|
|
@ -194,7 +226,6 @@ async fn handle_copy_metaonly(
|
|||
} = ctx;
|
||||
|
||||
// Generate parameters for copied object
|
||||
let new_uuid = gen_uuid();
|
||||
let new_timestamp = now_msec();
|
||||
|
||||
let new_meta = ObjectVersionMeta {
|
||||
|
|
@ -204,7 +235,7 @@ async fn handle_copy_metaonly(
|
|||
};
|
||||
|
||||
let res = SaveStreamResult {
|
||||
version_uuid: new_uuid,
|
||||
version_uuid: dest_uuid,
|
||||
version_timestamp: new_timestamp,
|
||||
etag: new_meta.etag.clone(),
|
||||
};
|
||||
|
|
@ -216,7 +247,7 @@ async fn handle_copy_metaonly(
|
|||
// bytes is either plaintext before&after or encrypted with the
|
||||
// same keys, so it's ok to just copy it as is
|
||||
let dest_object_version = ObjectVersion {
|
||||
uuid: new_uuid,
|
||||
uuid: dest_uuid,
|
||||
timestamp: new_timestamp,
|
||||
state: ObjectVersionState::Complete(ObjectVersionData::Inline(
|
||||
new_meta,
|
||||
|
|
@ -243,7 +274,7 @@ async fn handle_copy_metaonly(
|
|||
// This holds a reference to the object in the Version table
|
||||
// so that it won't be deleted, e.g. by repair_versions.
|
||||
let tmp_dest_object_version = ObjectVersion {
|
||||
uuid: new_uuid,
|
||||
uuid: dest_uuid,
|
||||
timestamp: new_timestamp,
|
||||
state: ObjectVersionState::Uploading {
|
||||
encryption: new_meta.encryption.clone(),
|
||||
|
|
@ -263,7 +294,7 @@ async fn handle_copy_metaonly(
|
|||
// marked as deleted (they are marked as deleted only if the Version
|
||||
// doesn't exist or is marked as deleted).
|
||||
let mut dest_version = Version::new(
|
||||
new_uuid,
|
||||
dest_uuid,
|
||||
VersionBacklink::Object {
|
||||
bucket_id: dest_bucket_id,
|
||||
key: dest_key.to_string(),
|
||||
|
|
@ -282,7 +313,7 @@ async fn handle_copy_metaonly(
|
|||
.iter()
|
||||
.map(|b| BlockRef {
|
||||
block: b.1.hash,
|
||||
version: new_uuid,
|
||||
version: dest_uuid,
|
||||
deleted: false.into(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
|
@ -298,7 +329,7 @@ async fn handle_copy_metaonly(
|
|||
// with the stuff before, the block's reference counts could be decremented before
|
||||
// they are incremented again for the new version, leading to data being deleted.
|
||||
let dest_object_version = ObjectVersion {
|
||||
uuid: new_uuid,
|
||||
uuid: dest_uuid,
|
||||
timestamp: new_timestamp,
|
||||
state: ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
||||
new_meta,
|
||||
|
|
@ -320,12 +351,13 @@ async fn handle_copy_metaonly(
|
|||
async fn handle_copy_reencrypt(
|
||||
ctx: ReqCtx,
|
||||
dest_key: &str,
|
||||
dest_uuid: Uuid,
|
||||
dest_object_meta: ObjectVersionMetaInner,
|
||||
dest_encryption: EncryptionParams,
|
||||
source_version: &ObjectVersion,
|
||||
source_version_data: &ObjectVersionData,
|
||||
source_encryption: EncryptionParams,
|
||||
checksum_mode: ChecksumMode<'_>,
|
||||
checksum_mode: ChecksumMode,
|
||||
) -> Result<SaveStreamResult, Error> {
|
||||
// basically we will read the source data (decrypt if necessary)
|
||||
// and save that in a new object (encrypt if necessary),
|
||||
|
|
@ -339,6 +371,7 @@ async fn handle_copy_reencrypt(
|
|||
|
||||
save_stream(
|
||||
&ctx,
|
||||
dest_uuid,
|
||||
dest_object_meta,
|
||||
dest_encryption,
|
||||
source_stream.map_err(|e| Error::from(GarageError::from(e))),
|
||||
|
|
@ -362,7 +395,7 @@ pub async fn handle_upload_part_copy(
|
|||
let dest_upload_id = multipart::decode_upload_id(upload_id)?;
|
||||
|
||||
let dest_key = dest_key.to_string();
|
||||
let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!(
|
||||
let (source_object, (dest_object, dest_version, mut dest_mpu)) = futures::try_join!(
|
||||
get_copy_source(&ctx, req),
|
||||
multipart::get_upload(&ctx, &dest_key, &dest_upload_id)
|
||||
)?;
|
||||
|
|
@ -380,7 +413,10 @@ pub async fn handle_upload_part_copy(
|
|||
&garage,
|
||||
req.headers(),
|
||||
&source_version_meta.encryption,
|
||||
OekDerivationInfo::for_object(&source_object, source_object_version),
|
||||
)?;
|
||||
|
||||
let dest_oek_params = OekDerivationInfo::for_object(&dest_object, &dest_version);
|
||||
let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
|
||||
ObjectVersionState::Uploading {
|
||||
encryption,
|
||||
|
|
@ -389,8 +425,12 @@ pub async fn handle_upload_part_copy(
|
|||
} => (encryption, checksum_algorithm),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let (dest_encryption, _) =
|
||||
EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?;
|
||||
let (dest_encryption, _) = EncryptionParams::check_decrypt(
|
||||
&garage,
|
||||
req.headers(),
|
||||
&dest_object_encryption,
|
||||
dest_oek_params,
|
||||
)?;
|
||||
let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption);
|
||||
|
||||
// Check source range is valid
|
||||
|
|
@ -505,7 +545,7 @@ pub async fn handle_upload_part_copy(
|
|||
|
||||
// Now, actually copy the blocks
|
||||
let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
|
||||
.add(dest_object_checksum_algorithm);
|
||||
.add(dest_object_checksum_algorithm.map(|(algo, _)| algo));
|
||||
|
||||
// First, create a stream that is able to read the source blocks
|
||||
// and extract the subrange if necessary.
|
||||
|
|
@ -655,7 +695,7 @@ pub async fn handle_upload_part_copy(
|
|||
|
||||
let checksums = checksummer.finalize();
|
||||
let etag = dest_encryption.etag_from_md5(&checksums.md5);
|
||||
let checksum = checksums.extract(dest_object_checksum_algorithm);
|
||||
let checksum = checksums.extract(dest_object_checksum_algorithm.map(|(algo, _)| algo));
|
||||
|
||||
// Put the part's ETag in the Versiontable
|
||||
dest_mpu.parts.put(
|
||||
|
|
@ -695,16 +735,15 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
|
|||
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
|
||||
|
||||
let (source_bucket, source_key) = parse_bucket_key(©_source, None)?;
|
||||
let source_bucket_id = garage
|
||||
let source_bucket = garage
|
||||
.bucket_helper()
|
||||
.resolve_bucket(&source_bucket.to_string(), api_key)
|
||||
.await
|
||||
.resolve_bucket_fast(&source_bucket.to_string(), api_key)
|
||||
.map_err(pass_helper_error)?;
|
||||
|
||||
if !api_key.allow_read(&source_bucket_id) {
|
||||
if !api_key.allow_read(&source_bucket.id) {
|
||||
return Err(Error::forbidden(format!(
|
||||
"Reading from bucket {} not allowed for this key",
|
||||
source_bucket
|
||||
"Reading from bucket {:?} not allowed for this key",
|
||||
source_bucket.id
|
||||
)));
|
||||
}
|
||||
|
||||
|
|
@ -712,7 +751,7 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
|
|||
|
||||
let source_object = garage
|
||||
.object_table
|
||||
.get(&source_bucket_id, &source_key.to_string())
|
||||
.get(&source_bucket.id, &source_key.to_string())
|
||||
.await?
|
||||
.ok_or(Error::NoSuchKey)?;
|
||||
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
|
|||
.body(string_body(xml))?)
|
||||
} else {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::NO_CONTENT)
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(empty_body())?)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ use aes_gcm::{
|
|||
};
|
||||
use base64::prelude::*;
|
||||
use bytes::Bytes;
|
||||
use sha2::Sha256;
|
||||
|
||||
use futures::stream::Stream;
|
||||
use futures::task;
|
||||
|
|
@ -21,12 +22,12 @@ use http::header::{HeaderMap, HeaderName, HeaderValue};
|
|||
use garage_net::bytes_buf::BytesBuf;
|
||||
use garage_net::stream::{stream_asyncread, ByteStream};
|
||||
use garage_rpc::rpc_helper::OrderTag;
|
||||
use garage_util::data::Hash;
|
||||
use garage_util::data::{Hash, Uuid};
|
||||
use garage_util::error::Error as GarageError;
|
||||
use garage_util::migrate::Migrate;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
|
||||
use garage_model::s3::object_table::*;
|
||||
|
||||
use garage_api_common::common_error::*;
|
||||
use garage_api_common::signature::checksum::Md5Checksum;
|
||||
|
|
@ -64,32 +65,45 @@ const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16;
|
|||
pub enum EncryptionParams {
|
||||
Plaintext,
|
||||
SseC {
|
||||
/// the value of x-amz-server-side-encryption-customer-key
|
||||
client_key: Key<Aes256Gcm>,
|
||||
/// the value of x-amz-server-side-encryption-customer-key-md5
|
||||
client_key_md5: Md5Output,
|
||||
/// the object encryption key, for uploads created in garage v2+
|
||||
object_key: Option<Key<Aes256Gcm>>,
|
||||
/// the compression level used for compressing data blocks
|
||||
compression_level: Option<i32>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct OekDerivationInfo<'a> {
|
||||
pub bucket_id: Uuid,
|
||||
pub version_id: Uuid,
|
||||
pub object_key: &'a str,
|
||||
}
|
||||
|
||||
impl EncryptionParams {
|
||||
pub fn is_encrypted(&self) -> bool {
|
||||
!matches!(self, Self::Plaintext)
|
||||
}
|
||||
|
||||
pub fn is_same(a: &Self, b: &Self) -> bool {
|
||||
let relevant_info = |x: &Self| match x {
|
||||
Self::Plaintext => None,
|
||||
Self::SseC {
|
||||
client_key,
|
||||
compression_level,
|
||||
..
|
||||
} => Some((*client_key, compression_level.is_some())),
|
||||
};
|
||||
relevant_info(a) == relevant_info(b)
|
||||
// This function is used in CopyObject and UploadPartCopy to determine
|
||||
// whether the object must be re-encrypted. If this returns true,
|
||||
// data blocks are reused as-is. Since Garage v2, we are using
|
||||
// object-specific encryption keys, so we know that if both source
|
||||
// and destination are encrypted, it can't be with the same key.
|
||||
match (a, b) {
|
||||
(Self::Plaintext, Self::Plaintext) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_from_headers(
|
||||
garage: &Garage,
|
||||
headers: &HeaderMap,
|
||||
oek_info: OekDerivationInfo<'_>,
|
||||
) -> Result<EncryptionParams, Error> {
|
||||
let key = parse_request_headers(
|
||||
headers,
|
||||
|
|
@ -101,6 +115,7 @@ impl EncryptionParams {
|
|||
Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC {
|
||||
client_key,
|
||||
client_key_md5,
|
||||
object_key: Some(oek_info.derive_oek(&client_key)),
|
||||
compression_level: garage.config.compression_level,
|
||||
}),
|
||||
None => Ok(EncryptionParams::Plaintext),
|
||||
|
|
@ -126,6 +141,7 @@ impl EncryptionParams {
|
|||
garage: &Garage,
|
||||
headers: &HeaderMap,
|
||||
obj_enc: &'a ObjectVersionEncryption,
|
||||
oek_info: OekDerivationInfo<'_>,
|
||||
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
|
||||
let key = parse_request_headers(
|
||||
headers,
|
||||
|
|
@ -133,13 +149,14 @@ impl EncryptionParams {
|
|||
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
|
||||
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
|
||||
)?;
|
||||
Self::check_decrypt_common(garage, key, obj_enc)
|
||||
Self::check_decrypt_common(garage, key, obj_enc, oek_info)
|
||||
}
|
||||
|
||||
pub fn check_decrypt_for_copy_source<'a>(
|
||||
garage: &Garage,
|
||||
headers: &HeaderMap,
|
||||
obj_enc: &'a ObjectVersionEncryption,
|
||||
oek_info: OekDerivationInfo<'_>,
|
||||
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
|
||||
let key = parse_request_headers(
|
||||
headers,
|
||||
|
|
@ -147,22 +164,32 @@ impl EncryptionParams {
|
|||
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
|
||||
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
|
||||
)?;
|
||||
Self::check_decrypt_common(garage, key, obj_enc)
|
||||
Self::check_decrypt_common(garage, key, obj_enc, oek_info)
|
||||
}
|
||||
|
||||
fn check_decrypt_common<'a>(
|
||||
garage: &Garage,
|
||||
key: Option<(Key<Aes256Gcm>, Md5Output)>,
|
||||
obj_enc: &'a ObjectVersionEncryption,
|
||||
oek_info: OekDerivationInfo<'_>,
|
||||
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
|
||||
match (key, &obj_enc) {
|
||||
(
|
||||
Some((client_key, client_key_md5)),
|
||||
ObjectVersionEncryption::SseC { inner, compressed },
|
||||
ObjectVersionEncryption::SseC {
|
||||
inner,
|
||||
compressed,
|
||||
use_oek,
|
||||
},
|
||||
) => {
|
||||
let enc = Self::SseC {
|
||||
client_key,
|
||||
client_key_md5,
|
||||
object_key: if *use_oek {
|
||||
Some(oek_info.derive_oek(&client_key))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
compression_level: if *compressed {
|
||||
Some(garage.config.compression_level.unwrap_or(1))
|
||||
} else {
|
||||
|
|
@ -193,13 +220,16 @@ impl EncryptionParams {
|
|||
) -> Result<ObjectVersionEncryption, Error> {
|
||||
match self {
|
||||
Self::SseC {
|
||||
compression_level, ..
|
||||
compression_level,
|
||||
object_key,
|
||||
..
|
||||
} => {
|
||||
let plaintext = meta.encode().map_err(GarageError::from)?;
|
||||
let ciphertext = self.encrypt_blob(&plaintext)?;
|
||||
Ok(ObjectVersionEncryption::SseC {
|
||||
inner: ciphertext.into_owned(),
|
||||
compressed: compression_level.is_some(),
|
||||
use_oek: object_key.is_some(),
|
||||
})
|
||||
}
|
||||
Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
|
||||
|
|
@ -228,24 +258,37 @@ impl EncryptionParams {
|
|||
// This is used for encrypting object metadata and inlined data for small objects.
|
||||
// This does not compress anything.
|
||||
|
||||
pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
|
||||
fn cipher(&self) -> Option<Aes256Gcm> {
|
||||
match self {
|
||||
Self::SseC { client_key, .. } => {
|
||||
let cipher = Aes256Gcm::new(&client_key);
|
||||
Self::SseC {
|
||||
object_key: Some(oek),
|
||||
..
|
||||
} => Some(Aes256Gcm::new(&oek)),
|
||||
Self::SseC {
|
||||
client_key,
|
||||
object_key: None,
|
||||
..
|
||||
} => Some(Aes256Gcm::new(&client_key)),
|
||||
Self::Plaintext => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
|
||||
match self.cipher() {
|
||||
Some(cipher) => {
|
||||
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
|
||||
let ciphertext = cipher
|
||||
.encrypt(&nonce, blob)
|
||||
.ok_or_internal_error("Encryption failed")?;
|
||||
Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat()))
|
||||
}
|
||||
Self::Plaintext => Ok(Cow::Borrowed(blob)),
|
||||
None => Ok(Cow::Borrowed(blob)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
|
||||
match self {
|
||||
Self::SseC { client_key, .. } => {
|
||||
let cipher = Aes256Gcm::new(&client_key);
|
||||
match self.cipher() {
|
||||
Some(cipher) => {
|
||||
let nonce_size = <Aes256Gcm as AeadCore>::NonceSize::to_usize();
|
||||
let nonce = Nonce::from_slice(
|
||||
blob.get(..nonce_size)
|
||||
|
|
@ -258,7 +301,7 @@ impl EncryptionParams {
|
|||
)?;
|
||||
Ok(Cow::Owned(plaintext))
|
||||
}
|
||||
Self::Plaintext => Ok(Cow::Borrowed(blob)),
|
||||
None => Ok(Cow::Borrowed(blob)),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -284,10 +327,12 @@ impl EncryptionParams {
|
|||
Self::Plaintext => stream,
|
||||
Self::SseC {
|
||||
client_key,
|
||||
object_key,
|
||||
compression_level,
|
||||
..
|
||||
} => {
|
||||
let plaintext = DecryptStream::new(stream, *client_key);
|
||||
let key = object_key.as_ref().unwrap_or(client_key);
|
||||
let plaintext = DecryptStream::new(stream, *key);
|
||||
if compression_level.is_some() {
|
||||
let reader = stream_asyncread(Box::pin(plaintext));
|
||||
let reader = BufReader::new(reader);
|
||||
|
|
@ -307,9 +352,12 @@ impl EncryptionParams {
|
|||
Self::Plaintext => Ok(block),
|
||||
Self::SseC {
|
||||
client_key,
|
||||
object_key,
|
||||
compression_level,
|
||||
..
|
||||
} => {
|
||||
let key = object_key.as_ref().unwrap_or(client_key);
|
||||
|
||||
let block = if let Some(level) = compression_level {
|
||||
Cow::Owned(
|
||||
garage_block::zstd_encode(block.as_ref(), *level)
|
||||
|
|
@ -325,7 +373,7 @@ impl EncryptionParams {
|
|||
OsRng.fill_bytes(&mut nonce);
|
||||
ret.extend_from_slice(nonce.as_slice());
|
||||
|
||||
let mut cipher = EncryptorLE31::<Aes256Gcm>::new(&client_key, &nonce);
|
||||
let mut cipher = EncryptorLE31::<Aes256Gcm>::new(key, &nonce);
|
||||
let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable();
|
||||
|
||||
if iter.peek().is_none() {
|
||||
|
|
@ -361,6 +409,13 @@ impl EncryptionParams {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn has_encryption_header(headers: &HeaderMap) -> bool {
|
||||
match headers.get(X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM) {
|
||||
Some(h) => h.as_bytes() == CUSTOMER_ALGORITHM_AES256,
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_request_headers(
|
||||
headers: &HeaderMap,
|
||||
alg_header: &HeaderName,
|
||||
|
|
@ -420,6 +475,30 @@ fn parse_request_headers(
|
|||
}
|
||||
}
|
||||
|
||||
impl<'a> OekDerivationInfo<'a> {
|
||||
pub fn for_object<'b>(object: &'a Object, version: &'b ObjectVersion) -> Self {
|
||||
Self {
|
||||
bucket_id: object.bucket_id,
|
||||
version_id: version.uuid,
|
||||
object_key: &object.key,
|
||||
}
|
||||
}
|
||||
|
||||
fn derive_oek(&self, client_key: &Key<Aes256Gcm>) -> Key<Aes256Gcm> {
|
||||
use hmac::{Hmac, Mac};
|
||||
|
||||
// info = bucket_id + object_name + version_uuid + "garage-object-encryption-key"
|
||||
// oek = hmac_sha256(ssec_key, info)
|
||||
let mut hmac = <Hmac<Sha256> as Mac>::new_from_slice(client_key.as_slice())
|
||||
.expect("create hmac-sha256");
|
||||
hmac.update(b"garage-object-encryption-key");
|
||||
hmac.update(self.bucket_id.as_slice());
|
||||
hmac.update(self.version_id.as_slice());
|
||||
hmac.update(self.object_key.as_bytes());
|
||||
hmac.finalize().into_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
// ---- encrypt & decrypt streams ----
|
||||
|
||||
#[pin_project::pin_project]
|
||||
|
|
@ -569,6 +648,7 @@ mod tests {
|
|||
let enc = EncryptionParams::SseC {
|
||||
client_key: Aes256Gcm::generate_key(&mut OsRng),
|
||||
client_key_md5: Default::default(), // not needed
|
||||
object_key: Some(Aes256Gcm::generate_key(&mut OsRng)),
|
||||
compression_level,
|
||||
};
|
||||
|
||||
|
|
|
|||