Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2026-05-15 05:36:53 -04:00)

Compare commits: db-no-unsa ... main-v1 (390 commits)
Commit SHA1s:

```
b6b18427a5 9987166b2b b72b090a09 8551aefed4 47bf5d9fb0 5df37dae5e 44af0bdab3 a7d6620e18 8eb12755e4 c685a2cbaf
969f42a970 424d4f8d4d bf5290036f 4efc8bac07 f3dcc39903 43e02920c2 dcc2fe4ac5 e3a5ec6ef6 4d124e1c76 d769a7be5d
511cf0c6ec 95693d45b2 ca296477f3 ca3b4a050d a057ab23ea 58bc65b9a8 ac851d6dee eac2aa6fe4 1e0201ada2 82297371bf
174f4f01a8 1aac7b4875 b43c58cbe5 9481ac428e 1c29d04cc5 b48a8eaa1f 42fd8583bd 236af3a958 4b1fdbef55 0f1b488be0
0bbf63ee0e 879d941d7b d726cf0299 0c7aeab6f8 5687fc0375 97f1e9ab52 60b1d78b56 4c895a7186 c3b5cbf212 57a467b5c0
6cf6db5c61 d5a57e3e13 5cf354acb4 2b007ddea3 c8599a8636 0b901bf291 c8c20d6f47 e5db610e4c 65c6f8adea 54b9bf02a3
469153233f 90bba5889a a64b567d43 6ea86db8cd aa69c06f2b a6c6c44310 96d7713915 d64498c3d3 b340599e68 5448012b27
ce34d11a65 8cb7623ebd 5469c95877 f930c6f643 afcb22bf16 cc29a40d51 0f3f180c3e 70cf6004ae c7571ff89b 1b42919bf7
3f4ab3a4a3 3a4afc04a9 fbf03e9378 9eb07d4c7b 85ee4f5d8c 328072d122 26bc807905 a9f5f242b2 ae98abca5c adfa44ad70
47143b88ad 8843aa92fa b601b3e46d a19d2f16e2 fc8fc60f6d 77079a1498 2a4f729b57 1b042e379e ffbce0f689 37e5621dde
6529ff379a a8d73682a4 8654eb19bf 54ea412188 2ade8c86f6 b15e2cbb6c 0fd1b7342b be16bc7a05 bfaa1ca6b7 de8eeab4ad
ae3f7ee76c 2dc3a6dbbe c6bc3f229b bba9202f31 a605a80806 539af12d21 a2a9e3cec4 14274bc13c bf4691d98a ad151cb1dc
3c20984a08 e6e4e051a1 9b38cba6f3 4ef954d176 02498a93d0 4caad5425d 9ec3f8cc3c 14d2f2b18d a7d845a999 dd20e5d22a
6906a4ff12 9053782d71 c96be1a9a8 98e56490a1 e791ccec8f d605c4fed1 0ce5f7eb00 516255321f f3b05ff771 e254cc20e5
12f15c4c2b 42c5d02cdf 4689b10448 156b10ee65 8647ebf003 67d7c0769b 09ed5ab8cc a0ea28b0da c5237c31e7 f87943a39d
c0846c56fe 1cb0ae10a8 1a8f74fc94 2191620af5 bf27a3ec98 f64ec6e542 6d38907dac cfe8e8d45c f6e805e7db 45e10e55f9
730bfee753 ccab0e4ae5 abb60dcf7e f8b0817ddc 21c0dda16a 658541d812 c5df820e2c a04d6cd5b8 44a896f9b5 cee7560fc1
2f0c5ca220 859b38b0d2 2729a71d9d c9d00f5f7b 89c944ebd6 24470377c9 5b26545abf 9c7e3c7bde 165f9316e2 a94adf804f
e4c9a8cd53 9312c6bbcb fdf4dad728 6820b69f30 d0104b9f9b 3fe8db9e52 627a37fe9f 2f55889835 8b9cc5ca3f a1533d2919
c1b39d9ba1 d84308c413 63f20bdeab a2e134f036 06aa4b604f d3226bfa91 af67626ab2 5475da8ea8 620dc58560 47e87c8739
34599bff51 ec1a475923 b9df2d1ad1 390a5d97fe 4dc2bc337f 5dd2791981 d601f31186 e4de7bdfd5 d18c5ad0ff 3d5e9a027e
f4ca7758b4 4563313f87 afa28706e5 84f1db91c4 9fa20d45be 9330fd79d3 83f6928ff7 ab71544499 991edbe02c 9f3c7c3720
bfde9152b8 7bb042f0b7 a1d081ee84 e8fa89e834 beedc9fd11 6d798c640f d4e3e60920 43402c9619 efa6f3d85e 74a1b49b13
23d57b89dc 5e3e1f4453 59c153d280 bb3e0f7d22 0156e40c9d f6f88065ad 591bd808ec 294cb99409 2eb9fcae20 58b9eb46fc
255b01b626 58a765c51f 1c431b8457 39ac034de5 8ddb0dd485 83887a8519 0a15db6960 295237476e 9d83605736 4b1a7fb5e3
b6aaebaf4c 7bbc8fec50 6689800986 d2246baab7 afac1d4d4a 6ca99fd02c b568bb863d b8f301a61d 428ad2075d 3661a597fa
0fd3c0e794 4c1bf42192 906c8708fd 747889a096 feb09a4bc6 aa8bc6aa88 aba7902995 78de7b5bde 9bd9e392ba 116ad479a8
b6a58c5c16 2b0bfa9b18 a18b3f0d1f 7a143f46fc c731f0291a 34453bc9c2 6da1353541 bd71728874 51ced60366 586957b4b7
8d2bb4afeb c26f32b769 8062ec7b4b eb416a02fb 74363c9060 615698df7d 7061fa5a56 8881930cdd 52f6c0760b 5b0602c7e9
182b2af7e5 baf32c9575 3dda1ee4f6 aa7ce9e97c 8d62616ec0 bd6fe72c06 4c9e8ef625 3e711bc110 7fb66b4944 679ae8bcbb
2a93ad0c84 f190032589 3a87bd1370 9302cd42f0 060ad0da32 a5ed1161c6 222674432b 070a8ad110 770384cae1 a0f6bc5b7f
88c734bbd9 d38509ef4b 39b37833c5 a2c1de646b 15847a636a 123d3e1f04 a6e4b96ca9 b442b0e35e 0c3b198b22 33c2086d9e
5ad1e55ccf 1779fd40c0 ff093ddbb8 90e3c2af91 b47706809c 126e0f47a3 738bb2f09c 7dd7cb5759 8b663d8c5b c051db8204
50669b3e76 e5838b4837 87dfaf2eb9 554437254e afad62939e 8bfc16ba7d ecf641d88c 75cd14926d e1dc84e123 85f580cbde
0d3e285d13 25c196f34d 4eba32f29f 32f1786f9f 01a0bd5410 c0eeb0b0f3 51d11b4b26 f7cd4eb600 95eb8808e8 e0a4fc097a
73551e9a2d 80f81fa6f3 f267609343 cdde0f19ee 74949c69cb 7e0107c47d 3844110cd0 961b4f9af3 5225a81dee e835196940
ba33bb31f1 30abf7e086 84018be862 091e693670 fe8a7819fa ce69dc302c 26310f3242 65853a4863 783b586de9 3eab639c14
3165ab926c dc0b78cdb8 693b89b94b cf344d73d5 0038ca8a78 1a0bffae34 b55f52a9b7 e8f9718ccd fd2e19bf1b 8cf3d24875
a68c37555d 1e42808a59 8dff278b72 a80ce6ab5a 990205dc3b 7c86ff6c37 62b01d8705 422d45b659 a7dddebedd 8b35a946d9
```
221 changed files with 10447 additions and 11955 deletions
```diff
@@ -1,3 +0,0 @@
-[target.x86_64-unknown-linux-gnu]
-linker = "clang"
-rustflags = ["-C", "link-arg=-fuse-ld=mold"]
```
```diff
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - push
@@ -9,37 +12,33 @@ when:

 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-shell --attr devShell --run "cargo fmt -- --check"
+      - nix-build -j4 --attr flakePackages.fmt

   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build -j4 --attr flakePackages.dev

-  - name: unit + func tests
-    image: nixpkgs/nix:nixos-22.05
-    environment:
-      GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
-      GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
+  - name: unit + func tests (lmdb)
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
-      - nix-build --no-build-output --attr test.amd64
-      - ./result/bin/garage_db-*
-      - ./result/bin/garage_api-*
-      - ./result/bin/garage_model-*
-      - ./result/bin/garage_rpc-*
-      - ./result/bin/garage_table-*
-      - ./result/bin/garage_util-*
-      - ./result/bin/garage_web-*
-      - ./result/bin/garage-*
-      - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
-      - rm result
-      - rm -rv tmp-garage-integration
+      - nix-build -j4 --attr flakePackages.tests-lmdb
+
+  - name: unit + func tests (sqlite)
+    image: nixpkgs/nix:nixos-24.05
+    commands:
+      - nix-build -j4 --attr flakePackages.tests-sqlite
+
+  - name: unit + func tests (fjall)
+    image: nixpkgs/nix:nixos-24.05
+    commands:
+      - nix-build -j4 --attr flakePackages.tests-fjall

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
+      - nix-build -j4 --attr flakePackages.dev
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
+    depends_on: [ build ]
```
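These pipeline steps can be reproduced locally before pushing; a minimal sketch, assuming a checkout of the repository with Nix installed (attribute names are taken from the pipeline above):

```bash
# Run the same flake-based checks the CI steps above invoke.
nix-build -j4 --attr flakePackages.fmt          # formatting check
nix-build -j4 --attr flakePackages.dev          # debug build
nix-build -j4 --attr flakePackages.tests-lmdb   # unit + func tests (lmdb)
nix-build -j4 --attr flakePackages.tests-sqlite # unit + func tests (sqlite)
nix-build -j4 --attr flakePackages.tests-fjall  # unit + func tests (fjall)
```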
```diff
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -8,20 +11,21 @@ depends_on:

 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-22.05
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+    image: nixpkgs/nix:nixos-24.05
+    environment:
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
       - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-22.05
-    secrets:
-      - docker_auth
+    image: nixpkgs/nix:nixos-24.05
+    environment:
+      DOCKER_AUTH:
+        from_secret: docker_auth
     commands:
       - mkdir -p /root/.docker
       - echo $DOCKER_AUTH > /root/.docker/config.json
```
```diff
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -16,18 +19,17 @@ matrix:

 steps:
   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
-      - nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"
+      - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
     when:
@@ -37,7 +39,7 @@ steps:
       ARCH: i386

   - name: upgrade tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
     when:
@@ -45,24 +47,23 @@ steps:
       ARCH: amd64

   - name: push static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       TARGET: "${TARGET}"
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - nix-shell --attr ci --run "to_s3"

   - name: docker build and publish
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
-    secrets:
-      - docker_auth
+      DOCKER_AUTH:
+        from_secret: docker_auth
     commands:
       - mkdir -p /root/.docker
       - echo $DOCKER_AUTH > /root/.docker/config.json
```
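The build and static-binary checks can also be run outside CI; a hedged sketch for the amd64 target (substituting `git describe` for `CI_COMMIT_TAG`/`CI_COMMIT_SHA` is an assumption):

```bash
# Build the musl release binary, then verify it is fully static,
# mirroring the "build" and "check is static binary" steps above.
nix-build --attr releasePackages.amd64 \
  --argstr git_version "$(git describe --tags --always)"
nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
```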
Cargo.lock (generated, 2972 lines changed): file diff suppressed because it is too large.
Cargo.toml (68 lines changed):

```diff
@@ -8,7 +8,10 @@ members = [
 	"src/table",
 	"src/block",
 	"src/model",
-	"src/api",
+	"src/api/common",
+	"src/api/s3",
+	"src/api/k2v",
+	"src/api/admin",
 	"src/web",
 	"src/garage",
 	"src/k2v-client",
@@ -21,15 +24,18 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "0.10.0", path = "src/api" }
-garage_block = { version = "0.10.0", path = "src/block" }
-garage_db = { version = "0.10.0", path = "src/db", default-features = false }
-garage_model = { version = "0.10.0", path = "src/model", default-features = false }
-garage_net = { version = "0.10.0", path = "src/net" }
-garage_rpc = { version = "0.10.0", path = "src/rpc" }
-garage_table = { version = "0.10.0", path = "src/table" }
-garage_util = { version = "0.10.0", path = "src/util" }
-garage_web = { version = "0.10.0", path = "src/web" }
+garage_api_common = { version = "1.3.1", path = "src/api/common" }
+garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
+garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
+garage_block = { version = "1.3.1", path = "src/block" }
+garage_db = { version = "1.3.1", path = "src/db", default-features = false }
+garage_model = { version = "1.3.1", path = "src/model", default-features = false }
+garage_net = { version = "1.3.1", path = "src/net" }
+garage_rpc = { version = "1.3.1", path = "src/rpc" }
+garage_table = { version = "1.3.1", path = "src/table" }
+garage_util = { version = "1.3.1", path = "src/util" }
+garage_web = { version = "1.3.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -43,25 +49,27 @@ bytes = "1.0"
 bytesize = "1.1"
 cfg-if = "1.0"
 chrono = "0.4"
+crc32fast = "1.4"
+crc32c = "0.6"
 crypto-common = "0.1"
-digest = "0.10"
-err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"
 hexdump = "0.1"
 hmac = "0.12"
-idna = "0.5"
 itertools = "0.12"
+ipnet = "2.9.0"
 lazy_static = "1.4"
 md-5 = "0.10"
 mktemp = "0.5"
-nix = { version = "0.27", default-features = false, features = ["fs"] }
+nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
+parking_lot = "0.12"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
+sha1 = "0.10"
 sha2 = "0.10"
 timeago = { version = "0.4", default-features = false }
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
@@ -73,11 +81,16 @@ kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
 clap = { version = "4.1", features = ["derive", "env"] }
 pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
+syslog-tracing = "0.3"
 tracing = "0.1"
+tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.30.0"
+rusqlite = "0.37"
+r2d2 = "0.8"
+r2d2_sqlite = "0.31"
+fjall = "2.4"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -120,26 +133,21 @@ opentelemetry-contrib = "0.9"
 prometheus = "0.13"

 # used by the k2v-client crate only
-aws-sigv4 = { version = "1.1" }
-hyper-rustls = { version = "0.26", features = ["http2"] }
+aws-sigv4 = { version = "1.1", default-features = false }
+hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
 log = "0.4"
-thiserror = "1.0"
+thiserror = "2.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"
-aws-config = "1.1.4"
-aws-sdk-config = "1.13"
-aws-sdk-s3 = "1.14"
+aws-smithy-runtime = { version = "1.8", default-features = false, features = ["tls-rustls"] }
+aws-sdk-config = { version = "1.62", default-features = false }
+aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }

-[profile.dev]
-#lto = "thin" # disabled for now, adds 2-4 min to each CI build
-lto = "off"
-
 [profile.release]
-lto = true
-codegen-units = 1
-opt-level = "s"
-strip = true
+lto = "thin"
+codegen-units = 16
+opt-level = 3
+strip = "debuginfo"
```
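With the API crate split into `common`, `s3`, `k2v`, and `admin`, the per-crate dependency graphs can be inspected from the workspace root; a small sketch (crate names from the table above, `--depth 1` chosen just to keep the output short):

```bash
# Show the direct dependencies of each newly split API crate.
cargo tree -p garage_api_common --depth 1
cargo tree -p garage_api_s3 --depth 1
cargo tree -p garage_api_k2v --depth 1
cargo tree -p garage_api_admin --depth 1
```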
```diff
@@ -3,5 +3,5 @@ FROM scratch
 ENV RUST_BACKTRACE=1
 ENV RUST_LOG=garage=info

-COPY result-bin/bin/garage /
+COPY result/bin/garage /
 CMD [ "/garage", "server"]
```
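Since the image now copies the binary from `result/bin/garage`, the default `./result` symlink produced by `nix-build`, a local image build might look like this sketch (the image tag is an assumption):

```bash
# Build the static binary, then assemble the scratch-based image;
# the COPY path in the Dockerfile matches nix-build's ./result link.
nix-build --attr releasePackages.amd64
docker build -t garage:local .
```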
Makefile (11 lines changed):

```diff
@@ -1,13 +1,8 @@
-.PHONY: doc all release shell run1 run2 run3
+.PHONY: doc all run1 run2 run3

 all:
-	clear; cargo build
-
-release:
-	nix-build --attr pkgs.amd64.release --no-build-output
-
-shell:
-	nix-shell
+	clear
+	cargo build

 # ----
```
default.nix (57 lines changed):

```diff
@@ -3,53 +3,22 @@
 with import ./nix/common.nix;

 let
-  pkgs = import pkgsSrc { };
+  pkgs = import nixpkgs { };
   compile = import ./nix/compile.nix;

-  build_debug_and_release = (target: {
-    debug = (compile {
-      inherit system target git_version pkgsSrc cargo2nixOverlay;
-      release = false;
-    }).workspace.garage { compileMode = "build"; };
-    release = (compile {
-      inherit system target git_version pkgsSrc cargo2nixOverlay;
-      release = true;
-    }).workspace.garage { compileMode = "build"; };
-  });
-
-  test = (rustPkgs:
-    pkgs.symlinkJoin {
-      name = "garage-tests";
-      paths =
-        builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
-        (builtins.attrNames rustPkgs.workspace);
-    });
+  build_release = target: (compile {
+    inherit target system git_version nixpkgs;
+    crane = flake.inputs.crane;
+    rust-overlay = flake.inputs.rust-overlay;
+    release = true;
+  }).garage;

 in {
-  pkgs = {
-    amd64 = build_debug_and_release "x86_64-unknown-linux-musl";
-    i386 = build_debug_and_release "i686-unknown-linux-musl";
-    arm64 = build_debug_and_release "aarch64-unknown-linux-musl";
-    arm = build_debug_and_release "armv6l-unknown-linux-musleabihf";
-  };
-  test = {
-    amd64 = test (compile {
-      inherit system git_version pkgsSrc cargo2nixOverlay;
-      target = "x86_64-unknown-linux-musl";
-      features = [
-        "garage/bundled-libs"
-        "garage/k2v"
-        "garage/lmdb"
-        "garage/sqlite"
-      ];
-    });
-  };
-  clippy = {
-    amd64 = (compile {
-      inherit system git_version pkgsSrc cargo2nixOverlay;
-      target = "x86_64-unknown-linux-musl";
-      compiler = "clippy";
-    }).workspace.garage { compileMode = "build"; };
-  };
+  releasePackages = {
+    amd64 = build_release "x86_64-unknown-linux-musl";
+    i386 = build_release "i686-unknown-linux-musl";
+    arm64 = build_release "aarch64-unknown-linux-musl";
+    arm = build_release "armv6l-unknown-linux-musleabihf";
+  };
+  flakePackages = flake.packages.${system};
 }
```
```diff
@@ -687,7 +687,7 @@ paths:
       operationId: "GetBucketInfo"
       summary: "Get a bucket"
       description: |
-        Given a bucket identifier (`id`) or a global alias (`alias`), get its information.
+        Given a bucket identifier (`id`) or a global alias (`globalAlias`), get its information.
         It includes its aliases, its web configuration, keys that have some permissions
         on it, some statistics (number of objects, size), number of dangling multipart uploads,
         and its quotas (if any).
@@ -701,7 +701,7 @@ paths:
           example: "b4018dc61b27ccb5c64ec1b24f53454bbbd180697c758c4d47a22a8921864a87"
           schema:
             type: string
-        - name: alias
+        - name: globalAlias
           in: query
           description: |
            The exact global alias of one of the existing buckets.
```
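After this rename, clients of the admin API must send `globalAlias` rather than `alias` in the query string; a hedged request sketch (host, port, API version prefix, and the bearer token are all assumptions; adjust to your deployment):

```bash
# Look up a bucket by its exact global alias on the admin API.
# GARAGE_ADMIN_TOKEN must hold a valid admin API bearer token.
curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  "http://localhost:3903/v1/GetBucketInfo?globalAlias=my-bucket"
```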
```diff
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="region",
+  region="garage",
 )
 ```
```
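The same region value applies to any S3 client, not only Minio; for instance with the AWS CLI (endpoint and credentials below are placeholders):

```bash
# List buckets on a local Garage instance, forcing the "garage" region.
export AWS_ACCESS_KEY_ID="GKyourapikey"
export AWS_SECRET_ACCESS_KEY="abcd[...]1234"
aws --endpoint-url http://localhost:3900 --region garage s3 ls
```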
```diff
@@ -12,7 +12,7 @@ In this section, we cover the following web applications:
 | [Mastodon](#mastodon)     | ✅       | Natively supported       |
 | [Matrix](#matrix)         | ✅       | Tested with `synapse-s3-storage-provider`       |
 | [ejabberd](#ejabberd)     | ✅       | `mod_s3_upload`       |
-| [Pixelfed](#pixelfed)     | ❓       | Not yet tested       |
+| [Pixelfed](#pixelfed)     | ✅       | Natively supported       |
 | [Pleroma](#pleroma)       | ❓       | Not yet tested       |
 | [Lemmy](#lemmy)           | ✅       | Supported with pict-rs       |
 | [Funkwhale](#funkwhale)   | ❓       | Not yet tested       |
@@ -69,7 +69,7 @@ $CONFIG = array(
         'hostname' => '127.0.0.1', // Can also be a domain name, eg. garage.example.com
         'port' => 3900, // Put your reverse proxy port or your S3 API port
         'use_ssl' => false, // Set it to true if you have a TLS enabled reverse proxy
-        'region' => 'garage', // Garage has only one region named "garage"
+        'region' => 'garage', // Garage default region is named "garage", edit according to your cluster config
         'use_path_style' => true // Garage supports only path style, must be set to true
     ],
 ],
@@ -135,7 +135,7 @@ bucket but doesn't also know the secret encryption key.
 *Click on the picture to zoom*

 Add a new external storage. Put what you want in "folder name" (eg. "shared"). Select "Amazon S3". Keep "Access Key" for the Authentication field.
-In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region (garage). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.
+In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region ("garage" if you use the default, or the one you configured in your `garage.toml`). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.

 Now go to your "Files" app and a new "linked folder" has appeared with the name you chose earlier (eg. "shared").
@@ -191,10 +191,10 @@ garage key create peertube-key

 Keep the Key ID and the Secret key in a pad, they will be needed later.

-We need two buckets, one for normal videos (named peertube-video) and one for webtorrent videos (named peertube-playlist).
+We need two buckets, one for normal videos (named peertube-videos) and one for webtorrent videos (named peertube-playlists).
 ```bash
 garage bucket create peertube-videos
-garage bucket create peertube-playlist
+garage bucket create peertube-playlists
 ```

 Now we allow our key to read and write on these buckets:
@@ -238,7 +238,7 @@ object_storage:
   # Put localhost only if you have a garage instance running on that node
   endpoint: 'http://localhost:3900' # or "garage.example.com" if you have TLS on port 443

-  # Garage supports only one region for now, named garage
+  # Garage default region is named "garage", edit according to your config
   region: 'garage'

   credentials:
@@ -253,7 +253,7 @@ object_storage:
   proxify_private_files: false

   streaming_playlists:
-    bucket_name: 'peertube-playlist'
+    bucket_name: 'peertube-playlists'

     # Keep it empty for our example
     prefix: ''
```
```diff
@@ -335,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t

 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
+$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
@@ -353,8 +354,6 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```

-Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
-
 ### Migrating your data

 Data migration should be done with an efficient S3 client.
@@ -442,7 +441,7 @@ media_storage_providers:
     store_synchronous: True # do we want to wait that the file has been written before returning?
     config:
       bucket: matrix # the name of our bucket, we chose matrix earlier
-      region_name: garage # only "garage" is supported for the region field
+      region_name: garage # "garage" by default, edit according to your cluster config
       endpoint_url: http://localhost:3900 # the path to the S3 endpoint
       access_key_id: "GKxxx" # your Key ID
       secret_access_key: "xxxx" # your Secret Key
```
```diff
@@ -161,3 +161,49 @@ kopia repository validate-provider

 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.
+
+## Plakar
+
+Create your key and bucket on Garage server:
+
+```bash
+garage key create my-plakar-key
+garage bucket create plakar-backups
+garage bucket allow plakar-backups --read --write --key my-plakar-key
+…
+```
+
+On Plakar server, add your Garage as a storage location:
+```bash
+plakar store add garageS3 s3://my-garage.tld/plakar-backups \
+    region=garage # Or as you've specified in garage.toml \
+    access_key=<Key ID from "garage key info my-plakar-key"> \
+    secret_access_key=<Secret key from "garage key info my-plakar-key">
+```
+
+Then create the repository.
+```bash
+plakar at @garageS3 create -plaintext # Unencrypted
+# or
+plakar at @garageS3 create # encrypted
+```
+
+If you encrypt your backups (Plakar default), you will need to define a strong passphrase. Do not forget to save your password safely. It will be needed to decrypt your backups.
+
+After the repository has been created, check that everything works as expected (that might give an empty result as no file has been added yet, but no error message):
+```bash
+plakar at @garageS3 check
+```
+
+Now that everything is configured, you can use Garage as your backup storage. For instance, sync it with a local backup storage:
+```bash
+$ plakar at ~/backups sync to @garageS3
+```
+
+Or list the S3 storage content:
+```bash
+$ plakar at @garageS3 ls
+```
+
+More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/
```
```diff
@@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt

 ## WinSCP (libs3) {#winscp}

-*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).*
+*You can find instructions on how to use the GUI in french [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*

 How to use `winscp.com`, the CLI interface of WinSCP:
```
```diff
@@ -17,7 +17,7 @@ Garage can also help you serve this content.

 ## Gitea

-You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachements.
+You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachments.
 You can configure a different target for each data type (check `[lfs]` and `[attachment]` sections of the Gitea documentation) and you can provide a default one through the `[storage]` section.

 Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):
```
```diff
@@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.

 ## Comparison of Ansible roles

-| Feature                            | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
-|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
-| **Runtime**                        | Systemd | Docker |
-| **Target OS**                      | Any Linux | Any Linux |
-| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 |
-| **Additional software**            | None | Traefik |
-| **Automatic node connection**      | ❌ | ✅ |
-| **Layout management**              | ❌ | ✅ |
-| **Manage buckets & keys**          | ❌ | ✅ (basic) |
-| **Allow custom Garage config**     | ✅ | ❌ |
-| **Facilitate Garage upgrades**     | ✅ | ❌ |
-| **Multiple instances on one host** | ✅ | ✅ |
+| Feature                            | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
+|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
+| **Runtime**                        | Systemd | Docker | Systemd |
+| **Target OS**                      | Any Linux | Any Linux | Any Linux |
+| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
+| **Additional software**            | None | Traefik | Nginx and Keepalived (optional) |
+| **Automatic node connection**      | ❌ | ✅ | ✅ |
+| **Layout management**              | ❌ | ✅ | ✅ |
+| **Manage buckets & keys**          | ❌ | ✅ (basic) | ✅ |
+| **Allow custom Garage config**     | ✅ | ❌ | ❌ |
+| **Facilitate Garage upgrades**     | ✅ | ❌ | ✅ |
+| **Multiple instances on one host** | ✅ | ✅ | ❌ |

 ## zorun/ansible-role-garage
@@ -49,3 +49,15 @@ structured DNS names, etc).

 As a result, this role makes it easier to start with Garage on Ansible,
 but is less flexible.
+
+## eddster2309/ansible-role-garage
+
+[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
+
+This role is an opinionated but customisable role using the official Garage
+static binaries and only requires Systemd. As such, it should work on any
+Linux-based host. It includes all the necessary configuration to
+automatically set up a clustered Garage deployment. Most Garage
+configuration options are exposed through Ansible variables, so while you
+can't provide a custom config, you can get very close. It can optionally
+install an HA nginx deployment with Keepalived.
```
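For completeness, the role can be pulled from Galaxy like any other; a sketch (the role identifier is inferred from the Galaxy link above and may differ):

```bash
# Install the role locally before referencing it in a playbook.
ansible-galaxy role install eddster2309.garage
```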
```diff
@@ -15,9 +15,10 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage.toml`. You can run
-Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
-will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage/garage.toml`. You can run
+Garage using: `rc-service garage start`.
+
+If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@@ -26,7 +27,7 @@ it's stable).

 ## Arch Linux

-Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).
+Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).

 ## FreeBSD
```
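With the move from the AUR to the official `extra` repository, installation on Arch no longer needs an AUR helper; a sketch:

```bash
# Arch Linux: garage is now in the official 'extra' repository.
sudo pacman -S garage
```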
```diff
@@ -53,20 +53,43 @@ and that's also why your nodes have super long identifiers.

 Adding TLS support built into Garage is not currently planned.

-## Garage stores data in plain text on the filesystem
+## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)

-Garage does not handle data encryption at rest by itself, and instead delegates
-to the user to add encryption, either at the storage layer (LUKS, etc) or on
-the client side (or both). There are no current plans to add data encryption
-directly in Garage.
+For standard S3 API requests, Garage does not encrypt data at rest by itself.
+For the most generic at rest encryption of data, we recommend setting up your
+storage partitions on encrypted LUKS devices.

-Implementing data encryption directly in Garage might make things simpler for
-end users, but also raises many more questions, especially around key
-management: for encryption of data, where could Garage get the encryption keys
-from ? If we encrypt data but keep the keys in a plaintext file next to them,
-it's useless. We probably don't want to have to manage secrets in garage as it
-would be very hard to do in a secure way. Maybe integrate with an external
-system such as Hashicorp Vault?
+If you are developing your own client software that makes use of S3 storage,
+we recommend implementing data encryption directly on the client side and never
+transmitting plaintext data to Garage. This makes it easy to use an external
+untrusted storage provider if necessary.
+
+Garage does support [SSE-C
+encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
+an encryption mode of Amazon S3 where data is encrypted at rest using
+encryption keys given by the client. The encryption keys are passed to the
+server in a header in each request, to encrypt or decrypt data at the moment of
+reading or writing. The server discards the key as soon as it has finished
+using it for the request. This mode allows the data to be encrypted at rest by
+Garage itself, but it requires support in the client software. It is also not
+adapted to a model where the server is not trusted or assumed to be
+compromised, as the server can easily know the encryption keys. Note however
+that when using SSE-C encryption, the only Garage node that knows the
+encryption key passed in a given request is the node to which the request is
+directed (which can be a gateway node), so it is easy to have untrusted nodes
+in the cluster as long as S3 API requests containing SSE-C encryption keys are
+not directed to them.
+
+Implementing automatic data encryption directly in Garage without client-side
+management of keys (something like
+[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
+could make things simpler for end users that don't want to setup LUKS, but also
+raises many more questions, especially around key management: for encryption of
+data, where could Garage get the encryption keys from? If we encrypt data but
+keep the keys in a plaintext file next to them, it's useless. We probably don't
+want to have to manage secrets in Garage as it would be very hard to do in a
+secure way. At the time of speaking, there are no plans to implement this in
+Garage.

 # Adding data encryption using external tools
```
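To make the SSE-C flow concrete, here is a hedged client-side sketch with the AWS CLI (endpoint and bucket are placeholders; the key file must contain exactly 32 random bytes, and the same key must be presented again to read the object back):

```bash
# Generate a 256-bit customer key, then upload and download with SSE-C;
# Garage only handles the key transiently, per request.
openssl rand -out sse-c.key 32
aws --endpoint-url http://localhost:3900 s3 cp secret.txt \
    s3://my-bucket/secret.txt --sse-c AES256 --sse-c-key fileb://sse-c.key
aws --endpoint-url http://localhost:3900 s3 cp \
    s3://my-bucket/secret.txt out.txt --sse-c AES256 --sse-c-key fileb://sse-c.key
```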
```diff
@@ -90,5 +90,6 @@ The following feature flags are available in v0.8.0:
 | `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
 | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
 | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
+| `syslog` | optional | Enable logging to Syslog |
 | `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
-| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata |
+| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
```
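Feature flags are selected at compile time through Cargo; a minimal sketch (flag names from the table above; the exact default feature set may vary between versions):

```bash
# Build Garage with a custom feature set, e.g. dropping sqlite
# but enabling syslog support.
cargo build --release --no-default-features \
  --features "lmdb,metrics,syslog,bundled-libs"
```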
```diff
@@ -11,7 +11,7 @@ Firstly clone the repository:

 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/scripts/helm
+cd garage/script/helm
 ```

 Deploy with default options:
@@ -26,6 +26,13 @@ Or deploy with custom values:
 helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
 ```

+If you want to manage the CustomResourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart:
+
+```bash
+kubectl apply -k ../k8s/crd
+helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
+```
+
 After deploying, cluster layout must be configured manually as described in [Creating a cluster layout](@/documentation/quick-start/_index.md#creating-a-cluster-layout). Use the following command to access garage CLI:

 ```bash
@@ -86,3 +93,62 @@ helm delete --namespace garage garage
 ```

 Note that this will leave behind custom CRD `garagenodes.deuxfleurs.fr`, which must be removed manually if desired.
+
+## Increase PVC size on running Garage instances
+
+Since the Garage Helm chart creates the data and meta PVC based on `StatefulSet` templates, increasing the PVC size can be a bit tricky.
+
+### Confirm the `StorageClass` used for Garage supports volume expansion
+
+Confirm the storage class used for garage.
+
+```bash
+kubectl -n garage get pvc
+NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     VOLUMEATTRIBUTESCLASS   AGE
+data-garage-0   Bound    pvc-080360c9-8ce3-4acf-8579-1701e57b7f3f   30Gi       RWO            longhorn-local   <unset>                 77d
+data-garage-1   Bound    pvc-ab8ba697-6030-4fc7-ab3c-0d6df9e3dbc0   30Gi       RWO            longhorn-local   <unset>                 5d8h
+data-garage-2   Bound    pvc-3ab37551-0231-4604-986d-136d0fd950ec   30Gi       RWO            longhorn-local   <unset>                 5d5h
+meta-garage-0   Bound    pvc-3b457302-3023-4169-846e-c928c5f2ea65   3Gi        RWO            longhorn-local   <unset>                 77d
+meta-garage-1   Bound    pvc-49ace2b9-5c85-42df-9247-51c4cf64b460   3Gi        RWO            longhorn-local   <unset>                 5d8h
+meta-garage-2   Bound    pvc-99e2e50f-42b4-4128-ae2f-b52629259723   3Gi        RWO            longhorn-local   <unset>                 5d5h
+```
+
+In this case, the storage class is `longhorn-local`. Now, check if `ALLOWVOLUMEEXPANSION` is true for the used `StorageClass`.
+
+```bash
+kubectl get storageclasses.storage.k8s.io longhorn-local
+NAME             PROVISIONER          RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+longhorn-local   driver.longhorn.io   Delete          Immediate           true                   103d
+```
+
+If your `StorageClass` does not support volume expansion, double check if you can enable it. Otherwise, your only real option is to spin up a new Garage cluster with increased size and migrate all data over.
+
+If your `StorageClass` supports expansion, you are free to continue.
+
+### Increase the size of the PVCs
+
+Increase the size of all PVCs to your desired size.
+
+```bash
+kubectl -n garage edit pvc data-garage-0
+kubectl -n garage edit pvc data-garage-1
+kubectl -n garage edit pvc data-garage-2
+kubectl -n garage edit pvc meta-garage-0
+kubectl -n garage edit pvc meta-garage-1
+kubectl -n garage edit pvc meta-garage-2
+```
+
+### Increase the size of the `StatefulSet` PVC template
+
+This is an optional step, but if not done, future instances of Garage will be created with the original size from the template.
+
+```bash
+kubectl -n garage delete sts --cascade=orphan garage
+statefulset.apps "garage" deleted
+```
+
+This will remove the Garage `StatefulSet` but leave the pods running. It may seem destructive but needs to be done this way since edits to the size of PVC templates are prohibited.
+
+### Redeploy the `StatefulSet`
+
+Now the size of future PVCs can be increased, and the Garage Helm chart can be upgraded. The new `StatefulSet` should take ownership of the orphaned pods again.
```
@ -27,7 +27,7 @@ To run a real-world deployment, make sure the following conditions are met:
|
||||||
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
|
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
|
||||||
in addition to building out your own VPN tunneling.
|
in addition to building out your own VPN tunneling.
|
||||||
|
|
||||||
- This guide will assume you are using Docker containers to deploy Garage on each node.
|
- This guide will assume you are using Docker containers to deploy Garage on each node.
|
||||||
Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md).
|
Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md).
|
||||||
You can also use an orchestrator such as Nomad or Kubernetes to automatically manage
|
You can also use an orchestrator such as Nomad or Kubernetes to automatically manage
|
||||||
Docker containers on a fleet of nodes.
|
Docker containers on a fleet of nodes.
|
||||||
|
|
@ -53,9 +53,9 @@ to store 2 TB of data in total.
|
||||||
|
|
||||||
### Best practices
|
### Best practices
|
||||||
|
|
||||||
- If you have fast dedicated networking between all your nodes, and are planing to store
|
- If you have reasonably fast networking between all your nodes, and are planing to store
|
||||||
very large files, bump the `block_size` configuration parameter to 10 MB
|
mostly large files, bump the `block_size` configuration parameter to 10 MB
|
||||||
(`block_size = 10485760`).
|
(`block_size = "10M"`).
|
||||||
|
|
||||||
- Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
|
- Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
|
||||||
small metadata items, and a data directory to store data blocks of uploaded objects.
|
small metadata items, and a data directory to store data blocks of uploaded objects.
|
||||||
@@ -68,30 +68,42 @@ to store 2 TB of data in total.
EXT4 is not recommended as it has more strict limitations on the number of inodes,
which might cause issues with Garage when large numbers of objects are stored.

-- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
-on the same drive. Having lots of RAM for your kernel to cache the metadata will
-help a lot with performance. The default LMDB database engine is the most tested
-and has good performance.
-
-- For the metadata storage, Garage does not do checksumming and integrity
-verification on its own. If you are afraid of bitrot/data corruption,
-put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
-EXT4 or XFS.

- Servers with multiple HDDs are supported natively by Garage without resorting
to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).

+- For the metadata storage, Garage does not do checksumming and integrity
+verification on its own, so it is better to use a robust filesystem such as
+BTRFS or ZFS. Users have reported that when using the LMDB database engine
+(the default), database files have a tendency of becoming corrupted after an
+unclean shutdown (e.g. a power outage), so you should take regular snapshots
+to be able to recover from such a situation. This can be done using Garage's
+built-in automatic snapshotting (since v0.9.4), or by using filesystem-level
+snapshots. If you cannot do so, you might want to switch to Sqlite, which is
+more robust.
+
+- LMDB is the fastest and most tested database engine, but it has the following
+weaknesses: 1/ data files are not architecture-independent, you cannot simply
+move a Garage metadata directory between nodes running different architectures,
+and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
+if any of these are of concern.
+
+- If you only have an HDD and no SSD, it's fine to put your metadata alongside
+the data on the same drive, but then consider your filesystem choice wisely
+(see above). Having lots of RAM for your kernel to cache the metadata will
+help a lot with performance. The default LMDB database engine is the most
+tested and has good performance.

## Get a Docker image

Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (e.g. `v0.9.3`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of writing, which is `v0.9.3`, but it's up to you
+We encourage you to use a fixed tag (e.g. `v1.3.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of writing, which is `v1.3.0`, but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

For example:

```
-sudo docker pull dxflrs/garage:v0.9.3
+sudo docker pull dxflrs/garage:v1.3.0
```

## Deploying and configuring Garage

@@ -114,6 +126,7 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
db_engine = "lmdb"
+metadata_auto_snapshot_interval = "6h"

replication_factor = 3

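Pieced together from this hunk and the rest of the tutorial, the relevant portion of `/etc/garage.toml` now reads approximately as follows (a sketch, not the full file; the `rpc_*` values are the illustrative ones used elsewhere on this page):

```toml
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"

replication_factor = 3

rpc_bind_addr = "[::]:3901"
rpc_public_addr = "<this node's public IP>:3901"
rpc_secret = "<32-byte hex key, generate with: openssl rand -hex 32>"
```
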
@@ -139,6 +152,8 @@ Check the following for your configuration files:
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it.
+Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
+the addresses announced to other peers to a specific subnet.

- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-byte hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`.

@@ -156,7 +171,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
-dxflrs/garage:v0.9.3
+dxflrs/garage:v1.3.0
```

With this command line, Garage should be started automatically at each boot.

@@ -170,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`
version: "3"
services:
  garage:
-    image: dxflrs/garage:v0.9.3
+    image: dxflrs/garage:v1.3.0
    network_mode: "host"
    restart: unless-stopped
    volumes:

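The hunk above cuts off right after `volumes:`. Assuming the same bind mounts as the `docker run` example earlier on this page, a complete `docker-compose.yml` would look roughly like this:

```yaml
version: "3"
services:
  garage:
    image: dxflrs/garage:v1.3.0
    network_mode: "host"
    restart: unless-stopped
    volumes:
      - /etc/garage.toml:/etc/garage.toml
      - /var/lib/garage/meta:/var/lib/garage/meta
      - /var/lib/garage/data:/var/lib/garage/data
```
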
@@ -186,7 +201,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
will require stopping and removing the existing container, and re-creating it
with the upgraded version.

-## Controling the daemon
+## Controlling the daemon

The `garage` binary has two purposes:
- it acts as a daemon when launched with `garage server`

@@ -244,7 +259,7 @@ You can then instruct nodes to connect to one another as follows:
Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
```

-You don't nead to instruct all node to connect to all other nodes:
+You don't need to instruct all nodes to connect to all other nodes:
nodes will discover one another transitively.

Now if you run `garage status` on any node, you should have an output that looks as follows:

@@ -327,8 +342,8 @@ Given the information above, we will configure our cluster as follows:
```bash
garage layout assign 563e -z par1 -c 1T -t mercury
garage layout assign 86f0 -z par1 -c 2T -t venus
garage layout assign 6814 -z lon1 -c 2T -t earth
garage layout assign 212f -z bru1 -c 1.5T -t mars
```

At this point, the changes in the cluster layout have not yet been applied.

@@ -28,6 +28,7 @@ StateDirectory=garage
DynamicUser=true
ProtectHome=true
NoNewPrivileges=true
+LimitNOFILE=42000

[Install]
WantedBy=multi-user.target

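For context, with the new line applied, the `garage.service` unit from this cookbook page would look roughly as follows. The `[Unit]` and `ExecStart` lines are not shown in this diff and are assumptions based on a typical setup:

```ini
[Unit]
Description=Garage Data Store
After=network-online.target
Wants=network-online.target

[Service]
# ExecStart path is an assumption; adjust to where your binary lives
ExecStart=/usr/local/bin/garage server
StateDirectory=garage
DynamicUser=true
ProtectHome=true
NoNewPrivileges=true
LimitNOFILE=42000

[Install]
WantedBy=multi-user.target
```
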
@@ -50,3 +50,20 @@ locations. They use Garage themselves for the following tasks:

The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
9 nodes in 3 physical locations.
+
+### Triplebit
+
+[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
+ISP focused on improving access to privacy-related services. They use
+Garage themselves for the following tasks:
+
+- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
+
+- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
+
+- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
+
+- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
+
+Triplebit's Garage cluster is a multi-site cluster currently composed of
+10 nodes in 3 physical locations.

@@ -67,7 +67,7 @@ Pithos has been abandoned and should probably not be used; in the following we
Pithos worked as an S3 proxy in front of Cassandra (and also worked with Scylla DB).
According to its designers, storing data in Cassandra showed its limitations, justifying the project's abandonment.
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
-We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
+We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.

**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
*Not written yet*

@@ -36,7 +36,7 @@ sudo killall nix-daemon
Now you can enter our nix-shell; all the required packages will be downloaded, but they will not pollute your environment outside of the shell:

```bash
-nix-shell
+nix-shell -A devShell
```

You can use the traditional Rust development workflow:

@@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
```

Our build has multiple parameters you might want to set:
-- `release` build with release optimisations instead of debug
+- `release` to build with release optimisations instead of debug
-- `target allows` for cross compilation
+- `target` allows for cross compilation
- `compileMode` can be set to test or bench to build a unit test runner
- `git_version` to inject the hash to display when running `garage stats`

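A hedged sketch of how these parameters might be combined on one command line, using standard `nix-build` `--arg`/`--argstr` syntax (the exact parameter spelling accepted by our Nix expression should be checked against `default.nix`):

```bash
nix-build \
  --arg release true \
  --argstr target x86_64-unknown-linux-musl \
  --argstr git_version "$(git rev-parse --short HEAD)"
```
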
@@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:

# Data block operations

-## Data store scrub
+## Data store scrub {#scrub}

Scrubbing the data store means examining each individual data block to check that
their content is correct, by verifying their hash. Any block found to be corrupted

@@ -104,6 +104,24 @@ operation will also move out all data from locations marked as read-only.

# Metadata operations

+## Metadata snapshotting
+
+It is good practice to set up automatic snapshotting of your metadata database
+file, to recover from situations where it becomes corrupted on disk. This can
+be done at the filesystem level if you are using ZFS or BTRFS.
+
+Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
+itself. This basically amounts to copying the database file, except that it can
+be run live while Garage is running, without the risk of corruption or
+inconsistencies. This can be set up to run automatically on a schedule using
+[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
+A snapshot can also be triggered manually using the `garage meta snapshot`
+command. Note that taking a snapshot using this method is very intensive as it
+requires making a full copy of the database file, so you might prefer using
+filesystem-level snapshots if possible. To recover a corrupted node from such a
+snapshot, read the instructions
+[here](@/documentation/operations/recovering.md#corrupted_meta).
+
## Metadata table resync

Garage automatically resyncs all entries stored in the metadata tables every hour,

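The snapshotting commands introduced by this section can be exercised as follows (`--all` is documented in the upgrade cookbook, later in this changeset):

```bash
garage meta snapshot        # snapshot the metadata DB of the local node
garage meta snapshot --all  # snapshot the metadata DB of all cluster nodes
```
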
@@ -123,4 +141,7 @@ blocks may still be held by Garage. If you suspect that such corruption has occurred
in your cluster, you can run one of the following repair procedures:

- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
-- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
+
+- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
+
+- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table

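Assuming these repair procedures accept the same `-a --yes` flags used with `garage repair blocks` and `garage repair tables` elsewhere in these docs, a cluster-wide run would look like:

```bash
garage repair -a --yes versions    # purge orphan object versions
garage repair -a --yes block-refs  # purge orphan block references
garage repair -a --yes block-rc    # resync block reference counters
```
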
@@ -21,14 +21,14 @@ data_dir = [
```

Garage will automatically balance all blocks stored by the node
-among the different specified directories, proportionnally to the
+among the different specified directories, proportionally to the
specified capacities.

## Updating the list of storage locations

If you add new storage locations to your `data_dir`,
Garage will not rebalance existing data between storage locations.
-Newly written blocks will be balanced proportionnally to the specified capacities,
+Newly written blocks will be balanced proportionally to the specified capacities,
and existing data may be moved between drives to improve balancing,
but only opportunistically when a data block is re-written (e.g. an object
is re-uploaded, or an object with a duplicate block is uploaded).

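For reference, the `data_dir = [` list that this hunk patches has the following shape; paths and capacities here are illustrative, see the rest of this page for the authoritative syntax:

```toml
data_dir = [
    { path = "/mnt/hdd1", capacity = "2T" },
    { path = "/mnt/hdd2", capacity = "4T" },
]
```
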
@@ -5,7 +5,7 @@ weight = 40

Garage is meant to work on old, second-hand hardware.
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
-Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
+Fear not! Garage is fully equipped to handle drive failures, in most common cases.

## A note on availability of Garage

@@ -61,7 +61,7 @@ garage repair -a --yes blocks

This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes.

You can check on the advancement of this process by running the following command:

```bash
garage stats -a

@@ -108,3 +108,57 @@ garage layout apply # once satisfied, apply the changes

Garage will then start synchronizing all required data on the new node.
This process can be monitored using the `garage stats -a` command.
+
+## Replacement scenario 3: corrupted metadata {#corrupted_meta}
+
+In some cases, your metadata DB file might become corrupted, for instance if
+your node suffered a power outage and did not shut down properly. In this case,
+you can recover without having to change the node ID or rebuild the cluster
+layout. This means that data blocks will not need to be shuffled around; you
+must simply find a way to repair the metadata file. The best way is generally
+to discard the corrupted file and recover it from another source.
+
+First of all, start by locating the database file in your metadata directory,
+which [depends on your `db_engine`
+choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
+your recovery options are as follows:
+
+- **Option 1: resyncing from other nodes.** In case your cluster is replicated
+with two or three copies, you can simply delete the database file, and Garage
+will resync from other nodes. To do so, stop Garage, delete the database file
+or directory, and restart Garage. Then, do a full table repair by calling
+`garage repair -a --yes tables`. This will take a bit of time to complete as
+the new node will need to receive copies of the metadata tables from the
+network.
+
+- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
+[automatically take regular
+snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
+of your metadata DB file. This file or directory should be located under
+`<metadata_dir>/snapshots`, and is named according to the UTC time at which it
+was taken. Stop Garage, discard the database file/directory and replace it by the
+snapshot you want to use. For instance, in the case of LMDB:
+
+```bash
+cd $METADATA_DIR
+mv db.lmdb db.lmdb.bak
+cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
+```
+
+And for Sqlite:
+
+```bash
+cd $METADATA_DIR
+mv db.sqlite db.sqlite.bak
+cp snapshots/2024-03-15T12:13:52Z db.sqlite
+```
+
+Then, restart Garage and run a full table repair by calling `garage repair -a
+--yes tables`. This should run relatively fast as only the changes that
+occurred since the snapshot was taken will need to be resynchronized. Of
+course, if your cluster is not replicated, you will lose all changes that
+occurred since the snapshot was taken.
+
+- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
+BTRFS to snapshot your metadata partition, refer to their specific
+documentation on rolling back or copying files from an old snapshot.

@@ -71,7 +71,19 @@ The entire procedure would look something like this:

2. Take each node offline individually to back up its metadata folder, bring them back online once the backup is done.
You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
-Do not try to make a backup of the metadata folder of a running node.
+Do not try to manually copy the metadata folder of a running node.
+
+**Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
+to take a simultaneous snapshot of the metadata database files of all your
+nodes. This avoids the tedious process of having to take them down one by
+one before upgrading. Be careful that if automatic snapshotting is enabled,
+Garage only keeps the last two snapshots and deletes older ones, so you might
+want to disable automatic snapshotting in your upgraded configuration file
+until you have confirmed that the upgrade ran successfully. In addition to
+snapshotting the metadata databases of your nodes, you should back up at
+least the `cluster_layout` file of one of your Garage instances (this file
+should be the same on all nodes and you can copy it safely while Garage is
+running).

3. Prepare your binaries and configuration files for the new Garage version

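A sketch of the backup step described above. The exact location of the `cluster_layout` file inside the metadata directory is an assumption; check your own `metadata_dir`:

```bash
# simultaneous snapshot of the metadata DB on all nodes
garage meta snapshot --all

# additionally back up the cluster layout from one node
# (this file can be copied safely while Garage is running)
cp /var/lib/garage/meta/cluster_layout /root/garage-backup/cluster_layout
```
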
@@ -42,6 +42,13 @@ If a binary of the last version is not available for your architecture,
or if you want a build customized for your system,
you can [build Garage from source](@/documentation/cookbook/from-source.md).

+If none of these options work for you, you can also run Garage in a Docker
+container. When using Docker, the commands used in this guide will not work
+anymore. We recommend reading the tutorial on [configuring a
+multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
+using Garage as a Docker container. For simplicity, a minimal command to launch
+Garage using Docker is provided in this quick start guide as well.
+
## Configuring and starting Garage

@@ -57,7 +64,7 @@ to generate unique and private secrets for security reasons:
cat > garage.toml <<EOF
metadata_dir = "/tmp/meta"
data_dir = "/tmp/data"
-db_engine = "lmdb"
+db_engine = "sqlite"

replication_factor = 1

@@ -85,6 +92,9 @@ metrics_token = "$(openssl rand -base64 32)"
EOF
```

+See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
+for complete options and values.
+
Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`

@@ -111,6 +121,26 @@ garage -c path/to/garage.toml server

If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.

+Alternatively, if you cannot or do not wish to run the Garage binary directly,
+you may use Docker to run Garage in a container using the following command:
+
+```bash
+docker run \
+-d \
+--name garaged \
+-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
+-v /path/to/garage.toml:/etc/garage.toml \
+-v /path/to/garage/meta:/var/lib/garage/meta \
+-v /path/to/garage/data:/var/lib/garage/data \
+dxflrs/garage:v1.3.0
+```
+
+Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`.
+
+#### Troubleshooting
+
+Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
+
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.

@@ -131,6 +161,9 @@ It uses values from the TOML configuration file to find the Garage daemon running on the
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation.

+If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
+to use the Garage binary inside your container.
+
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:

@@ -149,11 +182,12 @@ ID Hostname Address Tag Zone Capacity
## Creating a cluster layout

Creating a cluster layout for a Garage deployment means informing Garage
-of the disk space available on each node of the cluster
-as well as the zone (e.g. datacenter) each machine is located in.
+of the disk space available on each node of the cluster, `-c`,
+as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.

-For our test deployment, we are using only one node. The way in which we configure
-it does not matter, you can simply write:
+For our test deployment, we have only one node with a zone named `dc1` and a
+capacity of `1G`, though the capacity is ignored for a single-node deployment
+and can be changed later when adding new nodes.

```bash
garage layout assign -z dc1 -c 1G <node_id>

@@ -166,7 +200,7 @@ For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
The layout then has to be applied to the cluster, using:

```bash
-garage layout apply
+garage layout apply --version 1
```

@@ -316,7 +350,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibility.md)

### Other tools for interacting with Garage

-The following tools can also be used to send and recieve files from/to Garage:
+The following tools can also be used to send and receive files from/to Garage:

- [minio-client](@/documentation/connect/cli.md#minio-client)
- [s3cmd](@/documentation/connect/cli.md#s3cmd)

@@ -13,13 +13,19 @@ consistency_mode = "consistent"

metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
+metadata_snapshots_dir = "/var/lib/garage/snapshots"
metadata_fsync = true
data_fsync = false
+disable_scrub = false
+use_local_tz = false
+metadata_auto_snapshot_interval = "6h"

db_engine = "lmdb"

block_size = "1M"
+block_ram_buffer_max = "256MiB"
+block_max_concurrent_reads = 16
+block_max_concurrent_writes_per_request = 10
lmdb_map_size = "1T"

compression_level = 1

@@ -28,6 +34,11 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_bind_outgoing = false
rpc_public_addr = "[fc00:1::1]:3901"
+# or set rpc_public_addr_subnet to filter down autodiscovery to a subnet:
+# rpc_public_addr_subnet = "2001:0db8:f00:b00::/64"

+allow_world_readable_secrets = false

bootstrap_peers = [
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",

@@ -36,6 +47,7 @@ bootstrap_peers = [
"212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
]

+allow_punycode = false

[consul_discovery]
api = "catalog"

@@ -65,6 +77,7 @@ root_domain = ".s3.garage"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".web.garage"
+add_host_to_metrics = true

[admin]
api_bind_addr = "0.0.0.0:3903"

@@ -79,22 +92,34 @@ The following gives details about each available configuration option.

### Index

-Top-level configuration options:
+[Environment variables](#env_variables).
+
+Top-level configuration options, in alphabetical order:
+[`allow_punycode`](#allow_punycode),
+[`allow_world_readable_secrets`](#allow_world_readable_secrets),
+[`block_max_concurrent_reads`](#block_max_concurrent_reads),
+[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
+[`block_ram_buffer_max`](#block_ram_buffer_max),
[`block_size`](#block_size),
[`bootstrap_peers`](#bootstrap_peers),
[`compression_level`](#compression_level),
+[`consistency_mode`](#consistency_mode),
[`data_dir`](#data_dir),
[`data_fsync`](#data_fsync),
[`db_engine`](#db_engine),
+[`disable_scrub`](#disable_scrub),
[`lmdb_map_size`](#lmdb_map_size),
+[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
[`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync),
+[`metadata_snapshots_dir`](#metadata_snapshots_dir),
[`replication_factor`](#replication_factor),
-[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
+[`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
+[`use_local_tz`](#use_local_tz).

The `[consul_discovery]` section:
[`api`](#consul_api),

@@ -119,6 +144,7 @@ The `[s3_api]` section:
[`s3_region`](#s3_region).

The `[s3_web]` section:
+[`add_host_to_metrics`](#web_add_host_to_metrics),
[`bind_addr`](#web_bind_addr),
[`root_domain`](#web_root_domain).

@@ -128,10 +154,31 @@ The `[admin]` section:
[`admin_token`/`admin_token_file`](#admin_token),
[`trace_sink`](#admin_trace_sink),

+### Environment variables {#env_variables}
+
+The following configuration parameters must be specified as environment variables;
+they do not exist in the configuration file:
+
+- `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
+Garage daemon send its logs to `syslog` (using the libc `syslog` function)
+instead of printing to stderr.
+
+- `GARAGE_LOG_TO_JOURNALD` (since `v1.2.0`): set this to `1` or `true` to make the
+Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
+instead of printing to stderr.
+
+The following environment variables can be used to override the corresponding
+values in the configuration file:
+
+- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
+- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
+- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
+- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN_FILE`](#admin_metrics_token)

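A quick sketch of how these environment variables are used when launching the daemon; the secret file path is illustrative:

```bash
# log to syslog instead of stderr, and take the RPC secret from the
# environment rather than from the configuration file
GARAGE_LOG_TO_SYSLOG=1 \
GARAGE_RPC_SECRET=$(cat /etc/garage/rpc_secret) \
garage server
```
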

### Top-level configuration options

-#### `replication_factor` {#replication_factor}
+#### `replication_factor` (since `v1.0.0`) {#replication_factor}

The replication factor can be any positive integer smaller than or equal to the node count in your cluster.
The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.

@@ -179,7 +226,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
routine operation for Garage, although we cannot guarantee you that everything
will go right in such an extreme scenario.

-#### `consistency_mode` {#consistency_mode}
+#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}

The consistency mode setting determines the read and write behaviour of your cluster.

@@ -241,6 +288,7 @@ as the index of all objects, object version and object blocks.

Store this folder on a fast SSD drive if possible to maximize Garage's performance.
+
#### `data_dir` {#data_dir}

The directory in which Garage will store the data blocks of objects.

@@ -261,34 +309,77 @@ data_dir = [
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
on how to operate Garage in such a setup.

+#### `metadata_snapshots_dir` (since `v1.1.0`) {#metadata_snapshots_dir}
+
+The directory in which Garage will store metadata snapshots when it
+performs a snapshot of the metadata database, either when instructed to do
+so from an RPC call or regularly through
+[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval).
+
+By default, Garage will store snapshots into a `snapshots/` subdirectory
+of [`metadata_dir`](#metadata_dir). This might quickly fill up your
+metadata storage space if you use snapshots, because Garage will need up
+to 4x the space of the existing metadata database: each snapshot requires
+roughly as much space as the original database, and Garage temporarily
+needs to store up to three different snapshots before it cleans up the oldest
+snapshot to go back to two stored snapshots.
+
+To prevent filling your disk, you might want to change this setting to a
+directory with ample available space, e.g. on the same storage space as
+[`data_dir`](#data_dir).

#### `db_engine` (since `v0.8.0`) {#db_engine}

Since `v0.8.0`, Garage can use alternative storage backends as follows:

| DB engine | `db_engine` value | Database path |
| --------- | ----------------- | ------------- |
-| [LMDB](https://www.lmdb.tech) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
+| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
+| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |

Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
-You can still use an older binary of Garage (e.g. v0.9.3) to migrate
+You can still use an older binary of Garage (e.g. v0.9.4) to migrate
old Sled metadata databases to another engine.

Performance characteristics of the different DB engines are as follows:

-- LMDB: the recommended database engine on 64-bit systems, much more
-space-efficient and slightly faster. Note that the data format of LMDB is not
-portable between architectures, so for instance the Garage database of an
-x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
-technically be used on 32-bit systems, this will limit your node to very
-small database sizes due to how LMDB works; it is therefore not recommended.
+- LMDB: the recommended database engine for high-performance distributed clusters.
+LMDB works very well, but is known to have the following limitations:
+
+  - The data format of LMDB is not portable between architectures, so for
+  instance the Garage database of an x86-64 node cannot be moved to an ARM64
+  node.
+
+  - While LMDB can technically be used on 32-bit systems, this will limit your
+  node to very small database sizes due to how LMDB works; it is therefore
+  not recommended.
+
+  - Several users have reported corrupted LMDB database files after an unclean
+  shutdown (e.g. a power outage). This situation can generally be recovered
+  from if your cluster is geo-replicated (by rebuilding your metadata db from
+  other nodes), or if you have saved regular snapshots at the filesystem
+  level.
+
+  - Keys in LMDB are limited to 511 bytes. This limit translates to limits on
+  object keys in S3 and sort keys in K2V that are limited to 479 bytes.

- Sqlite: Garage supports Sqlite as an alternative storage backend for
-metadata, and although it has not been tested as much, it is expected to work
-satisfactorily. Since Garage v0.9.0, performance issues have largely been
-fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not
-have the database size limitation of LMDB on 32-bit systems.
+metadata, which does not have the issues listed above for LMDB.
+On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
+performance, which was fixed with the addition of `metadata_fsync`.
+Sqlite is still probably slower than LMDB due to the way we use it,
+so it is not the best choice for high-performance storage clusters,
+but it should work fine in many cases.
+
+- Fjall: a storage engine based on LSM trees, which theoretically allow for
+higher write throughput than other storage engines that are based on B-trees.
+Using Fjall could potentially improve Garage's performance significantly in
+write-heavy workloads. **Support for Fjall is experimental at this point**;
+we have added it to Garage for evaluation purposes only. **Do not use it for
+production-critical workloads.**

It is possible to convert Garage's metadata directory from one format to another
using the `garage convert-db` command, which should be used as follows:

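The hunk ends before the usage example it announces. As a hedged sketch, `garage convert-db` takes an input engine and path plus an output engine and path, along these lines (flag names are an assumption; verify with `garage convert-db --help`):

```bash
garage convert-db \
  -a lmdb   -i /var/lib/garage/meta/db.lmdb \
  -b sqlite -o /var/lib/garage/meta/db.sqlite
```
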
@@ -315,7 +406,7 @@ Using this option reduces the risk of simultaneous metadata corruption on several
cluster nodes, which could lead to data loss.

If multi-site replication is used, this option is most likely not necessary, as
it is extremely unlikely that two nodes in different locations will have a
power failure at the exact same time.

(Metadata corruption on a single node is not an issue, the corrupted data file

@@ -327,6 +418,7 @@ Here is how this option impacts the different database engines:
|----------|------------------------------------|-------------------------------|
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
+| Fjall | default options | not supported |

Note that the Sqlite database is always run in `WAL` mode (`PRAGMA journal_mode = WAL`).

@@ -343,6 +435,50 @@ at the cost of a moderate drop in write performance.
Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used.

+#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}
+
+If this value is set, Garage will automatically take a snapshot of the metadata
+DB file at a regular interval and save it in the metadata directory.
+This parameter can take any duration string that can be parsed by
+the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
+
+Snapshots allow you to recover from situations where the metadata DB file is
+corrupted, for instance after an unclean shutdown. See [this
+page](@/documentation/operations/recovering.md#corrupted_meta) for details.
+Garage keeps only the two most recent snapshots of the metadata DB and deletes
+older ones automatically.
+
+Note that taking a metadata snapshot is a relatively intensive operation as the
+entire data file is copied. A snapshot being taken might have performance
+impacts on the Garage node while it is running. If the cluster is under heavy
+write load when a snapshot operation is running, this might also cause the
+database file to grow in size significantly as pages cannot be recycled easily.
+For this reason, it might be better to use filesystem-level snapshots instead
+if possible.
+
+#### `disable_scrub` {#disable_scrub}
+
+By default, Garage runs a scrub of the data directory approximately once per
+month, with a random delay to avoid all nodes running at the same time. When
+it scrubs the data directory, Garage will read all of the data files stored on
+disk to check their integrity, and will rebuild any data files that it finds
+corrupted, using the remaining valid copies stored on other nodes.
+See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
+
+Set the `disable_scrub` configuration value to `true` if you don't need Garage
+to scrub the data directory, for instance if you are already scrubbing at the
+filesystem level. Note that in this case, if you find a corrupted data file,
+you should delete it from the data directory and then call `garage repair
+blocks` on the node to ensure that it re-obtains a copy from another node on
+the network.
+
+#### `use_local_tz` (since `v1.1.0`) {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
#### `block_size` {#block_size}

Garage splits stored objects in consecutive chunks of size `block_size`

@@ -358,6 +494,68 @@ files will remain available. This however means that chunks from existing files
will not be deduplicated with chunks from newly uploaded files, meaning you
might use more storage space than is optimally possible.

+#### `block_ram_buffer_max` (since `v0.9.4`) {#block_ram_buffer_max}
+
+A limit on the total size of data blocks kept in RAM by S3 API nodes awaiting
+to be sent to storage nodes asynchronously.
+
+Explanation: since Garage wants to tolerate node failures, it uses quorum
+writes to send data blocks to storage nodes: try to write the block to three
+nodes, and return ok as soon as two writes complete. So even if all three nodes
+are online, the third write always completes asynchronously. In general, there
+are not many writes to a cluster, and the third asynchronous write can
+terminate early enough so as to not cause unbounded RAM growth. However, if
+the S3 API node is continuously receiving large quantities of data and the
+third node is never able to catch up, many data blocks will be kept buffered in
+RAM as they are awaiting transfer to the third node.
+
+The `block_ram_buffer_max` sets a limit to the size of buffers that can be kept
+in RAM in this process. When the limit is reached, backpressure is applied
+back to the S3 client.
+
+Note that this only counts buffers that have arrived at a certain stage of
+processing (received from the client + encrypted and/or compressed as
+necessary) and are ready to send to the storage nodes. Many other buffers will
+not be counted and this is not a hard limit on RAM consumption. In particular,
+if many clients send requests simultaneously with large objects, the RAM
+consumption will always grow linearly with the number of concurrent requests,
+as each request will use a few buffers of size `block_size` for receiving and
+intermediate processing before even trying to send the data to the storage
+node.
+
+The default value is 256MiB.
+
+#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
+
+The maximum number of blocks (individual files in the data directory) open
+simultaneously for reading.
+
+Reducing this number does not limit the number of data blocks that can be
+transferred through the network simultaneously. This mechanism was just added
+as a backpressure mechanism for HDD read speed: it helps avoid a situation
+where too many requests are coming in and Garage is reading too many block
+files simultaneously, thus not making timely progress on any of the reads.
+
+When a request to read a data block comes in through the network, the request
+waits for one of the `block_max_concurrent_reads` slots to be available
+(internally implemented using a Semaphore object). Once it has acquired a read
+slot, it reads the entire block file to RAM and frees the slot as soon as the
+block file is finished reading. Only after the slot is released will the
+block's data start being transferred over the network. If the request fails to
+acquire a reading slot within 15 seconds, it fails with a timeout error.
+Timeout events can be monitored through the `block_read_semaphore_timeouts`
+metric in Prometheus: a non-zero number of such events indicates an I/O
+bottleneck on HDD read speed.
+
+#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
+
+This parameter is designed to adapt to the concurrent write performance of
+different storage media. It sets the maximum number of parallel block writes
+per PUT request. Higher values improve throughput but increase memory usage.
+
+Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.

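Taken together, the block-related options added in this changeset might be tuned as follows for an NVMe-backed node; the values simply follow the recommendations quoted above and are a starting point, not a definitive setting:

```toml
block_ram_buffer_max = "256MiB"               # the default cap on queued write buffers
block_max_concurrent_reads = 16               # backpressure on simultaneous block reads
block_max_concurrent_writes_per_request = 20  # NVMe: recommended range 10-30
```
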
|
||||||
#### `lmdb_map_size` {#lmdb_map_size}
|
#### `lmdb_map_size` {#lmdb_map_size}
|
||||||
|
|
||||||
This parameters can be used to set the map size used by LMDB,
|
This parameters can be used to set the map size used by LMDB,
|
||||||
|
|
@ -414,7 +612,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
|
||||||
port number to the same internal port nubmer. This means that if you have several nodes running
|
port number to the same internal port nubmer. This means that if you have several nodes running
|
||||||
behind a NAT, they should each use a different RPC port number.
|
behind a NAT, they should each use a different RPC port number.
|
||||||
|
|
||||||
#### `rpc_bind_outgoing`(since v0.9.2) {#rpc_bind_outgoing}
|
#### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing}
|
||||||
|
|
||||||
If enabled, pre-bind all sockets for outgoing connections to the same IP address
|
If enabled, pre-bind all sockets for outgoing connections to the same IP address
|
||||||
used for listening (the IP address specified in `rpc_bind_addr`) before
|
used for listening (the IP address specified in `rpc_bind_addr`) before
|
||||||
|
|

@@ -432,6 +630,14 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help make it work.

#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}

In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.

For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
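
A sketch of the kind of filtering this option implies, assuming the
third-party `ipnet` crate for subnet parsing (illustrative only, not Garage's
actual code):

```rust
use std::net::IpAddr;
use ipnet::IpNet;

// Keep only the autodiscovered addresses that fall inside the configured subnet.
fn filter_discovered(candidates: Vec<IpAddr>, subnet: &str) -> Vec<IpAddr> {
    let net: IpNet = subnet.parse().expect("invalid subnet in configuration");
    candidates.into_iter().filter(|ip| net.contains(ip)).collect()
}
```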

#### `bootstrap_peers` {#bootstrap_peers}

A list of peer identifiers on which to contact other Garage peers of this cluster.

@@ -448,7 +654,7 @@ be obtained by running `garage node id` and then included directly in the
key will be returned by `garage node id` and you will have to add the IP
yourself.

#### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}

Garage checks the permissions of your secret files to make sure they're not
world-readable. In some cases, the check might fail and consider your files as

@@ -460,6 +666,13 @@ permission verification.
Alternatively, you can set the `GARAGE_ALLOW_WORLD_READABLE_SECRETS`
environment variable to `true` to bypass the permissions check.
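
On Unix, a check of this kind boils down to inspecting the file's mode bits;
the following is a minimal sketch of the principle, not Garage's actual
implementation:

```rust
use std::fs;
use std::os::unix::fs::PermissionsExt;

// Returns true if any of the "other" (world) permission bits are set.
fn is_world_accessible(path: &str) -> std::io::Result<bool> {
    let mode = fs::metadata(path)?.permissions().mode();
    Ok(mode & 0o007 != 0)
}
```

If a check like this reports a problem for a file you know to be safe (for
instance because of an unusual filesystem or mount), these options are the
escape hatch.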

#### `allow_punycode` {#allow_punycode}

Allow creating buckets with names containing punycode. When used for buckets served
as websites, this allows using almost any unicode character in the domain name.

Defaults to `false`.

### The `[consul_discovery]` section

Garage supports discovering other nodes of the cluster using Consul. For this

@@ -590,6 +803,13 @@ For instance, if `root_domain` is `web.garage.eu`, a bucket called `deuxfleurs.f
will be accessible either with hostname `deuxfleurs.fr.web.garage.eu`
or with hostname `deuxfleurs.fr`.

#### `add_host_to_metrics` {#web_add_host_to_metrics}

Whether to include the requested domain name (HTTP `Host` header) in the
Prometheus metrics of the web endpoint. This is disabled by default as the
number of possible values is not bounded and can be a source of cardinality
explosion in the exported metrics.
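
As an illustrative scenario: a scanner probing the web endpoint with thousands
of random `Host` values would, with this option enabled, create one new metric
series per probed hostname, which most Prometheus setups handle poorly.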

### The `[admin]` section

@@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
of going through a central bottleneck (the leader node).
As a consequence, requests can be handled much faster, even in cases where latency
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly useful when nodes are far from one another and talk to one another through standard Internet connections.

### Web server for static websites

@@ -225,6 +225,17 @@ block_bytes_read 120586322022
block_bytes_written 3386618077
```

#### `block_ram_buffer_free_kb` (gauge)

Kibibytes available for buffering blocks that have to be sent to remote nodes.
When clients send too much data to this node and a storage node is not receiving
data fast enough due to slower network conditions, this will decrease down to
zero and backpressure will be applied.

```
block_ram_buffer_free_kb 219829
```

#### `block_compression_level` (counter)

Exposes the block compression level configured for the Garage node.

@@ -381,7 +392,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0

#### `table_sync_items_received`, `table_sync_items_sent` (counters)

Number of data items sent to/received from other nodes during resync procedures

```
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3

@@ -23,17 +23,17 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
- 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.

## High-level features

| Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
| [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓ | ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌ | ✅ | ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌ | ✅ | ✅ | ✅(❓) |
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
| [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |

*Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without additional details,

@@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.

Thus, the workflow for reading an object is as follows:

@@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
Useful metadata:

- list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm

Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
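
The temporary-file-then-rename trick works because `rename(2)` is atomic
within a single POSIX filesystem; a minimal sketch of the idea (illustrative
only):

```rust
use std::fs::{self, File};
use std::io::Write;

// Concurrent readers of `path` see either the old state (e.g. file absent)
// or the complete new content, never a partially written blob.
fn write_blob_atomically(path: &str, data: &[u8]) -> std::io::Result<()> {
    let tmp = format!("{path}.tmp");
    let mut f = File::create(&tmp)?;
    f.write_all(data)?;
    f.sync_all()?; // ensure bytes are on disk before the rename makes them visible
    fs::rename(&tmp, path) // atomic within one filesystem
}
```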

@@ -68,7 +68,7 @@ The migration steps are as follows:
5. Turn off Garage 0.3

6. Back up metadata folders if you can (i.e. if you have space to do it
   somewhere). Backing up data folders could also be useful but that's much
   harder to do. If your filesystem supports snapshots, this could be a good
   time to use them.

doc/book/working-documents/migration-1.md (new file, 77 lines)

@@ -0,0 +1,77 @@

+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++

**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**

This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**

You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).

## Changes introduced in v1.0

The following are **breaking changes** in Garage v1.0 that require your attention when migrating:

- The Sled metadata db engine has been **removed**. If your cluster was still
  using Sled, you will need to **use a Garage v0.9.x binary** to convert the
  database using the `garage convert-db` subcommand. See
  [here](@/documentation/reference-manual/configuration.md#db_engine) for the
  details of the procedure.

The following syntax changes have been made to the configuration file:

- The `replication_mode` parameter has been split into two parameters:
  [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
  and
  [`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
  The old syntax using `replication_mode` is still supported for legacy
  reasons and can still be used.

- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.

## Migration procedure

The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.

The migration steps are as follows:

1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
   all data seems to be synced correctly between nodes. If you have time, do
   additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
   etc.)

2. Ensure you have a snapshot of your Garage installation that you can restore
   to in case the upgrade goes wrong:

   - If you are running Garage v0.9.4 or later, use `garage meta snapshot
     --all` to take a snapshot of the metadata directories of all your nodes,
     and save a copy of the following files in the
     metadata directories of your nodes: `cluster_layout`, `data_layout`,
     `node_key`, `node_key.pub`.

   - If you are running a filesystem such as ZFS or BTRFS that supports
     snapshotting, you can create a filesystem-level snapshot to be used as a
     restoration point if needed.

   - In other cases, make a backup using the old procedure: turn off each node
     individually; back up its metadata folder (for instance, use the following
     command if your metadata directory is `/var/lib/garage/meta`: `cd
     /var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
     again. This will allow you to take a backup of all nodes without
     impacting global cluster availability. You can do all nodes of a single
     zone at once as this does not impact the availability of Garage.

3. Prepare your updated binaries and configuration files for Garage v1.0

4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
   in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
   achieve this as fast as possible. Garage v1.0 should be in a working state
   as soon as enough nodes have started.

5. Monitor your cluster in the following hours to see if it works well under
   your production load.

@@ -37,7 +37,7 @@ There are two reasons for this:

Reminder: rules of simplicity, concerning changes to Garage's source code.
Always question what we are doing.
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
Only do things that make perfect sense in the context of what we currently know.

## References

@@ -70,7 +70,7 @@ Example response body:

```json
{
  "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
  "garageVersion": "v1.3.0",
  "garageFeatures": [
    "k2v",
    "lmdb",

@@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:

- in case of concurrent update and deletion, a `null` is added to the list of concurrent values

- if the `tombstones` query parameter is set to `true`, tombstones are returned
  for items that have been deleted (this can be useful for inserting after an
  item that has been deleted, so that the insert is not considered
  concurrent with the delete). Tombstones are returned as tuples in the
  same format with only `null` values

flake.lock (generated, 118 lines)

@@ -1,38 +1,27 @@

{
"nodes": {
"crane": {
"locked": {
"lastModified": 1737689766,
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
"owner": "ipetkov",
"repo": "crane",
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-compat": {
"locked": {
"lastModified": 1717312683,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community",
"repo": "flake-compat",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github"
},
"original": {

@@ -46,29 +35,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {

@@ -79,63 +50,47 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1763977559,
"narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1763952169,
"narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "ab726555a9a72e6dc80649809147823a813fa95b",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "ab726555a9a72e6dc80649809147823a813fa95b",
"type": "github"
}
},

@@ -153,21 +108,6 @@
"repo": "default",
"type": "github"
}
}
},
"root": "root",

flake.nix (129 lines)

@@ -2,88 +2,95 @@

  description =
    "Garage, an S3-compatible distributed object store for self-hosted deployments";

  # Nixpkgs 25.05 as of 2025-11-24
  inputs.nixpkgs.url =
    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";

  # Rust overlay as of 2025-11-24
  inputs.rust-overlay.url =
    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
  inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

  inputs.crane.url = "github:ipetkov/crane";

  inputs.flake-compat.url = "github:nix-community/flake-compat";
  inputs.flake-utils.url = "github:numtide/flake-utils";

  outputs = { self, nixpkgs, flake-utils, crane, rust-overlay, ... }:
    let
      compile = import ./nix/compile.nix;
    in
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};
        packageFor = target: release: (compile {
          inherit system target nixpkgs crane rust-overlay release;
        }).garage;
        testWith = extraTestEnv: (compile {
          inherit system nixpkgs crane rust-overlay extraTestEnv;
          release = false;
        }).garage-test;
        lints = (compile {
          inherit system nixpkgs crane rust-overlay;
          release = false;
        });
      in
      {
        packages = {
          # default = native release build
          default = packageFor null true;
          # <arch> = cross-compiled, statically-linked release builds
          amd64 = packageFor "x86_64-unknown-linux-musl" true;
          i386 = packageFor "i686-unknown-linux-musl" true;
          arm64 = packageFor "aarch64-unknown-linux-musl" true;
          arm = packageFor "armv6l-unknown-linux-musl" true;
          # dev = native dev build
          dev = packageFor null false;
          # test = cargo test
          tests = testWith {};
          tests-lmdb = testWith {
            GARAGE_TEST_INTEGRATION_DB_ENGINE = "lmdb";
          };
          tests-sqlite = testWith {
            GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
          };
          tests-fjall = testWith {
            GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
          };

          # lints (fmt, clippy)
          fmt = lints.garage-cargo-fmt;
          clippy = lints.garage-cargo-clippy;
        };

        # ---- development shell, for making native builds only ----
        devShells =
          let
            targets = compile {
              inherit system nixpkgs crane rust-overlay;
            };
          in
          {
            default = targets.devShell;

            # import the full shell using `nix develop .#full`
            full = pkgs.mkShell {
              buildInputs = with pkgs; [
                targets.toolchain
                protobuf
                clang
                mold
                # ---- extra packages for dev tasks ----
                rust-analyzer
                cargo-audit
                cargo-outdated
                cargo-machete
                nixpkgs-fmt
              ];
            };
          };
      });
}

k2v_test.py (158 lines, deleted)

@@ -1,158 +0,0 @@

#!/usr/bin/env python

import os
import requests
from datetime import datetime

# let's talk to our AWS Elasticsearch cluster
#from requests_aws4auth import AWS4Auth
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
#        'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
#        'us-east-1',
#        's3')

from aws_requests_auth.aws_auth import AWSRequestsAuth
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
        aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
        aws_host='localhost:3812',
        aws_region='us-east-1',
        aws_service='k2v')


print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
        auth=auth)
print(response.headers)
print(response.text)


sort_keys = ["a", "b", "c", "d"]

for sk in sort_keys:
    print("-- (%s) Put initial (no CT)"%sk)
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth,
            data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- ReadIndex")
    response = requests.get('http://localhost:3812/alex',
            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put with CT")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth,
            headers={'x-garage-causality-token': ct},
            data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put again with same CT (concurrent)")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth,
            headers={'x-garage-causality-token': ct},
            data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- Delete")
    response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
            headers={'x-garage-causality-token': ct},
            auth=auth)
    print(response.headers)
    print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
        auth=auth)
print(response.headers)
print(response.text)

print("-- InsertBatch")
response = requests.post('http://localhost:3812/alex',
        auth=auth,
        data='''
[
  {"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
  {"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
  {"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
]
''')
print(response.headers)
print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
        auth=auth)
print(response.headers)
print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
        auth=auth,
        data='''
[
  {"partitionKey": "root"},
  {"partitionKey": "root", "tombstones": true},
  {"partitionKey": "root", "tombstones": true, "limit": 2},
  {"partitionKey": "root", "start": "c", "singleItem": true},
  {"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
]
''')
print(response.headers)
print(response.text)


print("-- DeleteBatch")
response = requests.post('http://localhost:3812/alex?delete',
        auth=auth,
        data='''
[
  {"partitionKey": "root", "start": "b", "end": "c"}
]
''')
print(response.headers)
print(response.text)

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
        auth=auth,
        data='''
[
  {"partitionKey": "root"}
]
''')
print(response.headers)
print(response.text)

@@ -2,7 +2,7 @@

with import ./common.nix;
let
  pkgs = import nixpkgs { };
  lib = pkgs.lib;

  /* Converts a key list and a value list to a set

@@ -10,9 +10,9 @@ let

  flake = (import flake-compat { system = builtins.currentSystem; src = ../.; });
in
{
  flake = flake.defaultNix;
  nixpkgs = flake.defaultNix.inputs.nixpkgs;
  devShells = flake.defaultNix.devShells.${builtins.currentSystem};
}

nix/compile.nix (331 lines)

@@ -1,164 +1,64 @@

{
  /* build inputs */
  nixpkgs,
  crane,
  rust-overlay,

  /* parameters */
  system,
  git_version ? null,
  target ? null,
  release ? false,
  features ? null,
  extraTestEnv ? {}
}:

let
  log = v: builtins.trace v v;

  # NixOS and Rust/Cargo triples do not match for ARM, fix it here.
  rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
    "arm-unknown-linux-musleabihf"
  else
    target;

  rustTargetEnvMap = {
    "x86_64-unknown-linux-musl" = "X86_64_UNKNOWN_LINUX_MUSL";
    "aarch64-unknown-linux-musl" = "AARCH64_UNKNOWN_LINUX_MUSL";
    "i686-unknown-linux-musl" = "I686_UNKNOWN_LINUX_MUSL";
    "arm-unknown-linux-musleabihf" = "ARM_UNKNOWN_LINUX_MUSLEABIHF";
  };

  pkgsNative = import nixpkgs {
    inherit system;
    overlays = [ (import rust-overlay) ];
  };

  pkgs = if target != null then
    import nixpkgs {
      inherit system;
      crossSystem = {
        config = target;
        isStatic = true;
      };
      overlays = [ (import rust-overlay) ];
    }
  else
    pkgsNative;

  inherit (pkgs) lib stdenv;

  toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
    targets = lib.optionals (target != null) [ rustTarget ];
    extensions = [
      "rust-src"
      "rustfmt"
    ];
  });

  craneLib = (crane.mkLib pkgs).overrideToolchain toolchainFn;

  src = craneLib.cleanCargoSource ../.;

  /* We ship some parts of the code disabled by default by putting them behind a flag.
  It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).

@@ -168,17 +68,16 @@ let
  rootFeatures = if features != null then
    features
  else
    ([ "bundled-libs" "lmdb" "sqlite" "fjall" "k2v" ] ++ (lib.optionals release [
      "consul-discovery"
      "kubernetes-discovery"
      "metrics"
      "telemetry-otlp"
      "syslog"
      "journald"
    ]));

  featuresStr = lib.concatStringsSep "," rootFeatures;

  /* We compile fully static binaries with musl to simplify deployment on most systems.
  When possible, we reactivate PIE hardening (see above).

@@ -189,12 +88,9 @@ let
  For more information on static builds, please refer to Rust's RFC 1721.
  https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
  */
  codegenOptsMap = {
    "x86_64-unknown-linux-musl" =
      [ "target-feature=+crt-static" "link-arg=-static-pie" ];
    "aarch64-unknown-linux-musl" = [
      "target-feature=+crt-static"
      "link-arg=-static"

@@ -203,17 +99,106 @@
      "target-feature=+crt-static"
      "link-arg=-static"
    ]; # segfault with static-pie
    "armv6l-unknown-linux-musleabihf" = [
      "target-feature=+crt-static"
      "link-arg=-static"
    ]; # compile as dynamic with static-pie
  };

  codegenOpts = if target != null then codegenOptsMap.${target} else [
    "link-arg=-fuse-ld=mold"
  ];

  commonArgs =
    {
      inherit src;
      pname = "garage";
      version = "dev";

      strictDeps = true;
      cargoExtraArgs = "--locked --features ${featuresStr}";
      cargoTestExtraArgs = "--workspace";

      nativeBuildInputs = [
        pkgsNative.protobuf
        pkgs.stdenv.cc
      ] ++ lib.optionals (target == null) [
        pkgs.clang
        pkgs.mold
      ];

      CARGO_PROFILE = if release then "release" else "dev";
      CARGO_BUILD_RUSTFLAGS =
        lib.concatStringsSep
          " "
          (builtins.map (flag: "-C ${flag}") codegenOpts);
    }
    //
    (if rustTarget != null then {
      CARGO_BUILD_TARGET = rustTarget;

      "CARGO_TARGET_${rustTargetEnvMap.${rustTarget}}_LINKER" = "${stdenv.cc.targetPrefix}cc";

      HOST_CC = "${stdenv.cc.nativePrefix}cc";
      TARGET_CC = "${stdenv.cc.targetPrefix}cc";
    } else {
      CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER = "clang";
    });

in rec {
  toolchain = toolchainFn pkgs;

  devShell = pkgs.mkShell {
    buildInputs = [
      toolchain
    ] ++ (with pkgs; [
      protobuf
      clang
      mold
    ]);
  };

  # ---- building garage ----

  garage-deps = craneLib.buildDepsOnly commonArgs;

  garage = craneLib.buildPackage (commonArgs // {
    cargoArtifacts = garage-deps;

    doCheck = false;
  } //
  (if git_version != null then {
    version = git_version;
    GIT_VERSION = git_version;
  } else {}));

  # ---- testing garage ----

  garage-test-bin = craneLib.cargoBuild (commonArgs // {
    cargoArtifacts = garage-deps;

    pname = "garage-tests";

    CARGO_PROFILE = "test";
    cargoExtraArgs = "${commonArgs.cargoExtraArgs} --tests --workspace";
    doCheck = false;
  });

  garage-test = craneLib.cargoTest (commonArgs // {
    cargoArtifacts = garage-test-bin;
    nativeBuildInputs = commonArgs.nativeBuildInputs ++ [
      pkgs.cacert
    ];
  } // extraTestEnv);

  # ---- source code linting ----

  garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
    cargoExtraArgs = "";
  });

  garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
    cargoArtifacts = garage-deps;
    cargoClippyExtraArgs = "--all-targets -- -D warnings";
  });
}

@@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")

export RUST_BACKTRACE=1
export RUST_LOG=garage=info,garage_api_common=debug,garage_api_s3=debug
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"

if [ -z "$GARAGE_BIN" ]; then

@@ -1,6 +1,7 @@
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
export AWS_DEFAULT_REGION='garage'
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
# FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }

@@ -1,3 +1,3 @@
# Garage helm3 chart

Documentation is located [here](https://garagehq.deuxfleurs.fr/documentation/cookbook/kubernetes/).

@@ -1,24 +1,18 @@
apiVersion: v2
name: garage
description: S3-compatible object store for small self-hosted geo-distributed deployments

type: application
version: 0.7.3
appVersion: "v1.3.1"
home: https://garagehq.deuxfleurs.fr/
icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg

keywords:
- geo-distributed
- read-after-write-consistency
- s3-compatible

sources:
- https://git.deuxfleurs.fr/Deuxfleurs/garage.git
maintainers: []
script/helm/garage/README.md (new file, 95 lines)
@@ -0,0 +1,95 @@
# garage

S3-compatible object store for small self-hosted geo-distributed deployments

**Homepage:** <https://garagehq.deuxfleurs.fr/>

## Source Code

* <https://git.deuxfleurs.fr/Deuxfleurs/garage.git>

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
| environment | object | `{}` | |
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
| garage.blockSize | string | `"1048576"` | Default is 1MB. An increase can result in better performance in certain scenarios. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml; if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String template for the garage configuration; if set, ignores the above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
| garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
| garage.replicationMode | string | `"3"` | Defaults to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` | |
| garage.s3.web.index | string | `"index.html"` | |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"dxflrs/amd64_garage"` | default to amd64 docker image |
| image.tag | string | `""` | set the image tag; please prefer using the chart version and not this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` | |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS style bucket access |
| ingress.s3.api.labels | object | `{}` | |
| ingress.s3.api.tls | list | `[]` | |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` | |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` | |
| ingress.s3.web.tls | list | `[]` | |
| initImage.pullPolicy | string | `"IfNotPresent"` | |
| initImage.repository | string | `"busybox"` | |
| initImage.tag | string | `"stable"` | |
| livenessProbe | object | `{}` | Specifies a livenessProbe |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
| monitoring.metrics.serviceMonitor.labels | object | `{}` | |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` | |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` | |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` | |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` | |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry Traces, e.g. `http://localhost:4317` |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` | |
| persistence.data.size | string | `"100Mi"` | |
| persistence.enabled | bool | `true` | |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
| persistence.meta.size | string | `"100Mi"` | |
| podAnnotations | object | `{}` | additional pod annotations |
| podSecurityContext.fsGroup | int | `1000` | |
| podSecurityContext.runAsGroup | int | `1000` | |
| podSecurityContext.runAsNonRoot | bool | `true` | |
| podSecurityContext.runAsUser | int | `1000` | |
| readinessProbe | object | `{}` | Specifies a readinessProbe |
| resources | object | `{}` | |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted; feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| service.s3.api.port | int | `3900` | |
| service.s3.web.port | int | `3902` | |
| service.type | string | `"ClusterIP"` | You can rely on any service to expose your cluster - ClusterIP (+ Ingress) - NodePort (+ Ingress) - LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` | |

----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
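Putting these values together, a minimal override file for the chart could look like the sketch below. This is illustrative only: the hostname `s3.example.com` and the storage sizes are placeholders, and only keys documented in the table above are used.

```yaml
# my-values.yaml -- a sketch of chart overrides, not part of the chart itself
deployment:
  replicaCount: 3
garage:
  replicationMode: "3"
persistence:
  meta:
    size: 1Gi      # placeholder size
  data:
    size: 100Gi    # placeholder size
ingress:
  s3:
    api:
      enabled: true
      hosts:
        - host: "s3.example.com"   # placeholder hostname
          paths:
            - path: /
              pathType: Prefix
```

It would then be installed with something like `helm install garage ./script/helm/garage -f my-values.yaml`.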
@@ -1,7 +1,53 @@
+{{- if not .Values.garage.existingConfigMap }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: {{ include "garage.fullname" . }}-config
 data:
   garage.toml: |-
-    {{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
+    {{- if .Values.garage.garageTomlString }}
+    {{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
+    {{- else }}
+    metadata_dir = "/mnt/meta"
+    data_dir = "/mnt/data"
+
+    db_engine = "{{ .Values.garage.dbEngine }}"
+
+    block_size = {{ .Values.garage.blockSize }}
+
+    replication_mode = "{{ .Values.garage.replicationMode }}"
+
+    compression_level = {{ .Values.garage.compressionLevel }}
+
+    {{- if .Values.garage.metadataAutoSnapshotInterval }}
+    metadata_auto_snapshot_interval = {{ .Values.garage.metadataAutoSnapshotInterval | quote }}
+    {{- end }}
+
+    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
+    # rpc_secret will be populated by the init container from a k8s secret object
+    rpc_secret = "__RPC_SECRET_REPLACE__"
+
+    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
+
+    [kubernetes_discovery]
+    namespace = "{{ .Release.Namespace }}"
+    service_name = "{{ include "garage.fullname" . }}"
+    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
+
+    [s3_api]
+    s3_region = "{{ .Values.garage.s3.api.region }}"
+    api_bind_addr = "[::]:3900"
+    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
+
+    [s3_web]
+    bind_addr = "[::]:3902"
+    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
+    index = "{{ .Values.garage.s3.web.index }}"
+
+    [admin]
+    api_bind_addr = "[::]:3903"
+    {{- if .Values.monitoring.tracing.sink }}
+    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
+    {{- end }}
+    {{- end }}
+{{- end }}
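Since the template pipes `garageTomlString` through `tpl`, Helm template expressions inside the string are rendered before the ConfigMap is written. A hypothetical override illustrating this (the TOML keys mirror the defaults the template generates above):

```yaml
garage:
  # When this string is non-empty, the chart ignores the individual
  # garage.* values above and renders this template instead.
  garageTomlString: |-
    metadata_dir = "/mnt/meta"
    data_dir = "/mnt/data"
    replication_mode = "3"
    rpc_bind_addr = "[::]:3901"
    # still replaced by the init container, as in the generated config
    rpc_secret = "__RPC_SECRET_REPLACE__"

    [kubernetes_discovery]
    namespace = "{{ .Release.Namespace }}"
    service_name = "{{ include "garage.fullname" . }}"
```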
script/helm/garage/templates/service-headless.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
{{- if eq .Values.deployment.kind "StatefulSet" -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "garage.fullname" . }}-headless
  labels:
    {{- include "garage.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - port: {{ .Values.service.s3.api.port }}
      targetPort: 3900
      protocol: TCP
      name: s3-api
    - port: {{ .Values.service.s3.web.port }}
      targetPort: 3902
      protocol: TCP
      name: s3-web
  selector:
    {{- include "garage.selectorLabels" . | nindent 4 }}
{{- end }}
@@ -4,6 +4,10 @@ metadata:
   name: {{ include "garage.fullname" . }}
   labels:
     {{- include "garage.labels" . | nindent 4 }}
+  {{- with .Values.service.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 spec:
   type: {{ .Values.service.type }}
   ports:
@@ -37,4 +41,4 @@ spec:
       name: metrics
   selector:
     {{- include "garage.selectorLabels" . | nindent 4 }}
 {{- end }}
@@ -10,11 +10,11 @@ spec:
     {{- include "garage.selectorLabels" . | nindent 6 }}
   {{- if eq .Values.deployment.kind "StatefulSet" }}
   replicas: {{ .Values.deployment.replicaCount }}
-  serviceName: {{ include "garage.fullname" . }}
+  serviceName: {{ include "garage.fullname" . }}-headless
+  podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
   {{- end }}
   template:
     metadata:
       annotations:
         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
       {{- with .Values.podAnnotations }}
@@ -63,6 +63,10 @@ spec:
               name: web-api
             - containerPort: 3903
               name: admin
+          {{- with .Values.environment }}
+          env:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           volumeMounts:
             - name: meta
               mountPath: /mnt/meta
@@ -71,15 +75,17 @@ spec:
             - name: etc
               mountPath: /etc/garage.toml
               subPath: garage.toml
-          # TODO
-          # livenessProbe:
-          #   httpGet:
-          #     path: /
-          #     port: 3900
-          # readinessProbe:
-          #   httpGet:
-          #     path: /
-          #     port: 3900
+          {{- with .Values.extraVolumeMounts }}
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.livenessProbe }}
+          livenessProbe:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with .Values.readinessProbe }}
+          readinessProbe:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
         volumes:
@@ -105,6 +111,9 @@ spec:
         - name: data
           emptyDir: {}
       {{- end }}
+      {{- with .Values.extraVolumes }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
@@ -4,28 +4,34 @@

 # Garage configuration. These values go to garage.toml
 garage:
-  # Can be changed for better performance on certain systems
+  # -- Can be changed for better performance on certain systems
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
   dbEngine: "lmdb"

-  # Default is 1MB
+  # -- Default is 1MB
   # An increase can result in better performance in certain scenarios
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
   blockSize: "1048576"

-  # Defaults to 3 replicas, see the replication_mode section at
+  # -- Defaults to 3 replicas, see the replication_mode section at
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
   replicationMode: "3"

-  # zstd compression level of stored blocks
+  # -- zstd compression level of stored blocks
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
   compressionLevel: "1"

+  # -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
+  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval
+  metadataAutoSnapshotInterval: ""
+
   rpcBindAddr: "[::]:3901"
-  # If not given, a random secret will be generated and stored in a Secret object
+  # -- If not given, a random secret will be generated and stored in a Secret object
   rpcSecret: ""
-  # This is not required if you use the integrated kubernetes discovery
+  # -- This is not required if you use the integrated kubernetes discovery
   bootstrapPeers: []
+  # -- Set to true if you want to use k8s discovery but install the CRDs manually outside
+  # of the helm chart, for example if you operate at namespace level without cluster resources
   kubernetesSkipCrd: false
   s3:
     api:
@@ -34,47 +40,16 @@ garage:
     web:
       rootDomain: ".web.garage.tld"
       index: "index.html"
-  # Template for the garage configuration
-  # Values can be templated
-  # ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
-  garage.toml: |-
-    metadata_dir = "/mnt/meta"
-    data_dir = "/mnt/data"
-
-    db_engine = "{{ .Values.garage.dbEngine }}"
-
-    block_size = {{ .Values.garage.blockSize }}
-
-    replication_mode = "{{ .Values.garage.replicationMode }}"
-
-    compression_level = {{ .Values.garage.compressionLevel }}
-
-    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
-    # rpc_secret will be populated by the init container from a k8s secret object
-    rpc_secret = "__RPC_SECRET_REPLACE__"
-
-    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
-
-    [kubernetes_discovery]
-    namespace = "{{ .Release.Namespace }}"
-    service_name = "{{ include "garage.fullname" . }}"
-    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
-
-    [s3_api]
-    s3_region = "{{ .Values.garage.s3.api.region }}"
-    api_bind_addr = "[::]:3900"
-    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
-
-    [s3_web]
-    bind_addr = "[::]:3902"
-    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
-    index = "{{ .Values.garage.s3.web.index }}"
-
-    [admin]
-    api_bind_addr = "[::]:3903"
-    {{- if .Values.monitoring.tracing.sink }}
-    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
-    {{- end }}
+  # -- if not empty string, allow using an existing ConfigMap for the garage.toml,
+  # if set, ignores garage.toml
+  existingConfigMap: ""
+
+  # -- String Template for the garage configuration
+  # if set, ignores above values.
+  # Values can be templated,
+  # see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
+  garageTomlString: ""

 # Data persistence
 persistence:
@@ -92,14 +67,18 @@ persistence:

 # Deployment configuration
 deployment:
-  # Switchable to DaemonSet
+  # -- Switchable to DaemonSet
   kind: StatefulSet
-  # Number of StatefulSet replicas/garage nodes to start
+  # -- Number of StatefulSet replicas/garage nodes to start
   replicaCount: 3
+  # -- If using statefulset, allow Parallel or OrderedReady (default)
+  podManagementPolicy: OrderedReady

 image:
+  # -- default to amd64 docker image
   repository: dxflrs/amd64_garage
-  # please prefer using the chart version and not this tag
+  # -- set the image tag, please prefer using the chart version and not this
+  # to avoid compatibility issues
   tag: ""
   pullPolicy: IfNotPresent
@@ -108,19 +87,21 @@ initImage:
   tag: stable
   pullPolicy: IfNotPresent

+# -- set if you need credentials to pull your custom image
 imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""

 serviceAccount:
-  # Specifies whether a service account should be created
+  # -- Specifies whether a service account should be created
   create: true
-  # Annotations to add to the service account
+  # -- Annotations to add to the service account
   annotations: {}
-  # The name of the service account to use.
+  # -- The name of the service account to use.
   # If not set and create is true, a name is generated using the fullname template
   name: ""

+# -- additional pod annotations
 podAnnotations: {}

 podSecurityContext:
@@ -130,7 +111,7 @@ podSecurityContext:
   runAsNonRoot: true

 securityContext:
-  # The default security context is heavily restricted
+  # -- The default security context is heavily restricted,
   # feel free to tune it to your requirements
   capabilities:
     drop:
@@ -138,11 +119,13 @@ securityContext:
   readOnlyRootFilesystem: true

 service:
-  # You can rely on any service to expose your cluster
+  # -- You can rely on any service to expose your cluster
   # - ClusterIP (+ Ingress)
   # - NodePort (+ Ingress)
   # - LoadBalancer
   type: ClusterIP
+  # -- Annotations to add to the service
+  annotations: {}
   s3:
     api:
       port: 3900
@@ -154,20 +137,23 @@ ingress:
   s3:
     api:
       enabled: false
-      # Rely either on the className or the annotation below but not both
-      # replace "nginx" by an Ingress controller
-      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+      # -- Rely _either_ on the className or the annotation below but not both!
+      # If you want to use the className, set
       # className: "nginx"
+      # and replace "nginx" by an Ingress controller name,
+      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
       annotations: {}
         # kubernetes.io/ingress.class: "nginx"
         # kubernetes.io/tls-acme: "true"
       labels: {}
       hosts:
-        - host: "s3.garage.tld" # garage S3 API endpoint
+        # -- garage S3 API endpoint, to be used with awscli for example
+        - host: "s3.garage.tld"
           paths:
             - path: /
               pathType: Prefix
-        - host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
+        # -- garage S3 API endpoint, DNS style bucket access
+        - host: "*.s3.garage.tld"
           paths:
             - path: /
               pathType: Prefix
@@ -177,20 +163,23 @@ ingress:
       # - kubernetes.docker.internal
     web:
       enabled: false
-      # Rely either on the className or the annotation below but not both
-      # replace "nginx" by an Ingress controller
-      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+      # -- Rely _either_ on the className or the annotation below but not both!
+      # If you want to use the className, set
       # className: "nginx"
+      # and replace "nginx" by an Ingress controller name,
+      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
       annotations: {}
         # kubernetes.io/ingress.class: nginx
         # kubernetes.io/tls-acme: "true"
       labels: {}
       hosts:
-        - host: "*.web.garage.tld" # wildcard website access with bucket name prefix
+        # -- wildcard website access with bucket name prefix
+        - host: "*.web.garage.tld"
           paths:
             - path: /
               pathType: Prefix
-        - host: "mywebpage.example.com" # specific bucket access with FQDN bucket
+        # -- specific bucket access with FQDN bucket
+        - host: "mywebpage.example.com"
           paths:
             - path: /
               pathType: Prefix
@@ -208,18 +197,39 @@ resources: {}
 #   cpu: 100m
 #   memory: 512Mi

+# -- Specifies a livenessProbe
+livenessProbe: {}
+  #httpGet:
+  #  path: /health
+  #  port: 3903
+  #initialDelaySeconds: 5
+  #periodSeconds: 30
+# -- Specifies a readinessProbe
+readinessProbe: {}
+  #httpGet:
+  #  path: /health
+  #  port: 3903
+  #initialDelaySeconds: 5
+  #periodSeconds: 30
+
 nodeSelector: {}

 tolerations: []

 affinity: {}

+environment: {}
+
+extraVolumes: {}
+
+extraVolumeMounts: {}
+
 monitoring:
   metrics:
-    # If true, a service for monitoring is created with a prometheus.io/scrape annotation
+    # -- If true, a service for monitoring is created with a prometheus.io/scrape annotation
     enabled: false
     serviceMonitor:
-      # If true, a ServiceMonitor CRD is created for a prometheus operator
+      # -- If true, a ServiceMonitor CRD is created for a prometheus operator
       # https://github.com/coreos/prometheus-operator
       enabled: false
       path: /metrics
@@ -231,4 +241,5 @@ monitoring:
       scrapeTimeout: 10s
       relabelings: []
   tracing:
+    # -- specify a sink endpoint for OpenTelemetry Traces, e.g. `http://localhost:4317`
     sink: ""
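The commented hints above suggest how the new probe values are meant to be filled in. A sketch enabling both probes plus metrics follows; the `/health` path and port 3903 are taken from the commented defaults and the admin API bind address in the ConfigMap template, so treat them as assumptions to verify against your Garage version:

```yaml
livenessProbe:
  httpGet:
    path: /health   # from the commented hint above
    port: 3903      # admin API port per the generated garage.toml
  initialDelaySeconds: 5
  periodSeconds: 30
readinessProbe:
  httpGet:
    path: /health
    port: 3903
  initialDelaySeconds: 5
  periodSeconds: 30
monitoring:
  metrics:
    enabled: true
```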
script/jepsen.garage/Vagrantfile (vendored, 14 lines)
@@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
   config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
   config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end

-  config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
-  config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
-  config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
-  config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
-  config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
-  config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
-  config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
+  #config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
+  #config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
+  #config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
+  #config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
+  #config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
+  #config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
+  #config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
 end
@@ -3,11 +3,10 @@
 set -x

 #for ppatch in task3c task3a tsfix2; do
-for ppatch in tsfix2; do
+for ppatch in v093 v1rc1; do
 #for psc in c cp cdp r pr cpr dpr; do
-for psc in cdp r pr cpr dpr; do
-#for ptsk in reg2 set1 set2; do
-for ptsk in set1; do
+for ptsk in reg2 set2; do
+for psc in c cp cdp r pr cpr dpr; do
 for irun in $(seq 10); do
 lein run test --nodes-file nodes.vagrant \
 	--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \
@@ -38,7 +38,9 @@
    "tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
    "task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
    "task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
-   "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})
+   "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
+   "v093"   "v0.9.3"
+   "v1rc1"  "v1.0.0-rc1"})

 (def cli-opts
   "Additional command line options."
@@ -43,7 +43,7 @@
         "rpc_bind_addr = \"0.0.0.0:3901\"\n"
         "rpc_public_addr = \"" node ":3901\"\n"
         "db_engine = \"lmdb\"\n"
-        "replication_mode = \"2\"\n"
+        "replication_mode = \"3\"\n"
         "data_dir = \"" data-dir "\"\n"
         "metadata_dir = \"" meta-dir "\"\n"
         "[s3_api]\n"
script/k8s/crd/garagenodes.deuxfleurs.fr.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: garagenodes.deuxfleurs.fr
spec:
  conversion:
    strategy: None
  group: deuxfleurs.fr
  names:
    kind: GarageNode
    listKind: GarageNodeList
    plural: garagenodes
    singular: garagenode
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: Auto-generated derived type for Node via `CustomResource`
        properties:
          spec:
            properties:
              address:
                format: ip
                type: string
              hostname:
                type: string
              port:
                format: uint16
                minimum: 0
                type: integer
            required:
            - address
            - hostname
            - port
            type: object
        required:
        - spec
        title: GarageNode
        type: object
    served: true
    storage: true
    subresources: {}
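For reference, a custom resource conforming to this schema would look like the sketch below. The name, namespace, and address are invented values; in normal operation these objects are created by Garage's Kubernetes discovery rather than by hand:

```yaml
apiVersion: deuxfleurs.fr/v1
kind: GarageNode
metadata:
  name: example-node     # hypothetical name
  namespace: garage      # hypothetical namespace
spec:
  address: 10.0.0.17     # placeholder node IP
  hostname: garage-0
  port: 3901             # Garage RPC port
```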
script/k8s/crd/kustomization.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - garagenodes.deuxfleurs.fr.yaml
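This makes the CRD installable on its own, which pairs with the chart's `garage.kubernetesSkipCrd` option. One plausible way to consume it is from a parent kustomization; the path below is relative to wherever the repository is checked out, so adjust it to your layout:

```yaml
# kustomization.yaml in your deployment repo -- a sketch
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./garage/script/k8s/crd   # placeholder path to this directory
```

Applying it with `kubectl apply -k .` installs only the GarageNode CRD, after which the chart itself can run with `kubernetesSkipCrd: true`.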
@@ -7,7 +7,12 @@ if [ "$#" -ne 1 ]; then
 	exit 2
 fi

-if file $1 | grep 'dynamically linked' 2>&1; then
+if [ ! -x "$1" ]; then
+	echo "[fail] $1 does not exist or is not an executable"
+	exit 1
+fi
+
+if file "$1" | grep 'dynamically linked' 2>&1; then
 	echo "[fail] $1 is dynamic"
 	exit 1
 fi
@@ -3,7 +3,7 @@
 with import ./nix/common.nix;

 let
-  pkgs = import pkgsSrc {
+  pkgs = import nixpkgs {
     inherit system;
   };
   winscp = (import ./nix/winscp.nix) pkgs;
@@ -11,6 +11,7 @@ in
 {
   # --- Dev shell inherited from flake.nix ---
   devShell = devShells.default;
+  devShellFull = devShells.full;

   # --- Continuous integration shell ---
   # The shell used for all CI jobs (along with devShell)
@@ -33,12 +34,14 @@ in
       jq
     ];
     shellHook = ''
+      export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
+
       function to_s3 {
         aws \
           --endpoint-url https://garage.deuxfleurs.fr \
           --region garage \
           s3 cp \
-          ./result-bin/bin/garage \
+          ./result/bin/garage \
           s3://garagehq.deuxfleurs.fr/_releases/''${CI_COMMIT_TAG:-$CI_COMMIT_SHA}/''${TARGET}/garage
       }
@@ -114,7 +117,7 @@ in
     shellHook = ''
       function refresh_cache {
         pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
-        for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
+        for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
           echo "Updating cache for ''${attr}"
           nix copy -j8 \
             --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
src/api/admin/Cargo.toml (new file, 43 lines)
@@ -0,0 +1,43 @@
[package]
name = "garage_api_admin"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Admin API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"

[lib]
path = "lib.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true

argon2.workspace = true
async-trait.workspace = true
thiserror.workspace = true
hex.workspace = true
tracing.workspace = true

futures.workspace = true
tokio.workspace = true
http.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
url.workspace = true

serde.workspace = true
serde_json.workspace = true

opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }

[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]
@@ -2,7 +2,6 @@ use std::collections::HashMap;
 use std::sync::Arc;

 use argon2::password_hash::PasswordHash;
-use async_trait::async_trait;

 use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
 use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
@@ -20,15 +19,15 @@ use garage_rpc::system::ClusterHealthStatus;
 use garage_util::error::Error as GarageError;
 use garage_util::socket_address::UnixOrTCPSocketAddress;

-use crate::generic_server::*;
+use garage_api_common::generic_server::*;
+use garage_api_common::helpers::*;

-use crate::admin::bucket::*;
-use crate::admin::cluster::*;
-use crate::admin::error::*;
-use crate::admin::key::*;
-use crate::admin::router_v0;
-use crate::admin::router_v1::{Authorization, Endpoint};
-use crate::helpers::*;
+use crate::bucket::*;
+use crate::cluster::*;
+use crate::error::*;
+use crate::key::*;
+use crate::router_v0;
+use crate::router_v1::{Authorization, Endpoint};

 pub type ResBody = BoxBody<Error>;
@@ -221,7 +220,6 @@ impl AdminApiServer {
 	}
 }

-#[async_trait]
 impl ApiHandler for AdminApiServer {
 	const API_NAME: &'static str = "admin";
 	const API_NAME_DISPLAY: &'static str = "Admin";
@@ -17,11 +17,12 @@ use garage_model::permission::*;
 use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;

-use crate::admin::api_server::ResBody;
-use crate::admin::error::*;
-use crate::admin::key::ApiBucketKeyPerm;
-use crate::common_error::CommonError;
-use crate::helpers::*;
+use garage_api_common::common_error::CommonError;
+use garage_api_common::helpers::*;
+
+use crate::api_server::ResBody;
+use crate::error::*;
+use crate::key::ApiBucketKeyPerm;

 pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let buckets = garage
@@ -276,7 +277,7 @@ pub async fn handle_create_bucket(
 	let helper = garage.locked_helper().await;

 	if let Some(ga) = &req.global_alias {
-		if !is_valid_bucket_name(ga) {
+		if !is_valid_bucket_name(ga, garage.config.allow_punycode) {
 			return Err(Error::bad_request(format!(
 				"{}: {}",
 				ga, INVALID_BUCKET_NAME_MESSAGE
@@ -291,7 +292,7 @@ pub async fn handle_create_bucket(
 	}

 	if let Some(la) = &req.local_alias {
-		if !is_valid_bucket_name(&la.alias) {
+		if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) {
 			return Err(Error::bad_request(format!(
 				"{}: {}",
 				la.alias, INVALID_BUCKET_NAME_MESSAGE
@@ -381,7 +382,7 @@ pub async fn handle_delete_bucket(
 	for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
 		if *active {
 			helper
-				.unset_local_bucket_alias(bucket.id, key_id, alias)
+				.purge_local_bucket_alias(bucket.id, key_id, alias)
 				.await?;
 		}
 	}
@@ -12,9 +12,10 @@ use garage_rpc::layout;

 use garage_model::garage::Garage;

-use crate::admin::api_server::ResBody;
-use crate::admin::error::*;
-use crate::helpers::{json_ok_response, parse_json_body};
+use garage_api_common::helpers::{json_ok_response, parse_json_body};
+
+use crate::api_server::ResBody;
+use crate::error::*;

 pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let layout = garage.system.cluster_layout();
@@ -27,7 +28,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 			i.id,
 			NodeResp {
 				id: hex::encode(i.id),
-				addr: Some(i.addr),
+				addr: i.addr,
 				hostname: i.status.hostname,
 				is_up: i.is_up,
 				last_seen_secs_ago: i.last_seen_secs_ago,
@@ -70,26 +71,30 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 				);
 			}
 			Some(n) => {
-				if n.role.is_none() {
-					n.role = Some(role);
-				}
+				n.role = Some(role);
 			}
 		}
 	}

-	for ver in layout.versions.iter().rev().skip(1) {
+	for ver in layout.versions().iter().rev().skip(1) {
 		for (id, _, role) in ver.roles.items().iter() {
 			if let layout::NodeRoleV(Some(r)) = role {
-				if !nodes.contains_key(id) && r.capacity.is_some() {
-					nodes.insert(
-						*id,
-						NodeResp {
-							id: hex::encode(id),
-							draining: true,
-							..Default::default()
-						},
-					);
+				if r.capacity.is_some() {
+					if let Some(n) = nodes.get_mut(id) {
+						if n.role.is_none() {
+							n.draining = true;
+						}
+					} else {
+						nodes.insert(
+							*id,
+							NodeResp {
+								id: hex::encode(id),
+								draining: true,
+								..Default::default()
+							},
+						);
+					}
 				}
 			}
 		}
 	}
@@ -156,7 +161,7 @@ pub async fn handle_connect_cluster_nodes(
 }

 pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
-	let res = format_cluster_layout(&garage.system.cluster_layout());
+	let res = format_cluster_layout(garage.system.cluster_layout().inner());

 	Ok(json_ok_response(&res)?)
 }
@@ -295,7 +300,7 @@ pub async fn handle_update_cluster_layout(
 ) -> Result<Response<ResBody>, Error> {
 	let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

-	let mut layout = garage.system.cluster_layout().clone();
+	let mut layout = garage.system.cluster_layout().inner().clone();

 	let mut roles = layout.current().roles.clone();
 	roles.merge(&layout.staging.get().roles);
@@ -341,7 +346,7 @@ pub async fn handle_apply_cluster_layout(
 ) -> Result<Response<ResBody>, Error> {
 	let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;

-	let layout = garage.system.cluster_layout().clone();
+	let layout = garage.system.cluster_layout().inner().clone();
 	let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;

 	garage
@@ -360,7 +365,7 @@ pub async fn handle_apply_cluster_layout(
 pub async fn handle_revert_cluster_layout(
 	garage: &Arc<Garage>,
 ) -> Result<Response<ResBody>, Error> {
-	let layout = garage.system.cluster_layout().clone();
+	let layout = garage.system.cluster_layout().inner().clone();
 	let layout = layout.revert_staged_changes()?;
 	garage
 		.system
@@ -1,45 +1,50 @@
-use err_derive::Error;
+use std::convert::TryFrom;

 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 pub use garage_model::helper::error::Error as HelperError;

-use crate::common_error::CommonError;
-pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
-use crate::generic_server::ApiError;
-use crate::helpers::*;
+use garage_api_common::common_error::{commonErrorDerivative, CommonError};
+pub use garage_api_common::common_error::{
+	CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
+};
+use garage_api_common::generic_server::ApiError;
+use garage_api_common::helpers::*;

 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	/// Error from common error
-	Common(CommonError),
+	Common(#[from] CommonError),

 	// Category: cannot process
 	/// The API access key does not exist
-	#[error(display = "Access key not found: {}", _0)]
+	#[error("Access key not found: {0}")]
 	NoSuchAccessKey(String),

 	/// In Import key, the key already exists
-	#[error(
-		display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
-		_0
-	)]
+	#[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
 	KeyAlreadyExists(String),
 }

-impl<T> From<T> for Error
-where
-	CommonError: From<T>,
-{
-	fn from(err: T) -> Self {
-		Error::Common(CommonError::from(err))
-	}
-}
+commonErrorDerivative!(Error);

-impl CommonErrorDerivative for Error {}
+/// FIXME: helper errors are transformed into their corresponding variants
+/// in the Error struct, but in many cases a helper error should be considered
+/// an internal error.
+impl From<HelperError> for Error {
+	fn from(err: HelperError) -> Error {
+		match CommonError::try_from(err) {
+			Ok(ce) => Self::Common(ce),
+			Err(HelperError::NoSuchAccessKey(k)) => Self::NoSuchAccessKey(k),
+			Err(_) => unreachable!(),
+		}
+	}
+}

 impl Error {
 	fn code(&self) -> &'static str {
 		match self {
@@ -9,9 +9,10 @@ use garage_table::*;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;

-use crate::admin::api_server::ResBody;
-use crate::admin::error::*;
-use crate::helpers::*;
+use garage_api_common::helpers::*;
+
+use crate::api_server::ResBody;
+use crate::error::*;

 pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let res = garage
@@ -1,3 +1,6 @@
+#[macro_use]
+extern crate tracing;
+
 pub mod api_server;
 mod error;
 mod router_v0;
@@ -2,8 +2,9 @@ use std::borrow::Cow;

 use hyper::{Method, Request};

-use crate::admin::error::*;
-use crate::router_macros::*;
+use garage_api_common::router_macros::*;
+
+use crate::error::*;

 router_match! {@func
@@ -2,9 +2,10 @@ use std::borrow::Cow;

 use hyper::{Method, Request};

-use crate::admin::error::*;
-use crate::admin::router_v0;
-use crate::router_macros::*;
+use garage_api_common::router_macros::*;
+
+use crate::error::*;
+use crate::router_v0;

 pub enum Authorization {
 	None,
src/api/common/Cargo.toml (new file, 48 lines)
@@ -0,0 +1,48 @@
[package]
name = "garage_api_common"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"

[lib]
path = "lib.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true

base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
thiserror.workspace = true
hex.workspace = true
hmac.workspace = true
md-5.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true

futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
url.workspace = true

serde.workspace = true
serde_json.workspace = true

opentelemetry.workspace = true
@@ -1,5 +1,7 @@
-use err_derive::Error;
+use std::convert::TryFrom;

 use hyper::StatusCode;
+use thiserror::Error;

 use garage_util::error::Error as GarageError;
@@ -10,51 +12,80 @@ use garage_model::helper::error::Error as HelperError;
 pub enum CommonError {
 	// ---- INTERNAL ERRORS ----
 	/// Error related to deeper parts of Garage
-	#[error(display = "Internal error: {}", _0)]
-	InternalError(#[error(source)] GarageError),
+	#[error("Internal error: {0}")]
+	InternalError(#[from] GarageError),

 	/// Error related to Hyper
-	#[error(display = "Internal error (Hyper error): {}", _0)]
-	Hyper(#[error(source)] hyper::Error),
+	#[error("Internal error (Hyper error): {0}")]
+	Hyper(#[from] hyper::Error),

 	/// Error related to HTTP
-	#[error(display = "Internal error (HTTP error): {}", _0)]
-	Http(#[error(source)] http::Error),
+	#[error("Internal error (HTTP error): {0}")]
+	Http(#[from] http::Error),

 	// ---- GENERIC CLIENT ERRORS ----
 	/// Proper authentication was not provided
-	#[error(display = "Forbidden: {}", _0)]
+	#[error("Forbidden: {0}")]
 	Forbidden(String),

 	/// Generic bad request response with custom message
-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),

 	/// The client sent a header with invalid value
-	#[error(display = "Invalid header value: {}", _0)]
-	InvalidHeader(#[error(source)] hyper::header::ToStrError),
+	#[error("Invalid header value: {0}")]
+	InvalidHeader(#[from] hyper::header::ToStrError),

 	// ---- SPECIFIC ERROR CONDITIONS ----
 	// These have to be error codes referenced in the S3 spec here:
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
 	/// The requested bucket does not exist
-	#[error(display = "Bucket not found: {}", _0)]
+	#[error("Bucket not found: {0}")]
 	NoSuchBucket(String),

 	/// Tried to create a bucket that already exists
-	#[error(display = "Bucket already exists")]
+	#[error("Bucket already exists")]
 	BucketAlreadyExists,

 	/// Tried to delete a non-empty bucket
-	#[error(display = "Tried to delete a non-empty bucket")]
+	#[error("Tried to delete a non-empty bucket")]
 	BucketNotEmpty,

 	// Category: bad request
 	/// Bucket name is not valid according to AWS S3 specs
-	#[error(display = "Invalid bucket name: {}", _0)]
+	#[error("Invalid bucket name: {0}")]
 	InvalidBucketName(String),
 }

+#[macro_export]
+macro_rules! commonErrorDerivative {
+	( $error_struct: ident ) => {
+		impl From<garage_util::error::Error> for $error_struct {
+			fn from(err: garage_util::error::Error) -> Self {
+				Self::Common(CommonError::InternalError(err))
+			}
+		}
+		impl From<http::Error> for $error_struct {
+			fn from(err: http::Error) -> Self {
+				Self::Common(CommonError::Http(err))
+			}
+		}
+		impl From<hyper::Error> for $error_struct {
+			fn from(err: hyper::Error) -> Self {
+				Self::Common(CommonError::Hyper(err))
+			}
+		}
+		impl From<hyper::header::ToStrError> for $error_struct {
+			fn from(err: hyper::header::ToStrError) -> Self {
+				Self::Common(CommonError::InvalidHeader(err))
+			}
+		}
+		impl CommonErrorDerivative for $error_struct {}
+	};
+}
+
+pub use commonErrorDerivative;
+
 impl CommonError {
 	pub fn http_status_code(&self) -> StatusCode {
 		match self {
@@ -97,18 +128,39 @@ impl CommonError {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<HelperError> for CommonError {
|
impl TryFrom<HelperError> for CommonError {
|
||||||
fn from(err: HelperError) -> Self {
|
type Error = HelperError;
|
||||||
|
|
||||||
|
fn try_from(err: HelperError) -> Result<Self, HelperError> {
|
||||||
match err {
|
match err {
|
||||||
HelperError::Internal(i) => Self::InternalError(i),
|
HelperError::Internal(i) => Ok(Self::InternalError(i)),
|
||||||
HelperError::BadRequest(b) => Self::BadRequest(b),
|
HelperError::BadRequest(b) => Ok(Self::BadRequest(b)),
|
||||||
HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
|
HelperError::InvalidBucketName(n) => Ok(Self::InvalidBucketName(n)),
|
||||||
HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
|
HelperError::NoSuchBucket(n) => Ok(Self::NoSuchBucket(n)),
|
||||||
e => Self::bad_request(format!("{}", e)),
|
e => Err(e),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// This function converts HelperErrors into CommonErrors,
|
||||||
|
/// for variants that exist in CommonError.
|
||||||
|
/// This is used for helper functions that might return InvalidBucketName
|
||||||
|
/// or NoSuchBucket for instance, and we want to pass that error
|
||||||
|
/// up to our caller.
|
||||||
|
pub fn pass_helper_error(err: HelperError) -> CommonError {
|
||||||
|
match CommonError::try_from(err) {
|
||||||
|
Ok(e) => e,
|
||||||
|
Err(e) => panic!("Helper error `{}` should hot have happenned here", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn helper_error_as_internal(err: HelperError) -> CommonError {
|
||||||
|
match err {
|
||||||
|
HelperError::Internal(e) => CommonError::InternalError(e),
|
||||||
|
e => CommonError::InternalError(GarageError::Message(e.to_string())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub trait CommonErrorDerivative: From<CommonError> {
|
pub trait CommonErrorDerivative: From<CommonError> {
|
||||||
fn internal_error<M: ToString>(msg: M) -> Self {
|
fn internal_error<M: ToString>(msg: M) -> Self {
|
||||||
Self::from(CommonError::InternalError(GarageError::Message(
|
Self::from(CommonError::InternalError(GarageError::Message(
|
||||||
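The `commonErrorDerivative!` macro added above replaces the blanket `From` impls that `err_derive` used to generate. As a sketch of how a downstream API crate might apply it to its own error type (`MyApiError` is hypothetical; the macro and trait are the ones defined in this file):

```rust
use thiserror::Error;

use garage_api_common::common_error::{
	commonErrorDerivative, CommonError, CommonErrorDerivative,
};

/// Hypothetical error type of one of the API server crates.
/// The macro requires a `Common(CommonError)` variant to funnel
/// the shared error cases into.
#[derive(Debug, Error)]
pub enum MyApiError {
	#[error("{0}")]
	Common(#[from] CommonError),

	#[error("Not implemented: {0}")]
	NotImplemented(String),
}

// Expands to From<garage_util::error::Error>, From<http::Error>,
// From<hyper::Error> and From<hyper::header::ToStrError> impls,
// plus `impl CommonErrorDerivative for MyApiError {}`.
commonErrorDerivative!(MyApiError);
```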
170  src/api/common/cors.rs  Normal file

@@ -0,0 +1,170 @@
use std::sync::Arc;

use http::header::{
	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
	ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, StatusCode};

use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;

use crate::common_error::{
	helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
};
use crate::helpers::*;

pub fn find_matching_cors_rule<'a, B>(
	bucket_params: &'a BucketParams,
	req: &Request<B>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
	if let Some(cors_config) = bucket_params.cors_config.get() {
		if let Some(origin) = req.headers().get("Origin") {
			let origin = origin.to_str()?;
			let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
				Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
				None => vec![],
			};
			return Ok(cors_config.iter().find(|rule| {
				cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
			}));
		}
	}
	Ok(None)
}

pub fn cors_rule_matches<'a, HI, S>(
	rule: &GarageCorsRule,
	origin: &'a str,
	method: &'a str,
	mut request_headers: HI,
) -> bool
where
	HI: Iterator<Item = S>,
	S: AsRef<str>,
{
	rule.allow_origins.iter().any(|x| x == "*" || x == origin)
		&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
		&& request_headers.all(|h| {
			rule.allow_headers
				.iter()
				.any(|x| x == "*" || x == h.as_ref())
		})
}

pub fn add_cors_headers(
	resp: &mut Response<impl Body>,
	rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
	let h = resp.headers_mut();
	h.insert(
		ACCESS_CONTROL_ALLOW_ORIGIN,
		rule.allow_origins.join(", ").parse()?,
	);
	h.insert(
		ACCESS_CONTROL_ALLOW_METHODS,
		rule.allow_methods.join(", ").parse()?,
	);
	h.insert(
		ACCESS_CONTROL_ALLOW_HEADERS,
		rule.allow_headers.join(", ").parse()?,
	);
	h.insert(
		ACCESS_CONTROL_EXPOSE_HEADERS,
		rule.expose_headers.join(", ").parse()?,
	);
	Ok(())
}

pub async fn handle_options_api(
	garage: Arc<Garage>,
	req: &Request<IncomingBody>,
	bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
	// FIXME: CORS rules of buckets with local aliases are
	// not taken into account.

	// If the bucket name is a global bucket name,
	// we try to apply the CORS rules of that bucket.
	// If a user has a local bucket name that has
	// the same name, its CORS rules won't be applied
	// and will be shadowed by the rules of the globally
	// existing bucket (but this is inevitable because
	// OPTIONS calls are not authenticated).
	if let Some(bn) = bucket_name {
		let helper = garage.bucket_helper();
		let bucket_id = helper
			.resolve_global_bucket_name(&bn)
			.await
			.map_err(helper_error_as_internal)?;
		if let Some(id) = bucket_id {
			let bucket = garage
				.bucket_helper()
				.get_existing_bucket(id)
				.await
				.map_err(helper_error_as_internal)?;
			let bucket_params = bucket.state.into_option().unwrap();
			handle_options_for_bucket(req, &bucket_params)
		} else {
			// If there is a bucket name in the request, but that name
			// does not correspond to a global alias for a bucket,
			// then it's either a non-existing bucket or a local bucket.
			// We have no way of knowing, because the request is not
			// authenticated and thus we can't resolve local aliases.
			// We take the permissive approach of allowing everything,
			// because we don't want to prevent web apps that use
			// local bucket names from making API calls.
			Ok(Response::builder()
				.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
				.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
				.status(StatusCode::OK)
				.body(EmptyBody::new())?)
		}
	} else {
		// If there is no bucket name in the request,
		// we are doing a ListBuckets call, which we want to allow
		// for all origins.
		Ok(Response::builder()
			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
			.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
			.status(StatusCode::OK)
			.body(EmptyBody::new())?)
	}
}

pub fn handle_options_for_bucket<B>(
	req: &Request<B>,
	bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
	let origin = req
		.headers()
		.get("Origin")
		.ok_or_bad_request("Missing Origin header")?
		.to_str()?;
	let request_method = req
		.headers()
		.get(ACCESS_CONTROL_REQUEST_METHOD)
		.ok_or_bad_request("Missing Access-Control-Request-Method header")?
		.to_str()?;
	let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
		Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
		None => vec![],
	};

	if let Some(cors_config) = bucket_params.cors_config.get() {
		let matching_rule = cors_config
			.iter()
			.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
		if let Some(rule) = matching_rule {
			let mut resp = Response::builder()
				.status(StatusCode::OK)
				.body(EmptyBody::new())?;
			add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
			return Ok(resp);
		}
	}

	Err(CommonError::Forbidden(
		"This CORS request is not allowed.".into(),
	))
}
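To illustrate the matching semantics of `cors_rule_matches` (a sketch; the `CorsRule` field set is assumed from `garage_model::bucket_table` and the values are made up):

```rust
use garage_model::bucket_table::CorsRule;

// A rule allowing any origin to GET with an x-custom header.
let rule = CorsRule {
	id: None,
	max_age_seconds: None,
	allow_origins: vec!["*".into()],
	allow_methods: vec!["GET".into()],
	allow_headers: vec!["x-custom".into()],
	expose_headers: vec![],
};

// Origin and method match, and every requested header is allowed:
assert!(cors_rule_matches(
	&rule,
	"https://app.example.com",
	"GET",
	["x-custom"].iter(),
));
// A POST does not match, since only GET is listed:
assert!(!cors_rule_matches(
	&rule,
	"https://app.example.com",
	"POST",
	std::iter::empty::<&str>(),
));
```

Note that an empty `request_headers` iterator always passes the header check, which is what `Iterator::all` returns on an empty sequence.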
@@ -2,8 +2,7 @@ use std::convert::Infallible;
 use std::fs::{self, Permissions};
 use std::os::unix::fs::PermissionsExt;
 use std::sync::Arc;
+use std::time::Duration;

-use async_trait::async_trait;
-
 use futures::future::Future;
 use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};

@@ -19,6 +18,7 @@ use hyper_util::rt::TokioIo;
 use tokio::io::{AsyncRead, AsyncWrite};
 use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
 use tokio::sync::watch;
+use tokio::time::{sleep_until, Instant};

 use opentelemetry::{
 	global,

@@ -34,7 +34,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 use crate::helpers::{BoxBody, ErrorBody};

-pub(crate) trait ApiEndpoint: Send + Sync + 'static {
+pub trait ApiEndpoint: Send + Sync + 'static {
 	fn name(&self) -> &'static str;
 	fn add_span_attributes(&self, span: SpanRef<'_>);
 }

@@ -45,8 +45,7 @@ pub trait ApiError: std::error::Error + Send + Sync + 'static {
 	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody;
 }

-#[async_trait]
-pub(crate) trait ApiHandler: Send + Sync + 'static {
+pub trait ApiHandler: Send + Sync + 'static {
 	const API_NAME: &'static str;
 	const API_NAME_DISPLAY: &'static str;

@@ -54,14 +53,20 @@ pub(crate) trait ApiHandler: Send + Sync + 'static {
 	type Error: ApiError;

 	fn parse_endpoint(&self, r: &Request<IncomingBody>) -> Result<Self::Endpoint, Self::Error>;
-	async fn handle(
+	fn handle(
 		&self,
 		req: Request<IncomingBody>,
 		endpoint: Self::Endpoint,
-	) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
+	) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
+
+	/// Returns the key id used to authenticate this request. The ID returned must be safe to
+	/// log.
+	fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
+		None
+	}
 }

-pub(crate) struct ApiServer<A: ApiHandler> {
+pub struct ApiServer<A: ApiHandler> {
 	region: String,
 	api_handler: A,
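Dropping `#[async_trait]` in favor of `fn handle(...) -> impl Future<...> + Send` means implementors can write a native `async fn` (stable since Rust 1.75). A minimal sketch of an implementation under the new signature; `PingEndpoint` and `PingError` are hypothetical stand-ins for a real endpoint enum and `ApiError` type, and `BoxBody` is assumed to be the alias from `crate::helpers` above:

```rust
use bytes::Bytes;
use http_body_util::{BodyExt, Empty};
use hyper::{body::Incoming as IncomingBody, Request, Response};

struct PingApiServer;

impl ApiHandler for PingApiServer {
	const API_NAME: &'static str = "ping";
	const API_NAME_DISPLAY: &'static str = "Ping";

	type Endpoint = PingEndpoint;
	type Error = PingError;

	fn parse_endpoint(&self, _r: &Request<IncomingBody>) -> Result<PingEndpoint, PingError> {
		Ok(PingEndpoint)
	}

	// A plain `async fn` satisfies the `impl Future<...> + Send`
	// declaration, as long as the future it produces is Send.
	async fn handle(
		&self,
		_req: Request<IncomingBody>,
		_endpoint: PingEndpoint,
	) -> Result<Response<BoxBody<PingError>>, PingError> {
		// Empty 200 response; Empty's error type is Infallible,
		// so the map_err closure can never run.
		let body = Empty::<Bytes>::new().map_err(|e| match e {}).boxed();
		Ok(Response::new(body))
	}
}
```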
@@ -143,19 +148,20 @@ impl<A: ApiHandler> ApiServer<A> {
 	) -> Result<Response<BoxBody<A::Error>>, http::Error> {
 		let uri = req.uri().clone();

-		if let Ok(forwarded_for_ip_addr) =
+		let source = if let Ok(forwarded_for_ip_addr) =
 			forwarded_headers::handle_forwarded_for_headers(req.headers())
 		{
-			info!(
-				"{} (via {}) {} {}",
-				forwarded_for_ip_addr,
-				addr,
-				req.method(),
-				uri
-			);
+			format!("{forwarded_for_ip_addr} (via {addr})")
 		} else {
-			info!("{} {} {}", addr, req.method(), uri);
-		}
+			format!("{addr}")
+		};
+		// we only do this to log the access key, so we can discard any error
+		let key = self
+			.api_handler
+			.key_id_from_request(&req)
+			.map(|k| format!("(key {k}) "))
+			.unwrap_or_default();
+		info!("{source} {key}{} {uri}", req.method());
 		debug!("{:?}", req);

 		let tracer = opentelemetry::global::tracer("garage");
@@ -246,13 +252,11 @@ impl<A: ApiHandler> ApiServer<A> {

 // ==== helper functions ====

-#[async_trait]
 pub trait Accept: Send + Sync + 'static {
 	type Stream: AsyncRead + AsyncWrite + Send + Sync + 'static;
-	async fn accept(&self) -> std::io::Result<(Self::Stream, String)>;
+	fn accept(&self) -> impl Future<Output = std::io::Result<(Self::Stream, String)>> + Send;
 }

-#[async_trait]
 impl Accept for TcpListener {
 	type Stream = TcpStream;
 	async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {

@@ -264,7 +268,6 @@ impl Accept for TcpListener {

 pub struct UnixListenerOn(pub UnixListener, pub String);

-#[async_trait]
 impl Accept for UnixListenerOn {
 	type Stream = UnixStream;
 	async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
@@ -291,7 +294,7 @@ where
 	let connection_collector = tokio::spawn({
 		let server_name = server_name.clone();
 		async move {
-			let mut connections = FuturesUnordered::new();
+			let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
 			loop {
 				let collect_next = async {
 					if connections.is_empty() {

@@ -312,23 +315,34 @@ where
 					}
 				}
 			}
-			if !connections.is_empty() {
+			let deadline = Instant::now() + Duration::from_secs(10);
+			while !connections.is_empty() {
 				info!(
-					"{} server: {} connections still open",
+					"{} server: {} connections still open, deadline in {:.2}s",
 					server_name,
-					connections.len()
+					connections.len(),
+					(deadline - Instant::now()).as_secs_f32(),
 				);
-				while let Some(conn_res) = connections.next().await {
-					trace!(
-						"{} server: HTTP connection finished: {:?}",
-						server_name,
-						conn_res
-					);
-					info!(
-						"{} server: {} connections still open",
-						server_name,
-						connections.len()
-					);
+				tokio::select! {
+					conn_res = connections.next() => {
+						trace!(
+							"{} server: HTTP connection finished: {:?}",
+							server_name,
+							conn_res.unwrap(),
+						);
+					}
+					_ = sleep_until(deadline) => {
+						warn!("{} server: exit deadline reached with {} connections still open, killing them now",
+							server_name,
+							connections.len());
+						for conn in connections.iter() {
+							conn.abort();
+						}
+						for conn in connections {
+							assert!(conn.await.unwrap_err().is_cancelled());
+						}
+						break;
+					}
 				}
 			}
 		}
 	}

@@ -336,7 +350,11 @@ where

 	while !*must_exit.borrow() {
 		let (stream, client_addr) = tokio::select! {
-			acc = listener.accept() => acc?,
+			acc = listener.accept() => match acc {
+				Ok(r) => r,
+				Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
+				Err(e) => return Err(e.into()),
+			},
 			_ = must_exit.changed() => continue,
 		};
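The shutdown logic above now drains connections under a 10-second deadline, then aborts whatever is left. The same pattern in isolation, as a self-contained sketch (not garage code):

```rust
use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::{sleep_until, Instant};

/// Drain a set of connection tasks, but never wait longer than `grace`.
async fn drain_with_deadline(
	mut connections: FuturesUnordered<tokio::task::JoinHandle<()>>,
	grace: Duration,
) {
	let deadline = Instant::now() + grace;
	while !connections.is_empty() {
		tokio::select! {
			// A task finished on its own: keep draining.
			_ = connections.next() => {}
			// Deadline hit: abort everything still running.
			_ = sleep_until(deadline) => {
				for conn in connections.iter() {
					conn.abort();
				}
				while let Some(res) = connections.next().await {
					// Aborted handles resolve to a cancelled JoinError;
					// tasks that won the race may still return Ok(()).
					if let Err(e) = res {
						debug_assert!(e.is_cancelled());
					}
				}
				break;
			}
		}
	}
}
```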
@@ -8,7 +8,6 @@ use hyper::{
 	body::{Body, Bytes},
 	Request, Response,
 };
-use idna::domain_to_unicode;
 use serde::{Deserialize, Serialize};

 use garage_model::bucket_table::BucketParams;

@@ -97,7 +96,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
 		authority
 		))),
 	};
-	authority.map(|h| domain_to_unicode(h).0)
+	authority.map(|h| h.to_ascii_lowercase())
 }

 /// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in

@@ -363,9 +362,9 @@ mod tests {
 }

 #[derive(Serialize)]
-pub(crate) struct CustomApiErrorBody {
-	pub(crate) code: String,
-	pub(crate) message: String,
-	pub(crate) region: String,
-	pub(crate) path: String,
+pub struct CustomApiErrorBody {
+	pub code: String,
+	pub message: String,
+	pub region: String,
+	pub path: String,
 }
12  src/api/common/lib.rs  Normal file

@@ -0,0 +1,12 @@
//! Crate for serving an S3 compatible API
#[macro_use]
extern crate tracing;

pub mod common_error;

pub mod cors;
pub mod encoding;
pub mod generic_server;
pub mod helpers;
pub mod router_macros;
pub mod signature;
@@ -1,5 +1,6 @@
 /// This macro is used to generate very repetitive match {} blocks in this module
 /// It is _not_ made to be used anywhere else
+#[macro_export]
 macro_rules! router_match {
 	(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
 		// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }

@@ -133,6 +134,7 @@ macro_rules! router_match {

 /// This macro is used to generate part of the code in this module. It must be called only once, and
 /// is useless outside of this module.
+#[macro_export]
 macro_rules! generateQueryParameters {
 	(
 		keywords: [ $($kw_param:expr => $kw_name: ident),* ],

@@ -204,7 +206,7 @@ macro_rules! generateQueryParameters {
 	}

 	/// Get an error message in case not all parameters were used when extracting them to
-	/// build an Enpoint variant
+	/// build an Endpoint variant
 	fn nonempty_message(&self) -> Option<&str> {
 		if self.keyword.is_some() {
 			Some("Keyword not used")

@@ -220,5 +222,5 @@ macro_rules! generateQueryParameters {
 	}
 }

-pub(crate) use generateQueryParameters;
-pub(crate) use router_match;
+pub use generateQueryParameters;
+pub use router_match;
135  src/api/common/signature/body.rs  Normal file

@@ -0,0 +1,135 @@
use std::sync::Mutex;

use futures::prelude::*;
use futures::stream::BoxStream;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::{Bytes, Frame};
use serde::Deserialize;
use tokio::sync::mpsc;
use tokio::task;

use super::*;

use crate::signature::checksum::*;

pub struct ReqBody {
	// why need mutex to be sync??
	pub(crate) stream: Mutex<BoxStream<'static, Result<Frame<Bytes>, Error>>>,
	pub(crate) checksummer: Checksummer,
	pub(crate) expected_checksums: ExpectedChecksums,
	pub(crate) trailer_algorithm: Option<ChecksumAlgorithm>,
}

pub type StreamingChecksumReceiver = task::JoinHandle<Result<Checksums, Error>>;

impl ReqBody {
	pub fn add_expected_checksums(&mut self, more: ExpectedChecksums) {
		if more.md5.is_some() {
			self.expected_checksums.md5 = more.md5;
		}
		if more.sha256.is_some() {
			self.expected_checksums.sha256 = more.sha256;
		}
		if more.extra.is_some() {
			self.expected_checksums.extra = more.extra;
		}
		self.checksummer.add_expected(&self.expected_checksums);
	}

	pub fn add_md5(&mut self) {
		self.checksummer.add_md5();
	}

	// ============ non-streaming =============

	pub async fn json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> {
		let body = self.collect().await?;
		let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
		Ok(resp)
	}

	pub async fn collect(self) -> Result<Bytes, Error> {
		self.collect_with_checksums().await.map(|(b, _)| b)
	}

	pub async fn collect_with_checksums(mut self) -> Result<(Bytes, Checksums), Error> {
		let stream: BoxStream<_> = self.stream.into_inner().unwrap();
		let bytes = BodyExt::collect(StreamBody::new(stream)).await?.to_bytes();

		self.checksummer.update(&bytes);
		let checksums = self.checksummer.finalize();
		checksums.verify(&self.expected_checksums)?;

		Ok((bytes, checksums))
	}

	// ============ streaming =============

	pub fn streaming_with_checksums(
		self,
	) -> (
		BoxStream<'static, Result<Bytes, Error>>,
		StreamingChecksumReceiver,
	) {
		let Self {
			stream,
			mut checksummer,
			mut expected_checksums,
			trailer_algorithm,
		} = self;

		let (frame_tx, mut frame_rx) = mpsc::channel::<Frame<Bytes>>(5);

		let join_checksums = tokio::spawn(async move {
			while let Some(frame) = frame_rx.recv().await {
				match frame.into_data() {
					Ok(data) => {
						checksummer = tokio::task::spawn_blocking(move || {
							checksummer.update(&data);
							checksummer
						})
						.await
						.unwrap()
					}
					Err(frame) => {
						let trailers = frame.into_trailers().unwrap();
						let algo = trailer_algorithm.unwrap();
						expected_checksums.extra = Some(extract_checksum_value(&trailers, algo)?);
						break;
					}
				}
			}

			if trailer_algorithm.is_some() && expected_checksums.extra.is_none() {
				return Err(Error::bad_request("trailing checksum was not sent"));
			}

			let checksums = checksummer.finalize();
			checksums.verify(&expected_checksums)?;

			Ok(checksums)
		});

		let stream: BoxStream<_> = stream.into_inner().unwrap();
		let stream = stream.filter_map(move |x| {
			let frame_tx = frame_tx.clone();
			async move {
				match x {
					Err(e) => Some(Err(e)),
					Ok(frame) => {
						if frame.is_data() {
							let data = frame.data_ref().unwrap().clone();
							let _ = frame_tx.send(frame).await;
							Some(Ok(data))
						} else {
							let _ = frame_tx.send(frame).await;
							None
						}
					}
				}
			}
		});

		(stream.boxed(), join_checksums)
	}
}
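A rough sketch of the intended consumption pattern for `streaming_with_checksums` (the storage write is elided; the `ReqBody` is assumed to come out of signature verification):

```rust
use bytes::Bytes;
use futures::StreamExt;

async fn store_body(req_body: ReqBody) -> Result<Checksums, Error> {
	// Data frames are yielded here; trailer frames (carrying a
	// trailing checksum) are routed to the background task instead.
	let (mut data, checksum_task) = req_body.streaming_with_checksums();

	while let Some(chunk) = data.next().await {
		let chunk: Bytes = chunk?;
		// ... write `chunk` to block storage here ...
		drop(chunk);
	}

	// Once the stream is fully consumed, the background task can
	// finalize and verify the checksums against the expected values.
	checksum_task.await.expect("checksum task should not panic")
}
```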
323  src/api/common/signature/checksum.rs  Normal file

@@ -0,0 +1,323 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;

use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;

use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;

use super::*;

pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue};

pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
	HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");

pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default, Clone)]
pub struct ExpectedChecksums {
	// base64-encoded md5 (content-md5 header)
	pub md5: Option<String>,
	// content_sha256 (as a Hash / FixedBytes32)
	pub sha256: Option<Hash>,
	// extra x-amz-checksum-* header
	pub extra: Option<ChecksumValue>,
}

pub struct Checksummer {
	pub crc32: Option<Crc32>,
	pub crc32c: Option<Crc32c>,
	pub md5: Option<Md5>,
	pub sha1: Option<Sha1>,
	pub sha256: Option<Sha256>,
}

#[derive(Default)]
pub struct Checksums {
	pub crc32: Option<Crc32Checksum>,
	pub crc32c: Option<Crc32cChecksum>,
	pub md5: Option<Md5Checksum>,
	pub sha1: Option<Sha1Checksum>,
	pub sha256: Option<Sha256Checksum>,
}

impl Checksummer {
	pub fn new() -> Self {
		Self {
			crc32: None,
			crc32c: None,
			md5: None,
			sha1: None,
			sha256: None,
		}
	}

	pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self {
		let mut ret = Self::new();
		ret.add_expected(expected);
		if add_md5 {
			ret.add_md5();
		}
		ret
	}

	pub fn add_md5(&mut self) {
		self.md5 = Some(Md5::new());
	}

	pub fn add_expected(&mut self, expected: &ExpectedChecksums) {
		if expected.md5.is_some() {
			self.md5 = Some(Md5::new());
		}
		if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
			self.sha256 = Some(Sha256::new());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
			self.crc32 = Some(Crc32::new());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
			self.crc32c = Some(Crc32c::default());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
			self.sha1 = Some(Sha1::new());
		}
	}

	pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
		match algo {
			Some(ChecksumAlgorithm::Crc32) => {
				self.crc32 = Some(Crc32::new());
			}
			Some(ChecksumAlgorithm::Crc32c) => {
				self.crc32c = Some(Crc32c::default());
			}
			Some(ChecksumAlgorithm::Sha1) => {
				self.sha1 = Some(Sha1::new());
			}
			Some(ChecksumAlgorithm::Sha256) => {
				self.sha256 = Some(Sha256::new());
			}
			None => (),
		}
		self
	}

	pub fn update(&mut self, bytes: &[u8]) {
		if let Some(crc32) = &mut self.crc32 {
			crc32.update(bytes);
		}
		if let Some(crc32c) = &mut self.crc32c {
			crc32c.write(bytes);
		}
		if let Some(md5) = &mut self.md5 {
			md5.update(bytes);
		}
		if let Some(sha1) = &mut self.sha1 {
			sha1.update(bytes);
		}
		if let Some(sha256) = &mut self.sha256 {
			sha256.update(bytes);
		}
	}

	pub fn finalize(self) -> Checksums {
		Checksums {
			crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
			crc32c: self
				.crc32c
				.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
			md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
			sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
			sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
		}
	}
}

impl Checksums {
	pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
		if let Some(expected_md5) = &expected.md5 {
			match self.md5 {
				Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
				_ => {
					return Err(Error::InvalidDigest(
						"MD5 checksum verification failed (from content-md5)".into(),
					))
				}
			}
		}
		if let Some(expected_sha256) = &expected.sha256 {
			match self.sha256 {
				Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
				_ => {
					return Err(Error::InvalidDigest(
						"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
					))
				}
			}
		}
		if let Some(extra) = expected.extra {
			let algo = extra.algorithm();
			if self.extract(Some(algo)) != Some(extra) {
				return Err(Error::InvalidDigest(format!(
					"Failed to validate checksum for algorithm {:?}",
					algo
				)));
			}
		}
		Ok(())
	}

	pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
		match algo {
			None => None,
			Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
			Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
			Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
			Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
		}
	}
}

// ----

pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error> {
	match algo {
		"CRC32" => Ok(ChecksumAlgorithm::Crc32),
		"CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
		"SHA1" => Ok(ChecksumAlgorithm::Sha1),
		"SHA256" => Ok(ChecksumAlgorithm::Sha256),
		_ => Err(Error::bad_request("invalid checksum algorithm")),
	}
}

/// Extract the value of the x-amz-checksum-algorithm header
pub fn request_checksum_algorithm(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
		None => Ok(None),
		Some(x) => parse_checksum_algorithm(x.to_str()?).map(Some),
	}
}

pub fn request_trailer_checksum_algorithm(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
	match headers.get(X_AMZ_TRAILER).map(|x| x.to_str()).transpose()? {
		None => Ok(None),
		Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
		Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
		Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
		Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
		_ => Err(Error::bad_request("invalid checksum algorithm")),
	}
}

/// Extract the value of any of the x-amz-checksum-* headers
pub fn request_checksum_value(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
	let mut ret = vec![];

	if headers.contains_key(X_AMZ_CHECKSUM_CRC32) {
		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32)?);
	}
	if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
	}
	if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
	}
	if headers.contains_key(X_AMZ_CHECKSUM_SHA256) {
		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha256)?);
	}

	if ret.len() > 1 {
		return Err(Error::bad_request(
			"multiple x-amz-checksum-* headers given",
		));
	}
	Ok(ret.pop())
}

/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub fn extract_checksum_value(
	headers: &HeaderMap<HeaderValue>,
	algo: ChecksumAlgorithm,
) -> Result<ChecksumValue, Error> {
	match algo {
		ChecksumAlgorithm::Crc32 => {
			let crc32 = headers
				.get(X_AMZ_CHECKSUM_CRC32)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
			Ok(ChecksumValue::Crc32(crc32))
		}
		ChecksumAlgorithm::Crc32c => {
			let crc32c = headers
				.get(X_AMZ_CHECKSUM_CRC32C)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
			Ok(ChecksumValue::Crc32c(crc32c))
		}
		ChecksumAlgorithm::Sha1 => {
			let sha1 = headers
				.get(X_AMZ_CHECKSUM_SHA1)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
			Ok(ChecksumValue::Sha1(sha1))
		}
		ChecksumAlgorithm::Sha256 => {
			let sha256 = headers
				.get(X_AMZ_CHECKSUM_SHA256)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
			Ok(ChecksumValue::Sha256(sha256))
		}
	}
}

pub fn add_checksum_response_headers(
	checksum: &Option<ChecksumValue>,
	mut resp: http::response::Builder,
) -> http::response::Builder {
	match checksum {
		Some(ChecksumValue::Crc32(crc32)) => {
			resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
		}
		Some(ChecksumValue::Crc32c(crc32c)) => {
			resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
		}
		Some(ChecksumValue::Sha1(sha1)) => {
			resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
		}
		Some(ChecksumValue::Sha256(sha256)) => {
			resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
		}
		None => (),
	}
	resp
}
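A small sketch of the `Checksummer` flow defined above (the expected CRC32 value is a placeholder, so verification fails on purpose):

```rust
// Only the checksums that were actually requested get computed.
let expected = ExpectedChecksums {
	md5: None,
	sha256: None,
	extra: Some(ChecksumValue::Crc32([0u8; 4])), // placeholder, not the real digest
};

let mut summer = Checksummer::init(&expected, /* add_md5 */ false);
summer.update(b"hello ");
summer.update(b"world");
let checksums = summer.finalize();

// verify() returns Error::InvalidDigest on mismatch; since the
// placeholder above is not the CRC32 of "hello world", this errs.
assert!(checksums.verify(&expected).is_err());
assert!(checksums.crc32.is_some());
assert!(checksums.md5.is_none());
```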
@@ -1,4 +1,4 @@
-use err_derive::Error;
+use thiserror::Error;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};

@@ -6,18 +6,22 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	/// Error from common error
 	Common(CommonError),

 	/// Authorization Header Malformed
-	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+	#[error("Authorization header malformed, unexpected scope: {0}")]
 	AuthorizationHeaderMalformed(String),

 	// Category: bad request
 	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
-	#[error(display = "Invalid UTF-8: {}", _0)]
-	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+	#[error("Invalid UTF-8: {0}")]
+	InvalidUtf8Str(#[from] std::str::Utf8Error),
+
+	/// The provided digest (checksum) value was invalid
+	#[error("Invalid digest: {0}")]
+	InvalidDigest(String),
 }

 impl<T> From<T> for Error
118  src/api/common/signature/mod.rs  Normal file

@@ -0,0 +1,118 @@
use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;

use hyper::header::HeaderName;
use hyper::{body::Incoming as IncomingBody, Request};

use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};

use error::*;

pub mod body;
pub mod checksum;
pub mod error;
pub mod payload;
pub mod streaming;

pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";

// ---- Constants used in AWSv4 signatures ----

pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SHA256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
pub const X_AMZ_TRAILER: HeaderName = HeaderName::from_static("x-amz-trailer");

/// Result of `sha256("")`
pub(crate) const EMPTY_STRING_HEX_DIGEST: &str =
	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

// Signature calculation algorithm
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
type HmacSha256 = Hmac<Sha256>;

// Possible values for x-amz-content-sha256, in addition to the actual sha256
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

// Used in the computation of StringToSign
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

// ---- enums to describe stuff going on in signature calculation ----

#[derive(Debug)]
pub enum ContentSha256Header {
	UnsignedPayload,
	Sha256Checksum(Hash),
	StreamingPayload { trailer: bool, signed: bool },
}

// ---- top-level functions ----

pub struct VerifiedRequest {
	pub request: Request<streaming::ReqBody>,
	pub access_key: Key,
	pub content_sha256_header: ContentSha256Header,
}

pub async fn verify_request(
	garage: &Garage,
	mut req: Request<IncomingBody>,
	service: &'static str,
) -> Result<VerifiedRequest, Error> {
	let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;

	let request = streaming::parse_streaming_body(
		req,
		&checked_signature,
		&garage.config.s3_api.s3_region,
		service,
	)?;

	let access_key = checked_signature
		.key
		.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

	Ok(VerifiedRequest {
		request,
		access_key,
		content_sha256_header: checked_signature.content_sha256_header,
	})
}

pub fn signing_hmac(
	datetime: &DateTime<Utc>,
	secret_key: &str,
	region: &str,
	service: &str,
) -> Result<HmacSha256, crypto_common::InvalidLength> {
	let secret = String::from("AWS4") + secret_key;
	let mut date_hmac = HmacSha256::new_from_slice(secret.as_bytes())?;
	date_hmac.update(datetime.format(SHORT_DATE).to_string().as_bytes());
	let mut region_hmac = HmacSha256::new_from_slice(&date_hmac.finalize().into_bytes())?;
	region_hmac.update(region.as_bytes());
	let mut service_hmac = HmacSha256::new_from_slice(&region_hmac.finalize().into_bytes())?;
	service_hmac.update(service.as_bytes());
	let mut signing_hmac = HmacSha256::new_from_slice(&service_hmac.finalize().into_bytes())?;
	signing_hmac.update(b"aws4_request");
	let hmac = HmacSha256::new_from_slice(&signing_hmac.finalize().into_bytes())?;
	Ok(hmac)
}

pub fn compute_scope(datetime: &DateTime<Utc>, region: &str, service: &str) -> String {
	format!(
		"{}/{}/{}/aws4_request",
		datetime.format(SHORT_DATE),
		region,
		service
	)
}
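For reference, the scope string and signing key chain produced by the two helpers above, with made-up key material (this is the standard AWSv4 derivation: HMAC chained through date, region, service and the literal `aws4_request`):

```rust
use chrono::{TimeZone, Utc};
use hmac::Mac;

let datetime = Utc.with_ymd_and_hms(2025, 1, 15, 12, 0, 0).unwrap();

let scope = compute_scope(&datetime, "garage", "s3");
assert_eq!(scope, "20250115/garage/s3/aws4_request");

// The final HMAC is keyed with the derived signing key and is ready
// to consume the StringToSign.
let mut hmac = signing_hmac(&datetime, "example-secret-key", "garage", "s3")
	.expect("HMAC-SHA256 accepts keys of any length");
hmac.update(b"...StringToSign goes here...");
let signature = hex::encode(hmac.finalize().into_bytes());
assert_eq!(signature.len(), 64); // 32-byte SHA-256 MAC, hex-encoded
```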
@@ -13,23 +13,9 @@ use garage_util::data::Hash;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;

-use super::LONG_DATETIME;
-use super::{compute_scope, signing_hmac};
+use super::*;

 use crate::encoding::uri_encode;
-use crate::signature::error::*;
-
-pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
-pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
-pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
-pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
-pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
-pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
-pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
-
-pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
-pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
-pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

 pub type QueryMap = HeaderMap<QueryValue>;
 pub struct QueryValue {

@@ -39,16 +25,23 @@ pub struct QueryValue {
 	value: String,
 }

+#[derive(Debug)]
+pub struct CheckedSignature {
+	pub key: Option<Key>,
+	pub content_sha256_header: ContentSha256Header,
+	pub signature_header: Option<String>,
+}
+
 pub async fn check_payload_signature(
 	garage: &Garage,
 	request: &mut Request<IncomingBody>,
 	service: &'static str,
-) -> Result<(Option<Key>, Option<Hash>), Error> {
+) -> Result<CheckedSignature, Error> {
 	let query = parse_query_map(request.uri())?;

 	if query.contains_key(&X_AMZ_ALGORITHM) {
-		// We check for presigned-URL-style authentification first, because
-		// the browser or someting else could inject an Authorization header
+		// We check for presigned-URL-style authentication first, because
+		// the browser or something else could inject an Authorization header
 		// that is totally unrelated to AWS signatures.
 		check_presigned_signature(garage, service, request, query).await
 	} else if request.headers().contains_key(AUTHORIZATION) {

@@ -57,17 +50,46 @@ pub async fn check_payload_signature(
 		// Unsigned (anonymous) request
 		let content_sha256 = request
 			.headers()
-			.get("x-amz-content-sha256")
-			.filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
-		if let Some(content_sha256) = content_sha256 {
-			let sha256 = hex::decode(content_sha256)
-				.ok()
-				.and_then(|bytes| Hash::try_from(&bytes))
-				.ok_or_bad_request("Invalid content sha256 hash")?;
-			Ok((None, Some(sha256)))
-		} else {
-			Ok((None, None))
-		}
+			.get(X_AMZ_CONTENT_SHA256)
+			.map(|x| x.to_str())
+			.transpose()?;
+		Ok(CheckedSignature {
+			key: None,
+			content_sha256_header: parse_x_amz_content_sha256(content_sha256)?,
+			signature_header: None,
+		})
+	}
+}
+
+fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Header, Error> {
+	let header = match header {
+		Some(x) => x,
+		None => return Ok(ContentSha256Header::UnsignedPayload),
+	};
+	if header == UNSIGNED_PAYLOAD {
+		Ok(ContentSha256Header::UnsignedPayload)
+	} else if let Some(rest) = header.strip_prefix("STREAMING-") {
+		let (trailer, algo) = if let Some(rest2) = rest.strip_suffix("-TRAILER") {
+			(true, rest2)
+		} else {
+			(false, rest)
+		};
+		let signed = match algo {
+			AWS4_HMAC_SHA256_PAYLOAD => true,
+			UNSIGNED_PAYLOAD => false,
+			_ => {
+				return Err(Error::bad_request(
+					"invalid or unsupported x-amz-content-sha256",
+				))
+			}
+		};
+		Ok(ContentSha256Header::StreamingPayload { trailer, signed })
+	} else {
+		let sha256 = hex::decode(header)
+			.ok()
+			.and_then(|bytes| Hash::try_from(&bytes))
+			.ok_or_bad_request("Invalid content sha256 hash")?;
+		Ok(ContentSha256Header::Sha256Checksum(sha256))
 	}
 }
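To make the new parsing concrete, here is how the recognized `x-amz-content-sha256` values map to `ContentSha256Header` (a sketch that would live in a test inside this module, since the function is private; the `?` assumes a `Result<_, Error>` context):

```rust
assert!(matches!(
	parse_x_amz_content_sha256(None)?,
	ContentSha256Header::UnsignedPayload
));
assert!(matches!(
	parse_x_amz_content_sha256(Some("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"))?,
	ContentSha256Header::StreamingPayload { trailer: false, signed: true }
));
assert!(matches!(
	parse_x_amz_content_sha256(Some("STREAMING-UNSIGNED-PAYLOAD-TRAILER"))?,
	ContentSha256Header::StreamingPayload { trailer: true, signed: false }
));
// Anything else must be a hex-encoded sha256 digest of the payload:
assert!(matches!(
	parse_x_amz_content_sha256(Some(
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	))?,
	ContentSha256Header::Sha256Checksum(_)
));
```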
@ -76,13 +98,13 @@ async fn check_standard_signature(
|
||||||
service: &'static str,
|
service: &'static str,
|
||||||
request: &Request<IncomingBody>,
|
request: &Request<IncomingBody>,
|
||||||
query: QueryMap,
|
query: QueryMap,
|
||||||
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
) -> Result<CheckedSignature, Error> {
|
||||||
let authorization = Authorization::parse_header(request.headers())?;
|
let authorization = Authorization::parse_header(request.headers())?;
|
||||||
|
|
||||||
// Verify that all necessary request headers are included in signed_headers
|
// Verify that all necessary request headers are included in signed_headers
|
||||||
// The following must be included for all signatures:
|
// The following must be included for all signatures:
|
||||||
// - the Host header (mandatory)
|
// - the Host header (mandatory)
|
||||||
// - all x-amz-* headers used in the request
|
// - all x-amz-* headers used in the request (except x-amz-content-sha256)
|
||||||
// AWS also indicates that the Content-Type header should be signed if
|
// AWS also indicates that the Content-Type header should be signed if
|
||||||
// it is used, but Minio client doesn't sign it so we don't check it for compatibility.
|
// it is used, but Minio client doesn't sign it so we don't check it for compatibility.
|
||||||
let signed_headers = split_signed_headers(&authorization)?;
|
let signed_headers = split_signed_headers(&authorization)?;
|
||||||
|
|
@@ -108,18 +130,13 @@ async fn check_standard_signature(

 	let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;

-	let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
-		None
-	} else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
-		let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
-		Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
-	} else {
-		let bytes = hex::decode(authorization.content_sha256)
-			.ok_or_bad_request("Invalid content sha256 hash")?;
-		Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid content sha256 hash")?)
-	};
+	let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;

-	Ok((Some(key), content_sha256))
+	Ok(CheckedSignature {
+		key: Some(key),
+		content_sha256_header,
+		signature_header: Some(authorization.signature),
+	})
 }

 async fn check_presigned_signature(
@@ -127,14 +144,14 @@ async fn check_presigned_signature(
 	service: &'static str,
 	request: &mut Request<IncomingBody>,
 	mut query: QueryMap,
-) -> Result<(Option<Key>, Option<Hash>), Error> {
+) -> Result<CheckedSignature, Error> {
 	let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
 	let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;

 	// Verify that all necessary request headers are included in signed_headers
-	// For AWSv4 pre-signed URLs, the following must be incldued:
+	// For AWSv4 pre-signed URLs, the following must be included:
 	// - the Host header (mandatory)
-	// - all x-amz-* headers used in the request
+	// - all x-amz-* headers used in the request (except x-amz-content-sha256)
 	let signed_headers = split_signed_headers(&authorization)?;
 	verify_signed_headers(request.headers(), &signed_headers)?;
@@ -193,7 +210,11 @@ async fn check_presigned_signature(

 	// Presigned URLs always use UNSIGNED-PAYLOAD,
 	// so there is no sha256 hash to return.
-	Ok((Some(key), None))
+	Ok(CheckedSignature {
+		key: Some(key),
+		content_sha256_header: ContentSha256Header::UnsignedPayload,
+		signature_header: Some(authorization.signature),
+	})
 }

 pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
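For reference, the mapping that `parse_x_amz_content_sha256` above implements, written out as a minimal standalone sketch (the header spellings are the AWS-documented values; the real code compares against the `UNSIGNED_PAYLOAD` and `AWS4_HMAC_SHA256_PAYLOAD` constants):

// Minimal sketch, not part of the patch: how header values classify.
fn classify(header: Option<&str>) -> &'static str {
	match header {
		None | Some("UNSIGNED-PAYLOAD") => "UnsignedPayload",
		Some("STREAMING-AWS4-HMAC-SHA256-PAYLOAD") => "StreamingPayload { signed: true, trailer: false }",
		Some("STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER") => "StreamingPayload { signed: true, trailer: true }",
		Some("STREAMING-UNSIGNED-PAYLOAD-TRAILER") => "StreamingPayload { signed: false, trailer: true }",
		Some(_) => "Sha256Checksum (64 hex digits expected)",
	}
}

Note that `STREAMING-UNSIGNED-PAYLOAD` without a trailer also parses here, but it is rejected later by a sanity check in the new streaming module.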
@@ -247,7 +268,9 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
 		return Err(Error::bad_request("Header `Host` should be signed"));
 	}
 	for (name, _) in headers.iter() {
-		if name.as_str().starts_with("x-amz-") {
+		// Enforce signature of all x-amz-* headers, except x-amz-content-sh256
+		// because it is included in the canonical request in all cases
+		if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
 			if !signed_headers.contains(name) {
 				return Err(Error::bad_request(format!(
 					"Header `{}` should be signed",
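One detail worth noting about the `name != X_AMZ_CONTENT_SHA256` comparison above: `http::HeaderName` normalizes to lowercase, so the exemption applies regardless of how the client capitalized the header. A minimal sketch:

use http::header::HeaderName;

fn header_names_compare_case_insensitively() {
	let canonical = HeaderName::from_static("x-amz-content-sha256");
	// Parsing normalizes to lowercase, so any client capitalization compares equal:
	assert_eq!(
		HeaderName::from_bytes(b"X-Amz-Content-Sha256").unwrap(),
		canonical
	);
}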
@@ -306,7 +329,7 @@ pub fn canonical_request(
 	// Note that there is also the issue of path normalization, which I hope is unrelated to the
 	// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
 	// and rusoto_signature does not seem to do any effective path normalization, even though
-	// it mentions it in the comments (same link to the souce code as above).
+	// it mentions it in the comments (same link to the source code as above).
 	// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
 	// would make non-normalized paths invalid K2V partition keys, and we don't want that.
 	let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
@@ -396,7 +419,7 @@ pub async fn verify_v4(
 // ============ Authorization header, or X-Amz-* query params =========

 pub struct Authorization {
-	key_id: String,
+	pub key_id: String,
 	scope: String,
 	signed_headers: String,
 	signature: String,
@@ -405,7 +428,7 @@ pub struct Authorization {
 }

 impl Authorization {
-	fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+	pub fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
 		let authorization = headers
 			.get(AUTHORIZATION)
 			.ok_or_bad_request("Missing authorization header")?
@@ -442,13 +465,12 @@ impl Authorization {
 			.to_string();

 		let content_sha256 = headers
-			.get(X_AMZ_CONTENT_SH256)
+			.get(X_AMZ_CONTENT_SHA256)
 			.ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;

 		let date = headers
 			.get(X_AMZ_DATE)
-			.ok_or_bad_request("Missing X-Amz-Date field")
-			.map_err(Error::from)?
+			.ok_or_bad_request("Missing X-Amz-Date field")?
 			.to_str()?;
 		let date = parse_date(date)?;
@@ -518,7 +540,7 @@ impl Authorization {
 		})
 	}

-	pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
+	pub fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
 		let algorithm = params
 			.get(X_AMZ_ALGORITHM)
 			.ok_or_bad_request("Missing X-Amz-Algorithm header")?
src/api/common/signature/streaming.rs (new file, 618 lines)
@@ -0,0 +1,618 @@
use std::pin::Pin;
use std::sync::Mutex;

use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use hmac::Mac;
use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING};
use hyper::body::{Bytes, Frame, Incoming as IncomingBody};
use hyper::Request;

use garage_util::data::Hash;

use super::*;

use crate::helpers::body_stream;
use crate::signature::checksum::*;
use crate::signature::payload::CheckedSignature;

pub use crate::signature::body::ReqBody;

pub fn parse_streaming_body(
	mut req: Request<IncomingBody>,
	checked_signature: &CheckedSignature,
	region: &str,
	service: &str,
) -> Result<Request<ReqBody>, Error> {
	debug!(
		"Content signature mode: {:?}",
		checked_signature.content_sha256_header
	);

	match checked_signature.content_sha256_header {
		ContentSha256Header::StreamingPayload { signed, trailer } => {
			// Sanity checks
			if !signed && !trailer {
				return Err(Error::bad_request(
					"STREAMING-UNSIGNED-PAYLOAD without trailer is not a valid combination",
				));
			}

			// Remove the aws-chunked component in the content-encoding: header
			// Note: this header is not properly sent by minio client, so don't fail
			// if it is absent from the request.
			if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) {
				if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") {
					req.headers_mut()
						.insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap());
				} else if content_encoding != "aws-chunked" {
					return Err(Error::bad_request(
						"content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD",
					));
				}
			}

			// If trailer header is announced, add the calculation of the requested checksum
			let mut checksummer = Checksummer::init(&Default::default(), false);
			let trailer_algorithm = if trailer {
				let algo = Some(
					request_trailer_checksum_algorithm(req.headers())?
						.ok_or_bad_request("Missing x-amz-trailer header")?,
				);
				checksummer = checksummer.add(algo);
				algo
			} else {
				None
			};

			// For signed variants, determine signing parameters
			let sign_params = if signed {
				let signature = checked_signature
					.signature_header
					.clone()
					.ok_or_bad_request("No signature provided")?;
				let signature = hex::decode(signature)
					.ok()
					.and_then(|bytes| Hash::try_from(&bytes))
					.ok_or_bad_request("Invalid signature")?;

				let secret_key = checked_signature
					.key
					.as_ref()
					.ok_or_bad_request("Cannot sign streaming payload without signing key")?
					.state
					.as_option()
					.ok_or_internal_error("Deleted key state")?
					.secret_key
					.to_string();

				let date = req
					.headers()
					.get(X_AMZ_DATE)
					.ok_or_bad_request("Missing X-Amz-Date field")?
					.to_str()?;
				let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
					.ok_or_bad_request("Invalid date")?;
				let date: DateTime<Utc> = Utc.from_utc_datetime(&date);

				let scope = compute_scope(&date, region, service);
				let signing_hmac =
					crate::signature::signing_hmac(&date, &secret_key, region, service)
						.ok_or_internal_error("Unable to build signing HMAC")?;

				Some(SignParams {
					datetime: date,
					scope,
					signing_hmac,
					previous_signature: signature,
				})
			} else {
				None
			};

			Ok(req.map(move |body| {
				let stream = body_stream::<_, Error>(body);

				let signed_payload_stream =
					StreamingPayloadStream::new(stream, sign_params, trailer).map_err(Error::from);
				ReqBody {
					stream: Mutex::new(signed_payload_stream.boxed()),
					checksummer,
					expected_checksums: Default::default(),
					trailer_algorithm,
				}
			}))
		}
		_ => Ok(req.map(|body| {
			let expected_checksums = ExpectedChecksums {
				sha256: match &checked_signature.content_sha256_header {
					ContentSha256Header::Sha256Checksum(sha256) => Some(*sha256),
					_ => None,
				},
				..Default::default()
			};
			let checksummer = Checksummer::init(&expected_checksums, false);

			let stream = http_body_util::BodyStream::new(body).map_err(Error::from);
			ReqBody {
				stream: Mutex::new(stream.boxed()),
				checksummer,
				expected_checksums,
				trailer_algorithm: None,
			}
		})),
	}
}

fn compute_streaming_payload_signature(
	signing_hmac: &HmacSha256,
	date: DateTime<Utc>,
	scope: &str,
	previous_signature: Hash,
	content_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
	let string_to_sign = [
		AWS4_HMAC_SHA256_PAYLOAD,
		&date.format(LONG_DATETIME).to_string(),
		scope,
		&hex::encode(previous_signature),
		EMPTY_STRING_HEX_DIGEST,
		&hex::encode(content_sha256),
	]
	.join("\n");

	let mut hmac = signing_hmac.clone();
	hmac.update(string_to_sign.as_bytes());

	Hash::try_from(&hmac.finalize().into_bytes())
		.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
}

fn compute_streaming_trailer_signature(
	signing_hmac: &HmacSha256,
	date: DateTime<Utc>,
	scope: &str,
	previous_signature: Hash,
	trailer_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
	let string_to_sign = [
		AWS4_HMAC_SHA256_PAYLOAD,
		&date.format(LONG_DATETIME).to_string(),
		scope,
		&hex::encode(previous_signature),
		&hex::encode(trailer_sha256),
	]
	.join("\n");

	let mut hmac = signing_hmac.clone();
	hmac.update(string_to_sign.as_bytes());

	Hash::try_from(&hmac.finalize().into_bytes())
		.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
}

mod payload {
	use http::{HeaderName, HeaderValue};

	use garage_util::data::Hash;

	use nom::bytes::streaming::{tag, take_while};
	use nom::character::streaming::hex_digit1;
	use nom::combinator::{map_res, opt};
	use nom::number::streaming::hex_u32;

	macro_rules! try_parse {
		($expr:expr) => {
			$expr.map_err(|e| e.map(Error::Parser))?
		};
	}

	pub enum Error<I> {
		Parser(nom::error::Error<I>),
		BadSignature,
	}

	impl<I> Error<I> {
		pub fn description(&self) -> &str {
			match *self {
				Error::Parser(ref e) => e.code.description(),
				Error::BadSignature => "Bad signature",
			}
		}
	}

	#[derive(Debug, Clone)]
	pub struct ChunkHeader {
		pub size: usize,
		pub signature: Option<Hash>,
	}

	impl ChunkHeader {
		pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag(";")(input));

			let (input, _) = try_parse!(tag("chunk-signature=")(input));
			let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
			let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;

			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = ChunkHeader {
				size: size as usize,
				signature: Some(signature),
			};

			Ok((input, header))
		}

		pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = ChunkHeader {
				size: size as usize,
				signature: None,
			};

			Ok((input, header))
		}
	}

	#[derive(Debug, Clone)]
	pub struct TrailerChunk {
		pub header_name: HeaderName,
		pub header_value: HeaderValue,
		pub signature: Option<Hash>,
	}

	impl TrailerChunk {
		fn parse_content(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, header_name) = try_parse!(map_res(
				take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'),
				HeaderName::from_bytes
			)(input));
			let (input, _) = try_parse!(tag(b":")(input));
			let (input, header_value) = try_parse!(map_res(
				take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)),
				HeaderValue::from_bytes
			)(input));

			// Possible '\n' after the header value, depends on clients
			// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
			let (input, _) = try_parse!(opt(tag(b"\n"))(input));

			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((
				input,
				TrailerChunk {
					header_name,
					header_value,
					signature: None,
				},
			))
		}
		pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, trailer) = Self::parse_content(input)?;

			let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input));
			let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
			let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((
				input,
				TrailerChunk {
					signature: Some(signature),
					..trailer
				},
			))
		}
		pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, trailer) = Self::parse_content(input)?;
			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((input, trailer))
		}
	}
}

#[derive(Debug)]
pub enum StreamingPayloadError {
	Stream(Error),
	InvalidSignature,
	Message(String),
}

impl StreamingPayloadError {
	fn message(msg: &str) -> Self {
		StreamingPayloadError::Message(msg.into())
	}
}

impl From<StreamingPayloadError> for Error {
	fn from(err: StreamingPayloadError) -> Self {
		match err {
			StreamingPayloadError::Stream(e) => e,
			StreamingPayloadError::InvalidSignature => {
				Error::bad_request("Invalid payload signature")
			}
			StreamingPayloadError::Message(e) => {
				Error::bad_request(format!("Chunk format error: {}", e))
			}
		}
	}
}

impl<I> From<payload::Error<I>> for StreamingPayloadError {
	fn from(err: payload::Error<I>) -> Self {
		Self::message(err.description())
	}
}

impl<I> From<nom::error::Error<I>> for StreamingPayloadError {
	fn from(err: nom::error::Error<I>) -> Self {
		Self::message(err.code.description())
	}
}

enum StreamingPayloadChunk {
	Chunk {
		header: payload::ChunkHeader,
		data: Bytes,
	},
	Trailer(payload::TrailerChunk),
}

struct SignParams {
	datetime: DateTime<Utc>,
	scope: String,
	signing_hmac: HmacSha256,
	previous_signature: Hash,
}

#[pin_project::pin_project]
pub struct StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>>,
{
	#[pin]
	stream: S,
	buf: bytes::BytesMut,
	signing: Option<SignParams>,
	has_trailer: bool,
	done: bool,
}

impl<S> StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>>,
{
	fn new(stream: S, signing: Option<SignParams>, has_trailer: bool) -> Self {
		Self {
			stream,
			buf: bytes::BytesMut::new(),
			signing,
			has_trailer,
			done: false,
		}
	}

	fn parse_next(
		input: &[u8],
		is_signed: bool,
		has_trailer: bool,
	) -> nom::IResult<&[u8], StreamingPayloadChunk, StreamingPayloadError> {
		use nom::bytes::streaming::{tag, take};

		macro_rules! try_parse {
			($expr:expr) => {
				$expr.map_err(nom::Err::convert)?
			};
		}

		let (input, header) = if is_signed {
			try_parse!(payload::ChunkHeader::parse_signed(input))
		} else {
			try_parse!(payload::ChunkHeader::parse_unsigned(input))
		};

		// 0-sized chunk is the last
		if header.size == 0 {
			if has_trailer {
				let (input, trailer) = if is_signed {
					try_parse!(payload::TrailerChunk::parse_signed(input))
				} else {
					try_parse!(payload::TrailerChunk::parse_unsigned(input))
				};
				return Ok((input, StreamingPayloadChunk::Trailer(trailer)));
			} else {
				return Ok((
					input,
					StreamingPayloadChunk::Chunk {
						header,
						data: Bytes::new(),
					},
				));
			}
		}

		let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));
		let (input, _) = try_parse!(tag::<_, _, nom::error::Error<_>>("\r\n")(input));

		let data = Bytes::from(data.to_vec());

		Ok((input, StreamingPayloadChunk::Chunk { header, data }))
	}
}

impl<S> Stream for StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
	type Item = Result<Frame<Bytes>, StreamingPayloadError>;

	fn poll_next(
		self: Pin<&mut Self>,
		cx: &mut task::Context<'_>,
	) -> task::Poll<Option<Self::Item>> {
		use std::task::Poll;

		let mut this = self.project();

		if *this.done {
			return Poll::Ready(None);
		}

		loop {
			let (input, payload) =
				match Self::parse_next(this.buf, this.signing.is_some(), *this.has_trailer) {
					Ok(res) => res,
					Err(nom::Err::Incomplete(_)) => {
						match futures::ready!(this.stream.as_mut().poll_next(cx)) {
							Some(Ok(bytes)) => {
								this.buf.extend(bytes);
								continue;
							}
							Some(Err(e)) => {
								return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e))))
							}
							None => {
								return Poll::Ready(Some(Err(StreamingPayloadError::message(
									"Unexpected EOF",
								))));
							}
						}
					}
					Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
						return Poll::Ready(Some(Err(e)))
					}
				};

			match payload {
				StreamingPayloadChunk::Chunk { data, header } => {
					if let Some(signing) = this.signing.as_mut() {
						let data_sha256sum = sha256sum(&data);

						let expected_signature = compute_streaming_payload_signature(
							&signing.signing_hmac,
							signing.datetime,
							&signing.scope,
							signing.previous_signature,
							data_sha256sum,
						)?;

						if header.signature.unwrap() != expected_signature {
							return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
						}

						signing.previous_signature = header.signature.unwrap();
					}

					*this.buf = input.into();

					// 0-sized chunk is the last
					if data.is_empty() {
						// if there was a trailer, it would have been returned by the parser
						assert!(!*this.has_trailer);
						*this.done = true;
						return Poll::Ready(None);
					}

					return Poll::Ready(Some(Ok(Frame::data(data))));
				}
				StreamingPayloadChunk::Trailer(trailer) => {
					trace!(
						"In StreamingPayloadStream::poll_next: got trailer {:?}",
						trailer
					);

					if let Some(signing) = this.signing.as_mut() {
						let data = [
							trailer.header_name.as_ref(),
							&b":"[..],
							trailer.header_value.as_ref(),
							&b"\n"[..],
						]
						.concat();
						let trailer_sha256sum = sha256sum(&data);

						let expected_signature = compute_streaming_trailer_signature(
							&signing.signing_hmac,
							signing.datetime,
							&signing.scope,
							signing.previous_signature,
							trailer_sha256sum,
						)?;

						if trailer.signature.unwrap() != expected_signature {
							return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
						}
					}

					*this.buf = input.into();
					*this.done = true;

					let mut trailers_map = HeaderMap::new();
					trailers_map.insert(trailer.header_name, trailer.header_value);

					return Poll::Ready(Some(Ok(Frame::trailers(trailers_map))));
				}
			}
		}
	}

	fn size_hint(&self) -> (usize, Option<usize>) {
		self.stream.size_hint()
	}
}

#[cfg(test)]
mod tests {
	use futures::prelude::*;

	use super::{SignParams, StreamingPayloadError, StreamingPayloadStream};

	#[tokio::test]
	async fn test_interrupted_signed_payload_stream() {
		use chrono::{DateTime, Utc};

		use garage_util::data::Hash;

		let datetime = DateTime::parse_from_rfc3339("2021-12-13T13:12:42+01:00") // TODO UNIX 0
			.unwrap()
			.with_timezone(&Utc);
		let secret_key = "test";
		let region = "test";
		let scope = crate::signature::compute_scope(&datetime, region, "s3");
		let signing_hmac =
			crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();

		let data: &[&[u8]] = &[b"1"];
		let body = futures::stream::iter(data.iter().map(|block| Ok(block.to_vec().into())));

		let seed_signature = Hash::default();

		let mut stream = StreamingPayloadStream::new(
			body,
			Some(SignParams {
				signing_hmac,
				datetime,
				scope,
				previous_signature: seed_signature,
			}),
			false,
		);

		assert!(stream.try_next().await.is_err());
		match stream.try_next().await {
			Err(StreamingPayloadError::Message(msg)) if msg == "Unexpected EOF" => {}
			item => panic!(
				"Unexpected result, expected early EOF error, got {:?}",
				item
			),
		}
	}
}
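The chained per-chunk signatures computed by `compute_streaming_payload_signature` above follow the AWS SigV4 chunked-upload scheme: each chunk's signature covers the previous chunk's signature, so a chunk cannot be reordered or replayed. As a worked example, this is the string-to-sign for one data chunk (a standalone sketch mirroring the function above; the hex constant is the well-known sha256 of the empty string, matching `EMPTY_STRING_HEX_DIGEST`):

// Sketch, mirroring compute_streaming_payload_signature above.
fn chunk_string_to_sign(datetime: &str, scope: &str, prev_sig_hex: &str, chunk_sha256_hex: &str) -> String {
	[
		"AWS4-HMAC-SHA256-PAYLOAD",
		datetime,         // e.g. "20150424T232743Z" (LONG_DATETIME format)
		scope,            // e.g. "20150424/garage/s3/aws4_request"
		prev_sig_hex,     // the seed signature first, then the previous chunk's signature
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // sha256("")
		chunk_sha256_hex, // sha256 of this chunk's payload bytes
	]
	.join("\n")
}

The HMAC of this string under the SigV4 signing key is the chunk's `chunk-signature`; the trailer variant drops the empty-string digest line, as in `compute_streaming_trailer_signature` above.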
src/api/k2v/Cargo.toml (new file, 37 lines)
@@ -0,0 +1,37 @@
[package]
name = "garage_api_k2v"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"

[lib]
path = "lib.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
garage_model = { workspace = true, features = [ "k2v" ] }
garage_table.workspace = true
garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true

base64.workspace = true
thiserror.workspace = true
tracing.workspace = true

futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
percent-encoding.workspace = true
url.workspace = true

serde.workspace = true
serde_json.workspace = true

opentelemetry.workspace = true
@@ -1,7 +1,5 @@
 use std::sync::Arc;

-use async_trait::async_trait;
-
 use hyper::{body::Incoming as IncomingBody, Method, Request, Response};
 use tokio::sync::watch;

@@ -12,26 +10,25 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;

 use garage_model::garage::Garage;

-use crate::generic_server::*;
-use crate::k2v::error::*;
-
-use crate::signature::verify_request;
-
-use crate::helpers::*;
-use crate::k2v::batch::*;
-use crate::k2v::index::*;
-use crate::k2v::item::*;
-use crate::k2v::router::Endpoint;
-use crate::s3::cors::*;
-
-pub use crate::signature::streaming::ReqBody;
+use garage_api_common::cors::*;
+use garage_api_common::generic_server::*;
+use garage_api_common::helpers::*;
+use garage_api_common::signature::verify_request;
+
+use crate::batch::*;
+use crate::error::*;
+use crate::index::*;
+use crate::item::*;
+use crate::router::Endpoint;
+
+pub use garage_api_common::signature::streaming::ReqBody;
 pub type ResBody = BoxBody<Error>;

 pub struct K2VApiServer {
 	garage: Arc<Garage>,
 }

-pub(crate) struct K2VApiEndpoint {
+pub struct K2VApiEndpoint {
 	bucket_name: String,
 	endpoint: Endpoint,
 }

@@ -49,7 +46,6 @@ impl K2VApiServer {
 	}
 }

-#[async_trait]
 impl ApiHandler for K2VApiServer {
 	const API_NAME: &'static str = "k2v";
 	const API_NAME_DISPLAY: &'static str = "K2V";

@@ -77,7 +73,7 @@ impl ApiHandler for K2VApiServer {
 		} = endpoint;
 		let garage = self.garage.clone();

-		// The OPTIONS method is procesed early, before we even check for an API key
+		// The OPTIONS method is processed early, before we even check for an API key
 		if let Endpoint::Options = endpoint {
 			let options_res = handle_options_api(garage, &req, Some(bucket_name))
 				.await

@@ -85,16 +81,20 @@ impl ApiHandler for K2VApiServer {
 			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}

-		let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
+		let verified_request = verify_request(&garage, req, "k2v").await?;
+		let req = verified_request.request;
+		let api_key = verified_request.access_key;

 		let bucket_id = garage
 			.bucket_helper()
 			.resolve_bucket(&bucket_name, &api_key)
-			.await?;
+			.await
+			.map_err(pass_helper_error)?;
 		let bucket = garage
 			.bucket_helper()
 			.get_existing_bucket(bucket_id)
-			.await?;
+			.await
+			.map_err(helper_error_as_internal)?;
 		let bucket_params = bucket.state.into_option().unwrap();

 		let allowed = match endpoint.authorization_type() {

@@ -176,6 +176,12 @@ impl ApiHandler for K2VApiServer {

 		Ok(resp_ok)
 	}
+
+	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
+		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
+			.map(|auth| auth.key_id)
+			.ok()
+	}
 }

 impl ApiEndpoint for K2VApiEndpoint {
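The `verify_request` call above now returns a struct rather than a tuple. Judging from the field accesses in this hunk, it looks roughly like the following (the name `VerifiedRequest` and the exact field set are assumptions; the real definition lives in `garage_api_common`):

// Hypothetical sketch of the return type, inferred from the call sites above.
pub struct VerifiedRequest {
	pub request: Request<ReqBody>, // body already wrapped for checksum/signature checking
	pub access_key: Key,           // replaces the api_key element of the old tuple
}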
@@ -4,13 +4,14 @@ use serde::{Deserialize, Serialize};

 use garage_table::{EnumerationOrder, TableSchema};

-use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;

-use crate::helpers::*;
-use crate::k2v::api_server::{ReqBody, ResBody};
-use crate::k2v::error::*;
-use crate::k2v::range::read_range;
+use garage_api_common::helpers::*;
+
+use crate::api_server::{ReqBody, ResBody};
+use crate::error::*;
+use crate::item::parse_causality_token;
+use crate::range::read_range;

 pub async fn handle_insert_batch(
 	ctx: ReqCtx,

@@ -19,11 +20,11 @@ pub async fn handle_insert_batch(
 	let ReqCtx {
 		garage, bucket_id, ..
 	} = &ctx;
-	let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
+	let items = req.into_body().json::<Vec<InsertBatchItem>>().await?;

 	let mut items2 = vec![];
 	for it in items {
-		let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
+		let ct = it.ct.map(|s| parse_causality_token(&s)).transpose()?;
 		let v = match it.v {
 			Some(vs) => DvvsValue::Value(
 				BASE64_STANDARD

@@ -46,7 +47,7 @@ pub async fn handle_read_batch(
 	ctx: ReqCtx,
 	req: Request<ReqBody>,
 ) -> Result<Response<ResBody>, Error> {
-	let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
+	let queries = req.into_body().json::<Vec<ReadBatchQuery>>().await?;

 	let resp_results = futures::future::join_all(
 		queries

@@ -140,7 +141,7 @@ pub async fn handle_delete_batch(
 	ctx: ReqCtx,
 	req: Request<ReqBody>,
 ) -> Result<Response<ResBody>, Error> {
-	let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
+	let queries = req.into_body().json::<Vec<DeleteBatchQuery>>().await?;

 	let resp_results = futures::future::join_all(
 		queries

@@ -261,7 +262,7 @@ pub(crate) async fn handle_poll_range(
 	} = ctx;
 	use garage_model::k2v::sub::PollRange;

-	let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
+	let query = req.into_body().json::<PollRangeQuery>().await?;

 	let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;

@@ -281,7 +282,8 @@ pub(crate) async fn handle_poll_range(
 		query.seen_marker,
 		timeout_msec,
 	)
-	.await?;
+	.await
+	.map_err(pass_helper_error)?;

 	if let Some((items, seen_marker)) = resp {
 		let resp = PollRangeResponse {
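All four handlers in this file switch from the `parse_json_body` free function to a `json()` method on the typed body. A usage sketch, under the assumption that `ReqBody::json` collects the (checksum-verified) body and then deserializes it:

// Sketch: the new body-reading style used throughout this file.
async fn read_queries(req: Request<ReqBody>) -> Result<Vec<ReadBatchQuery>, Error> {
	// Replaces: parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await
	req.into_body().json::<Vec<ReadBatchQuery>>().await
}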
@@ -1,52 +1,54 @@
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

-use crate::common_error::CommonError;
-pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
-use crate::generic_server::ApiError;
-use crate::helpers::*;
-use crate::signature::error::Error as SignatureError;
+use garage_api_common::common_error::{commonErrorDerivative, CommonError};
+pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
+pub use garage_api_common::common_error::{
+	CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
+};
+use garage_api_common::generic_server::ApiError;
+use garage_api_common::helpers::*;
+use garage_api_common::signature::error::Error as SignatureError;

 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	/// Error from common error
-	Common(CommonError),
+	Common(#[from] CommonError),

 	// Category: cannot process
 	/// Authorization Header Malformed
-	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+	#[error("Authorization header malformed, unexpected scope: {0}")]
 	AuthorizationHeaderMalformed(String),

+	/// The provided digest (checksum) value was invalid
+	#[error("Invalid digest: {0}")]
+	InvalidDigest(String),
+
 	/// The object requested don't exists
-	#[error(display = "Key not found")]
+	#[error("Key not found")]
 	NoSuchKey,

 	/// Some base64 encoded data was badly encoded
-	#[error(display = "Invalid base64: {}", _0)]
-	InvalidBase64(#[error(source)] base64::DecodeError),
+	#[error("Invalid base64: {0}")]
+	InvalidBase64(#[from] base64::DecodeError),

+	/// Invalid causality token
+	#[error("Invalid causality token")]
+	InvalidCausalityToken,
+
 	/// The client asked for an invalid return format (invalid Accept header)
-	#[error(display = "Not acceptable: {}", _0)]
+	#[error("Not acceptable: {0}")]
 	NotAcceptable(String),

 	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
-	#[error(display = "Invalid UTF-8: {}", _0)]
-	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+	#[error("Invalid UTF-8: {0}")]
+	InvalidUtf8Str(#[from] std::str::Utf8Error),
 }

-impl<T> From<T> for Error
-where
-	CommonError: From<T>,
-{
-	fn from(err: T) -> Self {
-		Error::Common(CommonError::from(err))
-	}
-}
-
-impl CommonErrorDerivative for Error {}
+commonErrorDerivative!(Error);

 impl From<SignatureError> for Error {
 	fn from(err: SignatureError) -> Self {

@@ -56,6 +58,7 @@ impl From<SignatureError> for Error {
 				Self::AuthorizationHeaderMalformed(c)
 			}
 			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
+			SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
 		}
 	}
 }

@@ -72,6 +75,8 @@ impl Error {
 			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
 			Error::InvalidBase64(_) => "InvalidBase64",
 			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
+			Error::InvalidCausalityToken => "CausalityToken",
+			Error::InvalidDigest(_) => "InvalidDigest",
 		}
 	}
 }

@@ -85,7 +90,9 @@ impl ApiError for Error {
 			Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
 			Error::AuthorizationHeaderMalformed(_)
 			| Error::InvalidBase64(_)
-			| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
+			| Error::InvalidUtf8Str(_)
+			| Error::InvalidDigest(_)
+			| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
 		}
 	}
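The error-type changes above are a mechanical migration from `err-derive` to `thiserror`. A minimal self-contained sketch of the attribute translation (not taken from the patch):

use thiserror::Error;

// err-derive:  #[error(display = "Invalid base64: {}", _0)]
//              InvalidBase64(#[error(source)] base64::DecodeError),
// becomes, with thiserror:
#[derive(Debug, Error)]
enum DemoError {
	#[error("Invalid base64: {0}")]
	InvalidBase64(#[from] base64::DecodeError),
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8Str(#[from] std::str::Utf8Error),
}

With thiserror, `#[from]` both marks the source and derives the corresponding `From` impl, and the old hand-written blanket `impl<T> From<T> for Error` is now generated by the `commonErrorDerivative!` macro instead.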
@@ -5,10 +5,11 @@ use garage_table::util::*;

 use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};

-use crate::helpers::*;
-use crate::k2v::api_server::ResBody;
-use crate::k2v::error::*;
-use crate::k2v::range::read_range;
+use garage_api_common::helpers::*;
+
+use crate::api_server::ResBody;
+use crate::error::*;
+use crate::range::read_range;

 pub async fn handle_read_index(
 	ctx: ReqCtx,
@@ -6,9 +6,10 @@ use hyper::{Request, Response, StatusCode};
 use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;

-use crate::helpers::*;
-use crate::k2v::api_server::{ReqBody, ResBody};
-use crate::k2v::error::*;
+use garage_api_common::helpers::*;
+
+use crate::api_server::{ReqBody, ResBody};
+use crate::error::*;

 pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";

@@ -18,6 +19,10 @@ pub enum ReturnFormat {
 	Either,
 }

+pub(crate) fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
+	CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
+}
+
 impl ReturnFormat {
 	pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
 		let accept = match req.headers().get(header::ACCEPT) {

@@ -136,12 +141,10 @@ pub async fn handle_insert_item(
 		.get(X_GARAGE_CAUSALITY_TOKEN)
 		.map(|s| s.to_str())
 		.transpose()?
-		.map(CausalContext::parse_helper)
+		.map(parse_causality_token)
 		.transpose()?;

-	let body = http_body_util::BodyExt::collect(req.into_body())
-		.await?
-		.to_bytes();
+	let body = req.into_body().collect().await?;

 	let value = DvvsValue::Value(body.to_vec());

@@ -176,7 +179,7 @@ pub async fn handle_delete_item(
 		.get(X_GARAGE_CAUSALITY_TOKEN)
 		.map(|s| s.to_str())
 		.transpose()?
-		.map(CausalContext::parse_helper)
+		.map(parse_causality_token)
 		.transpose()?;

 	let value = DvvsValue::Deleted;
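With the new helper in place, the token-reading chain used by both handlers above can be read as one pipeline. A sketch (assuming the same `From` conversions for the header `to_str` error that the handlers above rely on):

// Sketch: extract an optional causality token from the request headers.
// A malformed token now surfaces as Error::InvalidCausalityToken (HTTP 400).
fn causality_from_headers(headers: &HeaderMap) -> Result<Option<CausalContext>, Error> {
	headers
		.get(X_GARAGE_CAUSALITY_TOKEN)
		.map(|s| s.to_str())
		.transpose()? // non-printable header value -> error
		.map(parse_causality_token)
		.transpose() // unparseable token -> Error::InvalidCausalityToken
}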
@@ -1,3 +1,6 @@
+#[macro_use]
+extern crate tracing;
+
 pub mod api_server;
 mod error;
 mod router;
@@ -7,8 +7,9 @@ use std::sync::Arc;
 use garage_table::replication::TableShardedReplication;
 use garage_table::*;

-use crate::helpers::key_after_prefix;
-use crate::k2v::error::*;
+use garage_api_common::helpers::key_after_prefix;
+
+use crate::error::*;

 /// Read range in a Garage table.
 /// Returns (entries, more?, nextStart)
@@ -1,11 +1,11 @@
-use crate::k2v::error::*;
+use crate::error::*;

 use std::borrow::Cow;

 use hyper::{Method, Request};

-use crate::helpers::Authorization;
-use crate::router_macros::{generateQueryParameters, router_match};
+use garage_api_common::helpers::Authorization;
+use garage_api_common::router_macros::{generateQueryParameters, router_match};

 router_match! {@func
@@ -1,17 +0,0 @@ (deleted file: the old crate root of the monolithic garage_api crate)
-//! Crate for serving a S3 compatible API
-#[macro_use]
-extern crate tracing;
-
-pub mod common_error;
-
-mod encoding;
-pub mod generic_server;
-pub mod helpers;
-mod router_macros;
-/// This mode is public only to help testing. Don't expect stability here
-pub mod signature;
-
-pub mod admin;
-#[cfg(feature = "k2v")]
-pub mod k2v;
-pub mod s3;
@@ -1,12 +1,12 @@
 [package]
-name = "garage_api"
-version = "0.10.0"
+name = "garage_api_s3"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "S3 API server crate for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../README.md"
+readme = "../../../README.md"

 [lib]
 path = "lib.rs"

@@ -20,27 +20,24 @@ garage_block.workspace = true
 garage_net.workspace = true
 garage_util.workspace = true
 garage_rpc.workspace = true
+garage_api_common.workspace = true

 aes-gcm.workspace = true
-argon2.workspace = true
 async-compression.workspace = true
-async-trait.workspace = true
 base64.workspace = true
 bytes.workspace = true
 chrono.workspace = true
-crypto-common.workspace = true
-err-derive.workspace = true
+crc32fast.workspace = true
+crc32c.workspace = true
+thiserror.workspace = true
 hex.workspace = true
-hmac.workspace = true
-idna.workspace = true
 tracing.workspace = true
 md-5.workspace = true
-nom.workspace = true
 pin-project.workspace = true
+sha1.workspace = true
 sha2.workspace = true

 futures.workspace = true
-futures-util.workspace = true
 tokio.workspace = true
 tokio-stream.workspace = true
 tokio-util.workspace = true

@@ -51,21 +48,13 @@ httpdate.workspace = true
 http-range.workspace = true
 http-body-util.workspace = true
 hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
-hyper-util.workspace = true
 multer.workspace = true
 percent-encoding.workspace = true
 roxmltree.workspace = true
 url.workspace = true

 serde.workspace = true
-serde_bytes.workspace = true
 serde_json.workspace = true
 quick-xml.workspace = true

 opentelemetry.workspace = true
-opentelemetry-prometheus = { workspace = true, optional = true }
-prometheus = { workspace = true, optional = true }
-
-[features]
-k2v = [ "garage_util/k2v", "garage_model/k2v" ]
-metrics = [ "opentelemetry-prometheus", "prometheus" ]
@@ -1,7 +1,5 @@
 use std::sync::Arc;

-use async_trait::async_trait;
-
 use hyper::header;
 use hyper::{body::Incoming as IncomingBody, Request, Response};
 use tokio::sync::watch;
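This patch drops both the `async-trait` dependency and the `#[async_trait]` attribute, here and in the K2V server. The likely enabler is that `async fn` is allowed directly in trait definitions since Rust 1.75, so `ApiHandler` can be a plain trait. A sketch, not the actual trait from the patch:

// Sketch: with Rust >= 1.75, no #[async_trait] macro is needed.
trait ApiHandlerSketch {
	async fn handle(&self, req: Request<IncomingBody>) -> Result<Response<ResBody>, Error>;
}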
@@ -14,33 +12,33 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 use garage_model::garage::Garage;
 use garage_model::key_table::Key;

-use crate::generic_server::*;
-use crate::s3::error::*;
-
-use crate::signature::verify_request;
-
-use crate::helpers::*;
-use crate::s3::bucket::*;
-use crate::s3::copy::*;
-use crate::s3::cors::*;
-use crate::s3::delete::*;
-use crate::s3::get::*;
-use crate::s3::lifecycle::*;
-use crate::s3::list::*;
-use crate::s3::multipart::*;
-use crate::s3::post_object::handle_post_object;
-use crate::s3::put::*;
-use crate::s3::router::Endpoint;
-use crate::s3::website::*;
-
-pub use crate::signature::streaming::ReqBody;
+use garage_api_common::cors::*;
+use garage_api_common::generic_server::*;
+use garage_api_common::helpers::*;
+use garage_api_common::signature::verify_request;
+
+use crate::bucket::*;
+use crate::copy::*;
+use crate::cors::*;
+use crate::delete::*;
+use crate::error::*;
+use crate::get::*;
+use crate::lifecycle::*;
+use crate::list::*;
+use crate::multipart::*;
+use crate::post_object::handle_post_object;
+use crate::put::*;
+use crate::router::Endpoint;
+use crate::website::*;
+
+pub use garage_api_common::signature::streaming::ReqBody;
 pub type ResBody = BoxBody<Error>;

 pub struct S3ApiServer {
 	garage: Arc<Garage>,
 }

-pub(crate) struct S3ApiEndpoint {
+pub struct S3ApiEndpoint {
 	bucket_name: Option<String>,
 	endpoint: Endpoint,
 }
@@ -70,7 +68,6 @@ impl S3ApiServer {
 	}
 }

-#[async_trait]
 impl ApiHandler for S3ApiServer {
 	const API_NAME: &'static str = "s3";
 	const API_NAME_DISPLAY: &'static str = "S3";
@@ -124,7 +121,9 @@ impl ApiHandler for S3ApiServer {
 			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}

-		let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
+		let verified_request = verify_request(&garage, req, "s3").await?;
+		let req = verified_request.request;
+		let api_key = verified_request.access_key;

 		let bucket_name = match bucket_name {
 			None => {
@@ -137,20 +136,14 @@ impl ApiHandler for S3ApiServer {
 
 		// Special code path for CreateBucket API endpoint
 		if let Endpoint::CreateBucket {} = endpoint {
-			return handle_create_bucket(
-				&garage,
-				req,
-				content_sha256,
-				&api_key.key_id,
-				bucket_name,
-			)
-			.await;
+			return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
 		}
 
 		let bucket_id = garage
 			.bucket_helper()
 			.resolve_bucket(&bucket_name, &api_key)
-			.await?;
+			.await
+			.map_err(pass_helper_error)?;
 		let bucket = garage
 			.bucket_helper()
 			.get_existing_bucket(bucket_id)
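resolve_bucket now surfaces its failure through pass_helper_error (imported via garage_api_common::helpers in the first hunk) instead of relying on a bare `?` conversion, making the translation from helper-layer errors to API errors explicit at the call site. The general pattern in isolation, with hypothetical error types that are not Garage's:

#[derive(Debug)]
struct HelperError(String);
#[derive(Debug)]
struct ApiError(String);

// Illustrative adapter: convert a lower-layer error into the API error.
fn pass_helper_error(e: HelperError) -> ApiError {
    ApiError(format!("helper error: {}", e.0))
}

fn resolve() -> Result<u64, HelperError> {
    Err(HelperError("bucket not found".into()))
}

fn lookup() -> Result<u64, ApiError> {
    resolve().map_err(pass_helper_error)
}

fn main() {
    assert!(lookup().is_err());
}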
@@ -181,7 +174,7 @@ impl ApiHandler for S3ApiServer {
 		let resp = match endpoint {
 			Endpoint::HeadObject {
 				key, part_number, ..
-			} => handle_head(ctx, &req, &key, part_number).await,
+			} => handle_head(ctx, &req.map(|_| ()), &key, part_number).await,
 			Endpoint::GetObject {
 				key,
 				part_number,
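handle_head (and handle_get in the next hunk) now receive &req.map(|_| ()): HEAD and GET never read a request body, so the body is mapped away and the handlers only see the request metadata. http::Request::map swaps the body type while preserving method, URI, version and headers, as this self-contained sketch shows:

use http::Request;

// Read-only handlers can take a `Request<()>` and leave the streaming
// body type out of their signatures entirely.
fn strip_body<B>(req: Request<B>) -> Request<()> {
    req.map(|_| ())
}

fn main() {
    let req = Request::builder()
        .method("HEAD")
        .uri("/bucket/key")
        .body("ignored body")
        .unwrap();
    let meta_only = strip_body(req);
    assert_eq!(meta_only.uri().path(), "/bucket/key");
}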
@@ -201,20 +194,20 @@ impl ApiHandler for S3ApiServer {
 					response_content_type,
 					response_expires,
 				};
-				handle_get(ctx, &req, &key, part_number, overrides).await
+				handle_get(ctx, &req.map(|_| ()), &key, part_number, overrides).await
 			}
 			Endpoint::UploadPart {
 				key,
 				part_number,
 				upload_id,
-			} => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
+			} => handle_put_part(ctx, req, &key, part_number, &upload_id).await,
 			Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
 			Endpoint::UploadPartCopy {
 				key,
 				part_number,
 				upload_id,
 			} => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
-			Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
+			Endpoint::PutObject { key } => handle_put(ctx, req, &key).await,
 			Endpoint::AbortMultipartUpload { key, upload_id } => {
 				handle_abort_multipart_upload(ctx, &key, &upload_id).await
 			}
@@ -223,7 +216,7 @@ impl ApiHandler for S3ApiServer {
 				handle_create_multipart_upload(ctx, &req, &key).await
 			}
 			Endpoint::CompleteMultipartUpload { key, upload_id } => {
-				handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
+				handle_complete_multipart_upload(ctx, req, &key, &upload_id).await
 			}
 			Endpoint::CreateBucket {} => unreachable!(),
 			Endpoint::HeadBucket {} => {
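As in the UploadPart and PutObject arms above, handle_complete_multipart_upload loses its trailing content_sha256 parameter. The pattern across these hunks is consistent: checksum material is no longer threaded through every handler by hand but, presumably, enforced inside the body wrapper produced by verify_request. A toy illustration of that design (not Garage's implementation), using the sha2 crate:

use sha2::{Digest, Sha256};

// A body wrapper that verifies the payload digest when the data is
// consumed, the kind of design that makes a separate `content_sha256`
// argument to each handler unnecessary.
struct VerifiedBody {
    data: Vec<u8>,
    expected_sha256: Vec<u8>,
}

impl VerifiedBody {
    fn read_all(self) -> Result<Vec<u8>, &'static str> {
        if Sha256::digest(&self.data).as_slice() == self.expected_sha256.as_slice() {
            Ok(self.data)
        } else {
            Err("payload does not match its declared checksum")
        }
    }
}

fn main() {
    let data = b"hello".to_vec();
    let body = VerifiedBody {
        expected_sha256: Sha256::digest(&data).to_vec(),
        data,
    };
    assert!(body.read_all().is_ok());
}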
@@ -233,6 +226,7 @@ impl ApiHandler for S3ApiServer {
 			Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
 			Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
 			Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
+			Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
 			Endpoint::ListObjects {
 				delimiter,
 				encoding_type,
@@ -319,25 +313,22 @@ impl ApiHandler for S3ApiServer {
 			} => {
 				let query = ListPartsQuery {
 					bucket_name: ctx.bucket_name.clone(),
-					bucket_id,
 					key,
 					upload_id,
 					part_number_marker: part_number_marker.map(|p| p.min(10000)),
 					max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
 				};
-				handle_list_parts(ctx, &query).await
+				handle_list_parts(ctx, req, &query).await
 			}
-			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
+			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req).await,
 			Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
-			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
+			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req).await,
 			Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
 			Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
-			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
+			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req).await,
 			Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
 			Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
-			Endpoint::PutBucketLifecycleConfiguration {} => {
-				handle_put_lifecycle(ctx, req, content_sha256).await
-			}
+			Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await,
 			Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
 			endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
 		};
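Besides dropping the now-redundant bucket_id field and passing req through to handle_list_parts, this hunk keeps the S3-mandated bounds on the query: part numbers max out at 10000 and a ListParts page returns between 1 and 1000 parts. The clamping logic in isolation, as a runnable sketch:

// The bounds mirror the S3 limits visible in the hunk above: at most
// 10000 parts per multipart upload, and 1..=1000 parts per page.
fn clamp_query(max_parts: Option<u64>, part_number_marker: Option<u64>) -> (u64, Option<u64>) {
    (
        max_parts.unwrap_or(1000).clamp(1, 1000),
        part_number_marker.map(|p| p.min(10000)),
    )
}

fn main() {
    assert_eq!(clamp_query(None, None), (1000, None));
    assert_eq!(clamp_query(Some(0), Some(99999)), (1, Some(10000)));
}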
@@ -352,6 +343,12 @@ impl ApiHandler for S3ApiServer {
 		Ok(resp_ok)
 	}
 
+	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
+		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
+			.map(|auth| auth.key_id)
+			.ok()
+	}
+
 }
 
 impl ApiEndpoint for S3ApiEndpoint {
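The new key_id_from_request hook extracts the access key id by parsing the Authorization header without verifying the signature, which is enough for purposes such as logging or per-key accounting before full verification runs. For reference, a minimal stand-alone parser for the AWS SigV4 header shape (an illustration only; Garage's Authorization::parse_header is more thorough):

// Extract the access key id from an AWS SigV4 Authorization header, e.g.
// "AWS4-HMAC-SHA256 Credential=GK.../20250101/garage/s3/aws4_request,
//  SignedHeaders=host;x-amz-date, Signature=..."
// The key id is the first '/'-separated component of the credential scope.
fn key_id_from_authorization(header: &str) -> Option<String> {
    let cred = header.split("Credential=").nth(1)?;
    let scope = cred.split(',').next()?;
    let key_id = scope.split('/').next()?.trim();
    if key_id.is_empty() {
        None
    } else {
        Some(key_id.to_string())
    }
}

fn main() {
    let h = "AWS4-HMAC-SHA256 Credential=GK31c2f218a2e44f485b94239e/20250101/garage/s3/aws4_request, SignedHeaders=host, Signature=00";
    assert_eq!(
        key_id_from_authorization(h).as_deref(),
        Some("GK31c2f218a2e44f485b94239e")
    );
}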
Some files were not shown because too many files have changed in this diff.