Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2026-05-15 13:46:54 -04:00)

# Compare commits: v1.99.0-in ... main-v1

173 commits
The 173 commits in this range (SHA1 only):

b6b18427a5, 9987166b2b, b72b090a09, 8551aefed4, 47bf5d9fb0, 5df37dae5e,
44af0bdab3, a7d6620e18, 8eb12755e4, c685a2cbaf, 969f42a970, 424d4f8d4d,
bf5290036f, 4efc8bac07, f3dcc39903, 43e02920c2, dcc2fe4ac5, e3a5ec6ef6,
4d124e1c76, d769a7be5d, 511cf0c6ec, 95693d45b2, ca296477f3, ca3b4a050d,
a057ab23ea, 58bc65b9a8, ac851d6dee, eac2aa6fe4, 1e0201ada2, 82297371bf,
174f4f01a8, 1aac7b4875, b43c58cbe5, 9481ac428e, 1c29d04cc5, b48a8eaa1f,
42fd8583bd, 236af3a958, 4b1fdbef55, 0f1b488be0, 0bbf63ee0e, 879d941d7b,
d726cf0299, 0c7aeab6f8, 5687fc0375, 97f1e9ab52, 60b1d78b56, 4c895a7186,
c3b5cbf212, 57a467b5c0, 6cf6db5c61, d5a57e3e13, 5cf354acb4, 2b007ddea3,
c8599a8636, 0b901bf291, c8c20d6f47, e5db610e4c, 65c6f8adea, 54b9bf02a3,
469153233f, 90bba5889a, a64b567d43, 6ea86db8cd, aa69c06f2b, a6c6c44310,
96d7713915, d64498c3d3, b340599e68, 5448012b27, ce34d11a65, 8cb7623ebd,
5469c95877, f930c6f643, afcb22bf16, cc29a40d51, 0f3f180c3e, 70cf6004ae,
c7571ff89b, 1b42919bf7, 3f4ab3a4a3, 3a4afc04a9, fbf03e9378, 9eb07d4c7b,
85ee4f5d8c, 328072d122, 26bc807905, a9f5f242b2, ae98abca5c, adfa44ad70,
47143b88ad, 8843aa92fa, b601b3e46d, a19d2f16e2, fc8fc60f6d, 77079a1498,
2a4f729b57, 1b042e379e, ffbce0f689, 37e5621dde, 6529ff379a, a8d73682a4,
8654eb19bf, 54ea412188, 2ade8c86f6, b15e2cbb6c, 0fd1b7342b, be16bc7a05,
bfaa1ca6b7, de8eeab4ad, ae3f7ee76c, 2dc3a6dbbe, c6bc3f229b, bba9202f31,
a605a80806, 539af12d21, a2a9e3cec4, 14274bc13c, bf4691d98a, ad151cb1dc,
3c20984a08, e6e4e051a1, 9b38cba6f3, 4ef954d176, 02498a93d0, 4caad5425d,
9ec3f8cc3c, 14d2f2b18d, a7d845a999, dd20e5d22a, 6906a4ff12, 9053782d71,
c96be1a9a8, 98e56490a1, e791ccec8f, d605c4fed1, 0ce5f7eb00, 516255321f,
f3b05ff771, e254cc20e5, 12f15c4c2b, 42c5d02cdf, 4689b10448, 156b10ee65,
8647ebf003, 67d7c0769b, 09ed5ab8cc, a0ea28b0da, c5237c31e7, f87943a39d,
c0846c56fe, 1cb0ae10a8, 1a8f74fc94, 2191620af5, bf27a3ec98, f64ec6e542,
6d38907dac, cfe8e8d45c, f6e805e7db, 45e10e55f9, 730bfee753, ccab0e4ae5,
abb60dcf7e, f8b0817ddc, 21c0dda16a, 658541d812, c5df820e2c, a04d6cd5b8,
44a896f9b5, cee7560fc1, 2f0c5ca220, 6d798c640f, 8b35a946d9
121 changed files with 5231 additions and 2329 deletions
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - push
@@ -9,27 +12,32 @@ when:

 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-shell --attr devShell --run "cargo fmt -- --check"
+      - nix-build -j4 --attr flakePackages.fmt

   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.dev

   - name: unit + func tests (lmdb)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-lmdb

   - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-sqlite

+  - name: unit + func tests (fjall)
+    image: nixpkgs/nix:nixos-24.05
+    commands:
+      - nix-build -j4 --attr flakePackages.tests-fjall
+
   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.dev
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -8,7 +11,7 @@ depends_on:

 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       AWS_ACCESS_KEY_ID:
         from_secret: garagehq_aws_access_key_id
@@ -19,7 +22,7 @@ steps:
       - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       DOCKER_AUTH:
         from_secret: docker_auth
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -16,17 +19,17 @@ matrix:

 steps:
   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
     when:
@@ -36,7 +39,7 @@ steps:
       ARCH: i386

   - name: upgrade tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
     when:
@@ -44,7 +47,7 @@ steps:
       ARCH: amd64

   - name: push static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       TARGET: "${TARGET}"
       AWS_ACCESS_KEY_ID:
@@ -55,7 +58,7 @@ steps:
       - nix-shell --attr ci --run "to_s3"

   - name: docker build and publish
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
Cargo.lock (generated, 2309 lines changed): file diff suppressed because it is too large.

Cargo.toml (56 lines changed):
@@ -24,18 +24,18 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.0.1", path = "src/api/common" }
-garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
-garage_block = { version = "1.0.1", path = "src/block" }
-garage_db = { version = "1.0.1", path = "src/db", default-features = false }
-garage_model = { version = "1.0.1", path = "src/model", default-features = false }
-garage_net = { version = "1.0.1", path = "src/net" }
-garage_rpc = { version = "1.0.1", path = "src/rpc" }
-garage_table = { version = "1.0.1", path = "src/table" }
-garage_util = { version = "1.0.1", path = "src/util" }
-garage_web = { version = "1.0.1", path = "src/web" }
+garage_api_common = { version = "1.3.1", path = "src/api/common" }
+garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
+garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
+garage_block = { version = "1.3.1", path = "src/block" }
+garage_db = { version = "1.3.1", path = "src/db", default-features = false }
+garage_model = { version = "1.3.1", path = "src/model", default-features = false }
+garage_net = { version = "1.3.1", path = "src/net" }
+garage_rpc = { version = "1.3.1", path = "src/rpc" }
+garage_table = { version = "1.3.1", path = "src/table" }
+garage_util = { version = "1.3.1", path = "src/util" }
+garage_web = { version = "1.3.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -52,13 +52,11 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
 err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"
 hexdump = "0.1"
 hmac = "0.12"
-idna = "0.5"
-itertools = "0.12"
 ipnet = "2.9.0"
 lazy_static = "1.4"
@@ -66,6 +64,7 @@ md-5 = "0.10"
 mktemp = "0.5"
 nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
+parking_lot = "0.12"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
@@ -84,12 +83,14 @@ pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
 syslog-tracing = "0.3"
 tracing = "0.1"
+tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
+rusqlite = "0.37"
 r2d2 = "0.8"
-r2d2_sqlite = "0.24"
+r2d2_sqlite = "0.31"
+fjall = "2.4"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -132,24 +133,21 @@ opentelemetry-contrib = "0.9"
 prometheus = "0.13"

 # used by the k2v-client crate only
-aws-sigv4 = { version = "1.1" }
-hyper-rustls = { version = "0.26", features = ["http2"] }
+aws-sigv4 = { version = "1.1", default-features = false }
+hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
 log = "0.4"
-thiserror = "1.0"
+thiserror = "2.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"
-aws-sdk-config = "1.13"
-aws-sdk-s3 = "1.14"
-
-[profile.dev]
-#lto = "thin" # disabled for now, adds 2-4 min to each CI build
-lto = "off"
+aws-smithy-runtime = { version = "1.8", default-features = false, features = ["tls-rustls"] }
+aws-sdk-config = { version = "1.62", default-features = false }
+aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }

 [profile.release]
-lto = true
-codegen-units = 1
-opt-level = "s"
-strip = true
+lto = "thin"
+codegen-units = 16
+opt-level = 3
+strip = "debuginfo"
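Since the diff enables a new `fjall` backend (see the cargo features in `nix/compile.nix` further down this page), a local build exercising it might look like the following sketch; the exact feature list is an assumption taken from that file, not a documented invocation:

```bash
# Build Garage with the experimental fjall metadata backend enabled
# (feature names copied from nix/compile.nix in this diff).
cargo build --release --locked \
  --no-default-features --features bundled-libs,lmdb,sqlite,fjall,k2v
```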
@@ -687,7 +687,7 @@ paths:
       operationId: "GetBucketInfo"
       summary: "Get a bucket"
       description: |
-        Given a bucket identifier (`id`) or a global alias (`alias`), get its information.
+        Given a bucket identifier (`id`) or a global alias (`globalAlias`), get its information.
         It includes its aliases, its web configuration, keys that have some permissions
         on it, some statistics (number of objects, size), number of dangling multipart uploads,
         and its quotas (if any).
@@ -701,7 +701,7 @@ paths:
           example: "b4018dc61b27ccb5c64ec1b24f53454bbbd180697c758c4d47a22a8921864a87"
           schema:
             type: string
-        - name: alias
+        - name: globalAlias
           in: query
           description: |
             The exact global alias of one of the existing buckets.
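As an illustration of the renamed parameter, a bucket lookup by global alias through the admin API would be sketched as follows; the `/v1/bucket` path, admin port `3903`, and the token variable are assumptions based on Garage's usual admin API defaults, not something stated in this diff:

```bash
# Hypothetical request: fetch bucket info by global alias
# (note the query parameter is now ?globalAlias=, not ?alias=).
curl -s \
  -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  "http://localhost:3903/v1/bucket?globalAlias=my-bucket"
```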
@@ -12,7 +12,7 @@ In this section, we cover the following web applications:
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
-| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
+| [Pixelfed](#pixelfed) | ✅ | Natively supported |
 | [Pleroma](#pleroma) | ❓ | Not yet tested |
 | [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
 | [Funkwhale](#funkwhale) | ❓ | Not yet tested |
@@ -69,7 +69,7 @@ $CONFIG = array(
     'hostname' => '127.0.0.1', // Can also be a domain name, eg. garage.example.com
     'port' => 3900, // Put your reverse proxy port or your S3 API port
     'use_ssl' => false, // Set it to true if you have a TLS enabled reverse proxy
-    'region' => 'garage', // Garage has only one region named "garage"
+    'region' => 'garage', // Garage's default region is named "garage", edit according to your cluster config
     'use_path_style' => true // Garage supports only path style, must be set to true
   ],
 ],
@@ -135,7 +135,7 @@ bucket but doesn't also know the secret encryption key.
 *Click on the picture to zoom*

 Add a new external storage. Put what you want in "folder name" (eg. "shared"). Select "Amazon S3". Keep "Access Key" for the Authentication field.
-In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region (garage). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.
+In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region ("garage" if you use the default, or the one you configured in your `garage.toml`). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.

 Now go to your "Files" app and a new "linked folder" has appeared with the name you chose earlier (eg. "shared").
@@ -191,10 +191,10 @@ garage key create peertube-key

 Keep the Key ID and the Secret key in a pad, they will be needed later.

-We need two buckets, one for normal videos (named peertube-video) and one for webtorrent videos (named peertube-playlist).
+We need two buckets, one for normal videos (named peertube-videos) and one for webtorrent videos (named peertube-playlists).
 ```bash
 garage bucket create peertube-videos
-garage bucket create peertube-playlist
+garage bucket create peertube-playlists
 ```

 Now we allow our key to read and write on these buckets:
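The hunk is cut before the actual commands; following the `garage bucket allow` syntax used elsewhere on this page (see the Plakar section below), they would presumably look like this sketch:

```bash
# Hypothetical commands matching the bucket and key names above.
garage bucket allow peertube-videos --read --write --key peertube-key
garage bucket allow peertube-playlists --read --write --key peertube-key
```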
@@ -238,7 +238,7 @@ object_storage:
   # Put localhost only if you have a garage instance running on that node
   endpoint: 'http://localhost:3900' # or "garage.example.com" if you have TLS on port 443

-  # Garage supports only one region for now, named garage
+  # Garage's default region is named "garage", edit according to your config
   region: 'garage'

   credentials:
@@ -253,7 +253,7 @@ object_storage:
   proxify_private_files: false

   streaming_playlists:
-    bucket_name: 'peertube-playlist'
+    bucket_name: 'peertube-playlists'

     # Keep it empty for our example
     prefix: ''
@@ -441,7 +441,7 @@ media_storage_providers:
     store_synchronous: True # do we want to wait that the file has been written before returning?
     config:
       bucket: matrix # the name of our bucket, we chose matrix earlier
-      region_name: garage # only "garage" is supported for the region field
+      region_name: garage # "garage" by default, edit according to your cluster config
       endpoint_url: http://localhost:3900 # the path to the S3 endpoint
       access_key_id: "GKxxx" # your Key ID
       secret_access_key: "xxxx" # your Secret Key
@@ -161,3 +161,49 @@ kopia repository validate-provider

 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.
+
+## Plakar
+
+Create your key and bucket on the Garage server:
+
+```bash
+garage key create my-plakar-key
+garage bucket create plakar-backups
+garage bucket allow plakar-backups --read --write --key my-plakar-key
+…
+```
+
+On the Plakar server, add your Garage cluster as a storage location:
+```bash
+# region: "garage", or the one you've specified in garage.toml
+plakar store add garageS3 s3://my-garage.tld/plakar-backups \
+    region=garage \
+    access_key=<Key ID from "garage key info my-plakar-key"> \
+    secret_access_key=<Secret key from "garage key info my-plakar-key">
+```
+
+Then create the repository:
+```bash
+plakar at @garageS3 create -plaintext  # unencrypted
+# or
+plakar at @garageS3 create  # encrypted
+```
+
+If you encrypt your backups (Plakar's default), you will need to define a strong passphrase. Do not forget to save your password safely: it will be needed to decrypt your backups.
+
+After the repository has been created, check that everything works as expected (this might give an empty result as no file has been added yet, but there should be no error message):
+```bash
+plakar at @garageS3 check
+```
+
+Now that everything is configured, you can use Garage as your backup storage. For instance, sync it with a local backup storage:
+```bash
+plakar at ~/backups sync to @garageS3
+```
+
+Or list the S3 storage content:
+```bash
+plakar at @garageS3 ls
+```
+
+More information in the Plakar documentation: https://www.plakar.io/docs/main/quickstart/
@@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.

 ## Comparison of Ansible roles

-| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
-|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
-| **Runtime** | Systemd | Docker |
-| **Target OS** | Any Linux | Any Linux |
-| **Architecture** | amd64, arm64, i686 | amd64, arm64 |
-| **Additional software** | None | Traefik |
-| **Automatic node connection** | ❌ | ✅ |
-| **Layout management** | ❌ | ✅ |
-| **Manage buckets & keys** | ❌ | ✅ (basic) |
-| **Allow custom Garage config** | ✅ | ❌ |
-| **Facilitate Garage upgrades** | ✅ | ❌ |
-| **Multiple instances on one host** | ✅ | ✅ |
+| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
+|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
+| **Runtime** | Systemd | Docker | Systemd |
+| **Target OS** | Any Linux | Any Linux | Any Linux |
+| **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
+| **Additional software** | None | Traefik | Nginx and Keepalived (optional) |
+| **Automatic node connection** | ❌ | ✅ | ✅ |
+| **Layout management** | ❌ | ✅ | ✅ |
+| **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ |
+| **Allow custom Garage config** | ✅ | ❌ | ❌ |
+| **Facilitate Garage upgrades** | ✅ | ❌ | ✅ |
+| **Multiple instances on one host** | ✅ | ✅ | ❌ |


 ## zorun/ansible-role-garage
@@ -49,3 +49,15 @@ structured DNS names, etc).

 As a result, this role makes it easier to start with Garage on Ansible,
 but is less flexible.
+
+## eddster2309/ansible-role-garage
+
+[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
+
+This is an opinionated but customisable role using the official Garage
+static binaries, and it only requires Systemd. As such it should work on any
+Linux-based host. It includes all the necessary configuration to
+automatically set up a clustered Garage deployment. Most Garage
+configuration options are exposed through Ansible variables, so while you
+can't provide a custom config you can get very close. It can optionally
+install an HA Nginx deployment with Keepalived.
@@ -15,9 +15,10 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage.toml`. You can run
-Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
-will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage/garage.toml`. You can run
+Garage using: `rc-service garage start`.
+
+If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@@ -26,7 +27,7 @@ it's stable).

 ## Arch Linux

-Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).
+Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).

 ## FreeBSD
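To also start Garage at boot on Alpine, the OpenRC service can be added to the default runlevel. This is a hypothetical follow-up to the `rc-service` command above, not part of this diff:

```bash
# Enable the OpenRC service at boot, then start it now.
rc-update add garage default
rc-service garage start
```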
@@ -11,7 +11,7 @@ Firstly clone the repository:

 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/scripts/helm
+cd garage/script/helm
 ```

 Deploy with default options:
@@ -26,6 +26,13 @@ Or deploy with custom values:
 helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
 ```

+If you want to manage the CustomResourceDefinition used by Garage for its `kubernetes_discovery` outside of the Helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and apply the kustomization before deploying the Helm chart:
+
+```bash
+kubectl apply -k ../k8s/crd
+helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
+```
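Equivalently, the flag can be passed on the command line instead of a values file. A sketch using the `garage.kubernetesSkipCrd` value documented in the chart's README further down this page:

```bash
# Apply the CRD out-of-band, then install the chart without its CRD templates.
kubectl apply -k ../k8s/crd
helm install --create-namespace --namespace garage garage ./garage \
  --set garage.kubernetesSkipCrd=true
```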
+
 After deploying, cluster layout must be configured manually as described in [Creating a cluster layout](@/documentation/quick-start/_index.md#creating-a-cluster-layout). Use the following command to access the garage CLI:

 ```bash
@@ -86,3 +93,62 @@ helm delete --namespace garage garage
 ```

 Note that this will leave behind the custom CRD `garagenodes.deuxfleurs.fr`, which must be removed manually if desired.
+
+## Increase PVC size on running Garage instances
+
+Since the Garage Helm chart creates the data and meta PVCs from `StatefulSet` templates, increasing the PVC size can be a bit tricky.
+
+### Confirm the `StorageClass` used for Garage supports volume expansion
+
+Confirm the storage class used for Garage:
+
+```bash
+kubectl -n garage get pvc
+NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     VOLUMEATTRIBUTESCLASS   AGE
+data-garage-0   Bound    pvc-080360c9-8ce3-4acf-8579-1701e57b7f3f   30Gi       RWO            longhorn-local   <unset>                 77d
+data-garage-1   Bound    pvc-ab8ba697-6030-4fc7-ab3c-0d6df9e3dbc0   30Gi       RWO            longhorn-local   <unset>                 5d8h
+data-garage-2   Bound    pvc-3ab37551-0231-4604-986d-136d0fd950ec   30Gi       RWO            longhorn-local   <unset>                 5d5h
+meta-garage-0   Bound    pvc-3b457302-3023-4169-846e-c928c5f2ea65   3Gi        RWO            longhorn-local   <unset>                 77d
+meta-garage-1   Bound    pvc-49ace2b9-5c85-42df-9247-51c4cf64b460   3Gi        RWO            longhorn-local   <unset>                 5d8h
+meta-garage-2   Bound    pvc-99e2e50f-42b4-4128-ae2f-b52629259723   3Gi        RWO            longhorn-local   <unset>                 5d5h
+```
+
+In this case, the storage class is `longhorn-local`. Now, check whether `ALLOWVOLUMEEXPANSION` is true for the `StorageClass` in use:
+
+```bash
+kubectl get storageclasses.storage.k8s.io longhorn-local
+NAME             PROVISIONER          RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+longhorn-local   driver.longhorn.io   Delete          Immediate           true                   103d
+```
+
+If your `StorageClass` does not support volume expansion, double-check whether you can enable it. Otherwise, your only real option is to spin up a new Garage cluster with increased size and migrate all data over.
+
+If your `StorageClass` supports expansion, you are free to continue.
+
+### Increase the size of the PVCs
+
+Increase the size of all PVCs to your desired size:
+
+```bash
+kubectl -n garage edit pvc data-garage-0
+kubectl -n garage edit pvc data-garage-1
+kubectl -n garage edit pvc data-garage-2
+kubectl -n garage edit pvc meta-garage-0
+kubectl -n garage edit pvc meta-garage-1
+kubectl -n garage edit pvc meta-garage-2
+```
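The same edits can be scripted with `kubectl patch` instead of the interactive editor. A sketch; the `60Gi`/`6Gi` target sizes are placeholders to adapt:

```bash
# Patch the storage request of each PVC non-interactively.
for pvc in data-garage-0 data-garage-1 data-garage-2; do
  kubectl -n garage patch pvc "$pvc" \
    -p '{"spec":{"resources":{"requests":{"storage":"60Gi"}}}}'
done
for pvc in meta-garage-0 meta-garage-1 meta-garage-2; do
  kubectl -n garage patch pvc "$pvc" \
    -p '{"spec":{"resources":{"requests":{"storage":"6Gi"}}}}'
done
```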
+
+### Increase the size of the `StatefulSet` PVC template
+
+This is an optional step, but if it is not done, future instances of Garage will be created with the original size from the template.
+
+```bash
+kubectl -n garage delete sts --cascade=orphan garage
+statefulset.apps "garage" deleted
+```
+
+This removes the Garage `StatefulSet` but leaves the pods running. It may seem destructive, but it needs to be done this way since edits to the size of PVC templates are prohibited.
+
+### Redeploy the `StatefulSet`
+
+Now the size of future PVCs can be increased, and the Garage Helm chart can be upgraded. The new `StatefulSet` should take ownership of the orphaned pods again.
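The redeploy step itself is a plain chart upgrade. A sketch reusing the chart path and values file from earlier on this page:

```bash
# Recreate the StatefulSet; it re-adopts the orphaned pods via its selector.
helm upgrade --namespace garage garage ./garage -f values.override.yaml
```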
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of writing, which is `v1.0.1`, but it's up to you
+We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of writing, which is `v1.3.0`, but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.0.1
+sudo docker pull dxflrs/garage:v1.3.0
 ```

 ## Deploying and configuring Garage
@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.0.1
+  dxflrs/garage:v1.3.0
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`:
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.0.1
+    image: dxflrs/garage:v1.3.0
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -28,6 +28,7 @@ StateDirectory=garage
 DynamicUser=true
 ProtectHome=true
+NoNewPrivileges=true
 LimitNOFILE=42000

 [Install]
 WantedBy=multi-user.target
@@ -71,7 +71,7 @@ The entire procedure would look something like this:

 2. Take each node offline individually to back up its metadata folder, bring them back online once the backup is done.
    You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
-   Do not try to make a backup of the metadata folder of a running node.
+   Do not try to manually copy the metadata folder of a running node.

    **Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
    to take a simultaneous snapshot of the metadata database files of all your
@@ -129,10 +129,10 @@ docker run \
   -d \
   --name garaged \
   -p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-  -v /etc/garage.toml:/path/to/garage.toml \
-  -v /var/lib/garage/meta:/path/to/garage/meta \
-  -v /var/lib/garage/data:/path/to/garage/data \
-  dxflrs/garage:v0.9.4
+  -v /path/to/garage.toml:/etc/garage.toml \
+  -v /path/to/garage/meta:/var/lib/garage/meta \
+  -v /path/to/garage/data:/var/lib/garage/data \
+  dxflrs/garage:v1.3.0
 ```

 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
|||
## Creating a cluster layout
|
||||
|
||||
Creating a cluster layout for a Garage deployment means informing Garage
|
||||
of the disk space available on each node of the cluster
|
||||
as well as the zone (e.g. datacenter) each machine is located in.
|
||||
of the disk space available on each node of the cluster, `-c`,
|
||||
as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.
|
||||
|
||||
For our test deployment, we are using only one node. The way in which we configure
|
||||
it does not matter, you can simply write:
|
||||
For our test deployment, we are have only one node with zone named `dc1` and a
|
||||
capacity of `1G`, though the capacity is ignored for a single node deployment
|
||||
and can be changed later when adding new nodes.
|
||||
|
||||
```bash
|
||||
garage layout assign -z dc1 -c 1G <node_id>
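A staged layout only takes effect once reviewed and applied; the standard follow-up commands from the quick start are:

```bash
garage layout show               # review the staged changes
garage layout apply --version 1  # use the version number that `layout show` reports
```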
@@ -24,7 +24,8 @@ db_engine = "lmdb"

 block_size = "1M"
 block_ram_buffer_max = "256MiB"
+block_max_concurrent_reads = 16
+block_max_concurrent_writes_per_request = 10
 lmdb_map_size = "1T"

 compression_level = 1
@@ -46,6 +47,7 @@ bootstrap_peers = [
 	"212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
 ]

+allow_punycode = false

 [consul_discovery]
 api = "catalog"
@@ -92,29 +94,32 @@ The following gives details about each available configuration option.

 [Environment variables](#env_variables).

-Top-level configuration options:
+Top-level configuration options, in alphabetical order:
+[`allow_punycode`](#allow_punycode),
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
+[`block_max_concurrent_reads`](#block_max_concurrent_reads),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
+[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
+[`consistency_mode`](#consistency_mode),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
-[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
 [`metadata_snapshots_dir`](#metadata_snapshots_dir),
 [`replication_factor`](#replication_factor),
-[`consistency_mode`](#consistency_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
 [`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
 [`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
+[`use_local_tz`](#use_local_tz).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -151,13 +156,17 @@ The `[admin]` section:

 ### Environment variables {#env_variables}

-The following configuration parameter must be specified as an environment
-variable, it does not exist in the configuration file:
+The following configuration parameters must be specified as environment variables;
+they do not exist in the configuration file:

-- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
+- `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
   Garage daemon send its logs to `syslog` (using the libc `syslog` function)
   instead of printing to stderr.

+- `GARAGE_LOG_TO_JOURNALD` (since `v1.2.0`): set this to `1` or `true` to make the
+  Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
+  instead of printing to stderr.
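For a systemd-managed install, such variables are typically set through a unit drop-in. A hypothetical sketch, assuming the `garage.service` unit from the cookbook and a binary built with the corresponding `journald` feature:

```bash
# Open a drop-in editor for the garage unit, then restart it.
sudo systemctl edit garage
# In the editor that opens, add:
#   [Service]
#   Environment=GARAGE_LOG_TO_JOURNALD=true
sudo systemctl restart garage
```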
+
 The following environment variables can be used to override the corresponding
 values in the configuration file:
@@ -169,7 +178,7 @@ values in the configuration file:

 ### Top-level configuration options

-#### `replication_factor` {#replication_factor}
+#### `replication_factor` (since `v1.0.0`) {#replication_factor}

 The replication factor can be any positive integer smaller than or equal to the node count in your cluster.
 The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.
@@ -217,7 +226,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
 routine operation for Garage, although we cannot guarantee you that everything
 will go right in such an extreme scenario.

-#### `consistency_mode` {#consistency_mode}
+#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}

 The consistency mode setting determines the read and write behaviour of your cluster.
@@ -300,7 +309,7 @@ data_dir = [
 See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
 on how to operate Garage in such a setup.

-#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
+#### `metadata_snapshots_dir` (since `v1.1.0`) {#metadata_snapshots_dir}

 The directory in which Garage will store metadata snapshots when it
 performs a snapshot of the metadata database, either when instructed to do
@@ -327,6 +336,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
 | --------- | ----------------- | ------------- |
 | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
 | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
+| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
 | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |

 Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
@@ -363,6 +373,14 @@ LMDB works very well, but is known to have the following limitations:
   so it is not the best choice for high-performance storage clusters,
   but it should work fine in many cases.

+- Fjall: a storage engine based on LSM trees, which theoretically allows for
+  higher write throughput than other storage engines that are based on B-trees.
+  Using Fjall could potentially improve Garage's performance significantly in
+  write-heavy workloads. **Support for Fjall is experimental at this point**,
+  we have added it to Garage for evaluation purposes only. **Do not use it for
+  production-critical workloads.**
+
 It is possible to convert Garage's metadata directory from one format to another
 using the `garage convert-db` command, which should be used as follows:
@@ -400,6 +418,7 @@ Here is how this option impacts the different database engines:
 |----------|------------------------------------|-------------------------------|
 | Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
 | LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
+| Fjall | default options | not supported |

 Note that the Sqlite database is always run in `WAL` mode (`PRAGMA journal_mode = WAL`).
@@ -416,7 +435,7 @@ at the cost of a moderate drop in write performance.
 Similarly to `metadata_fsync`, this is likely not necessary
 if geographical replication is used.

-#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
+#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}

 If this value is set, Garage will automatically take a snapshot of the metadata
 DB file at a regular interval and save it in the metadata directory.
@@ -453,7 +472,7 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.

-#### `use_local_tz` {#use_local_tz}
+#### `use_local_tz` (since `v1.1.0`) {#use_local_tz}

 By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
 `use_local_tz` configuration value to `true` if you want Garage to run the
@@ -475,7 +494,7 @@ files will remain available. This however means that chunks from existing files
 will not be deduplicated with chunks from newly uploaded files, meaning you
 might use more storage space than is optimally possible.

-#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
+#### `block_ram_buffer_max` (since `v0.9.4`) {#block_ram_buffer_max}

 A limit on the total size of data blocks kept in RAM by S3 API nodes awaiting
 to be sent to storage nodes asynchronously.
@@ -506,6 +525,37 @@ node.

 The default value is 256MiB.

+#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
+
+The maximum number of blocks (individual files in the data directory) open
+simultaneously for reading.
+
+Reducing this number does not limit the number of data blocks that can be
+transferred through the network simultaneously. This mechanism was added
+purely as a backpressure mechanism for HDD read speed: it helps avoid a situation
+where too many requests are coming in and Garage is reading too many block
+files simultaneously, thus not making timely progress on any of the reads.
+
+When a request to read a data block comes in through the network, the request
+waits for one of the `block_max_concurrent_reads` slots to become available
+(internally implemented using a semaphore). Once it has acquired a read
+slot, it reads the entire block file to RAM and frees the slot as soon as the
+block file has been read. Only after the slot is released will the
+block's data start being transferred over the network. If the request fails to
+acquire a reading slot within 15 seconds, it fails with a timeout error.
+Timeout events can be monitored through the `block_read_semaphore_timeouts`
+metric in Prometheus: a non-zero number of such events indicates an I/O
+bottleneck on HDD read speed.
+
+#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
+
+This parameter is designed to adapt to the concurrent write performance of
+different storage media. It sets the maximum number of parallel block writes
+per PUT request. Higher values improve throughput but increase memory usage.
+
+Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.
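As a sketch, both knobs could be raised together on an NVMe-backed node by appending to the configuration file shown at the top of this page and restarting; the values below are illustrative placeholders, not recommendations taken from this diff:

```bash
# Append tuning options to the Garage config (path assumed from earlier examples).
cat >> /etc/garage.toml <<'EOF'
block_max_concurrent_reads = 64
block_max_concurrent_writes_per_request = 20
EOF
sudo systemctl restart garage
```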
+
 #### `lmdb_map_size` {#lmdb_map_size}

 This parameter can be used to set the map size used by LMDB,
@@ -562,7 +612,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
 port number to the same internal port number. This means that if you have several nodes running
 behind a NAT, they should each use a different RPC port number.

-#### `rpc_bind_outgoing`(since v0.9.2) {#rpc_bind_outgoing}
+#### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing}

 If enabled, pre-bind all sockets for outgoing connections to the same IP address
 used for listening (the IP address specified in `rpc_bind_addr`) before
@@ -604,7 +654,7 @@ be obtained by running `garage node id` and then included directly in the
 key will be returned by `garage node id` and you will have to add the IP
 yourself.

-### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
+#### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}

 Garage checks the permissions of your secret files to make sure they're not
 world-readable. In some cases, the check might fail and consider your files as
@@ -616,6 +666,13 @@ permission verification.
 Alternatively, you can set the `GARAGE_ALLOW_WORLD_READABLE_SECRETS`
 environment variable to `true` to bypass the permissions check.

+#### `allow_punycode` {#allow_punycode}
+
+Allow creating buckets with names containing punycode. When used for buckets served
+as websites, this allows using almost any unicode character in the domain name.
+
+Defaults to `false`.
+
 ### The `[consul_discovery]` section

 Garage supports discovering other nodes of the cluster using Consul. For this
@@ -23,17 +23,17 @@ Feel free to open a PR to suggest fixes to this table. Minio is missing because the…
 - 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.

 ## High-level features

 | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
 |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
-| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
+| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
 | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
 | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓ | ✅ |
 | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌ | ✅ | ✅ | ✅ |
 | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌ | ✅ | ✅ | ✅ (❓) |
 | [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
 | [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |

 *Note:* OpenIO does not say if it supports presigned URLs. Because it is part
 of signature v4 and they claim they support it without additional precisions,
|
|||
```json
|
||||
{
|
||||
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"garageVersion": "v1.0.1",
|
||||
"garageVersion": "v1.3.0",
|
||||
"garageFeatures": [
|
||||
"k2v",
|
||||
"lmdb",
|
||||
|
|
|
|||
flake.lock (generated, 16 lines changed):

@@ -50,17 +50,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1736692550,
-        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
+        "lastModified": 1763977559,
+        "narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       }
     },
@@ -80,17 +80,17 @@
       ]
     },
     "locked": {
-      "lastModified": 1738549608,
-      "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
+      "lastModified": 1763952169,
+      "narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     },
     "original": {
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     }
   },
flake.nix (19 lines changed):

@@ -2,13 +2,13 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 24.11 as of 2025-01-12
+  # Nixpkgs 25.05 as of 2025-11-24
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
+    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";

-  # Rust overlay as of 2025-02-03
+  # Rust overlay as of 2025-11-24
   inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
+    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
   inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

   inputs.crane.url = "github:ipetkov/crane";
@@ -30,6 +30,10 @@
         inherit system nixpkgs crane rust-overlay extraTestEnv;
         release = false;
       }).garage-test;
+      lints = (compile {
+        inherit system nixpkgs crane rust-overlay;
+        release = false;
+      });
     in
     {
       packages = {
@@ -53,6 +57,13 @@
         tests-sqlite = testWith {
           GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
         };
+        tests-fjall = testWith {
+          GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
+        };
+
+        # lints (fmt, clippy)
+        fmt = lints.garage-cargo-fmt;
+        clippy = lints.garage-cargo-clippy;
       };

       # ---- development shell, for making native builds only ----
@@ -48,7 +48,7 @@ let

 inherit (pkgs) lib stdenv;

-toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
+toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
   targets = lib.optionals (target != null) [ rustTarget ];
   extensions = [
     "rust-src"
@@ -68,12 +68,13 @@ let
 rootFeatures = if features != null then
   features
 else
-  ([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
+  ([ "bundled-libs" "lmdb" "sqlite" "fjall" "k2v" ] ++ (lib.optionals release [
     "consul-discovery"
     "kubernetes-discovery"
     "metrics"
     "telemetry-otlp"
     "syslog"
+    "journald"
   ]));

 featuresStr = lib.concatStringsSep "," rootFeatures;
@@ -189,4 +190,15 @@ in rec {
     pkgs.cacert
   ];
 } // extraTestEnv);
+
+# ---- source code linting ----
+
+garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
+  cargoExtraArgs = "";
+});
+
+garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
+  cargoArtifacts = garage-deps;
+  cargoClippyExtraArgs = "--all-targets -- -D warnings";
+});
 }
@@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
 FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")

 export RUST_BACKTRACE=1
-export RUST_LOG=garage=info,garage_api=debug
+export RUST_LOG=garage=info,garage_api_common=debug,garage_api_s3=debug
 MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"

 if [ -z "$GARAGE_BIN" ]; then
@@ -1,6 +1,7 @@
 export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
 export AWS_DEFAULT_REGION='garage'
+export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
 # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
 function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
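With these exports and the wrapper in place, any subsequent `aws` call in the dev scripts transparently targets the local test cluster; for example (hypothetical usage, the bucket name is a placeholder):

```bash
aws s3 ls                                  # served by http://127.0.0.1:3911
aws s3 cp ./some-file s3://my-bucket/some-file
```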
@@ -1,3 +1,3 @@
 # Garage helm3 chart

-Documentation is located [here](/doc/book/cookbook/kubernetes.md).
+Documentation is located [here](https://garagehq.deuxfleurs.fr/documentation/cookbook/kubernetes/).
@@ -1,24 +1,18 @@
 apiVersion: v2
 name: garage
 description: S3-compatible object store for small self-hosted geo-distributed deployments
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
+version: 0.7.3
+appVersion: "v1.3.1"
+home: https://garagehq.deuxfleurs.fr/
+icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg

-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+keywords:
+  - geo-distributed
+  - read-after-write-consistency
+  - s3-compatible

-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-# It is recommended to use it with quotes.
-appVersion: "v1.0.1"
+sources:
+  - https://git.deuxfleurs.fr/Deuxfleurs/garage.git
+
+maintainers: []
@@ -1,9 +1,15 @@
 # garage

+*(chart version / type / appVersion badges)*
+
 S3-compatible object store for small self-hosted geo-distributed deployments

+**Homepage:** <https://garagehq.deuxfleurs.fr/>
+
+## Source Code
+
+* <https://git.deuxfleurs.fr/Deuxfleurs/garage.git>
+
 ## Values

 | Key | Type | Default | Description |
@@ -23,6 +29,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
 | garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
+| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
 | garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
 | garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
 | garage.rpcBindAddr | string | `"[::]:3901"` | |
 | garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
@@ -49,6 +56,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | initImage.pullPolicy | string | `"IfNotPresent"` | |
 | initImage.repository | string | `"busybox"` | |
 | initImage.tag | string | `"stable"` | |
+| livenessProbe | object | `{}` | Specifies a livenessProbe |
 | monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
 | monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
 | monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
@@ -71,6 +79,7 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | podSecurityContext.runAsGroup | int | `1000` | |
 | podSecurityContext.runAsNonRoot | bool | `true` | |
 | podSecurityContext.runAsUser | int | `1000` | |
+| readinessProbe | object | `{}` | Specifies a readinessProbe |
 | resources | object | `{}` | |
 | securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
 | securityContext.readOnlyRootFilesystem | bool | `true` | |
@ -19,6 +19,10 @@ data:
    compression_level = {{ .Values.garage.compressionLevel }}

    {{- if .Values.garage.metadataAutoSnapshotInterval }}
    metadata_auto_snapshot_interval = {{ .Values.garage.metadataAutoSnapshotInterval | quote }}
    {{- end }}

    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
    # rpc_secret will be populated by the init container from a k8s secret object
    rpc_secret = "__RPC_SECRET_REPLACE__"
script/helm/garage/templates/service-headless.yaml (new file, 22 lines)
@ -0,0 +1,22 @@
{{- if eq .Values.deployment.kind "StatefulSet" -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "garage.fullname" . }}-headless
  labels:
    {{- include "garage.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - port: {{ .Values.service.s3.api.port }}
      targetPort: 3900
      protocol: TCP
      name: s3-api
    - port: {{ .Values.service.s3.web.port }}
      targetPort: 3902
      protocol: TCP
      name: s3-web
  selector:
    {{- include "garage.selectorLabels" . | nindent 4 }}
{{- end }}
@ -4,6 +4,10 @@ metadata:
  name: {{ include "garage.fullname" . }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
@ -37,4 +41,4 @@ spec:
      name: metrics
  selector:
    {{- include "garage.selectorLabels" . | nindent 4 }}
{{- end }}
{{- end }}
@ -10,12 +10,11 @@ spec:
    {{- include "garage.selectorLabels" . | nindent 6 }}
  {{- if eq .Values.deployment.kind "StatefulSet" }}
  replicas: {{ .Values.deployment.replicaCount }}
  serviceName: {{ include "garage.fullname" . }}
  serviceName: {{ include "garage.fullname" . }}-headless
  podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
  {{- end }}
  template:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
      {{- with .Values.podAnnotations }}
@ -79,15 +78,14 @@ spec:
            {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          # TODO
          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: 3900
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: 3900
          {{- with .Values.livenessProbe }}
          livenessProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.readinessProbe }}
          readinessProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      volumes:
@ -21,6 +21,10 @@ garage:
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
  compressionLevel: "1"

  # -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval
  metadataAutoSnapshotInterval: ""

  rpcBindAddr: "[::]:3901"
  # -- If not given, a random secret will be generated and stored in a Secret object
  rpcSecret: ""
@ -120,6 +124,8 @@ service:
  # - NodePort (+ Ingress)
  # - LoadBalancer
  type: ClusterIP
  # -- Annotations to add to the service
  annotations: {}
  s3:
    api:
      port: 3900
@ -191,6 +197,21 @@ resources: {}
#   cpu: 100m
#   memory: 512Mi

# -- Specifies a livenessProbe
livenessProbe: {}
# httpGet:
#   path: /health
#   port: 3903
# initialDelaySeconds: 5
# periodSeconds: 30

# -- Specifies a readinessProbe
readinessProbe: {}
# httpGet:
#   path: /health
#   port: 3903
# initialDelaySeconds: 5
# periodSeconds: 30

nodeSelector: {}

tolerations: []
script/k8s/crd/garagenodes.deuxfleurs.fr.yaml (new file, 43 lines)
@ -0,0 +1,43 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: garagenodes.deuxfleurs.fr
spec:
  conversion:
    strategy: None
  group: deuxfleurs.fr
  names:
    kind: GarageNode
    listKind: GarageNodeList
    plural: garagenodes
    singular: garagenode
  scope: Namespaced
  versions:
    - name: v1
      schema:
        openAPIV3Schema:
          description: Auto-generated derived type for Node via `CustomResource`
          properties:
            spec:
              properties:
                address:
                  format: ip
                  type: string
                hostname:
                  type: string
                port:
                  format: uint16
                  minimum: 0
                  type: integer
              required:
                - address
                - hostname
                - port
              type: object
          required:
            - spec
          title: GarageNode
          type: object
      served: true
      storage: true
      subresources: {}
script/k8s/crd/kustomization.yaml (new file, 5 lines)
@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - garagenodes.deuxfleurs.fr.yaml
@ -34,6 +34,8 @@ in
    jq
  ];
  shellHook = ''
    export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'

    function to_s3 {
      aws \
        --endpoint-url https://garage.deuxfleurs.fr \
@ -1,12 +1,12 @@
[package]
name = "garage_api_admin"
version = "1.0.1"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Admin API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
readme = "../../../README.md"

[lib]
path = "lib.rs"
@ -22,7 +22,7 @@ garage_api_common.workspace = true

argon2.workspace = true
async-trait.workspace = true
err-derive.workspace = true
thiserror.workspace = true
hex.workspace = true
tracing.workspace = true
@ -277,7 +277,7 @@ pub async fn handle_create_bucket(
    let helper = garage.locked_helper().await;

    if let Some(ga) = &req.global_alias {
        if !is_valid_bucket_name(ga) {
        if !is_valid_bucket_name(ga, garage.config.allow_punycode) {
            return Err(Error::bad_request(format!(
                "{}: {}",
                ga, INVALID_BUCKET_NAME_MESSAGE
@ -292,7 +292,7 @@ pub async fn handle_create_bucket(
    }

    if let Some(la) = &req.local_alias {
        if !is_valid_bucket_name(&la.alias) {
        if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) {
            return Err(Error::bad_request(format!(
                "{}: {}",
                la.alias, INVALID_BUCKET_NAME_MESSAGE
@ -382,7 +382,7 @@ pub async fn handle_delete_bucket(
    for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
        if *active {
            helper
                .unset_local_bucket_alias(bucket.id, key_id, alias)
                .purge_local_bucket_alias(bucket.id, key_id, alias)
                .await?;
        }
    }
@ -1,8 +1,8 @@
use std::convert::TryFrom;

use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use thiserror::Error;

pub use garage_model::helper::error::Error as HelperError;
@ -16,20 +16,17 @@ use garage_api_common::helpers::*;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
    #[error(display = "{}", _0)]
    #[error("{0}")]
    /// Error from common error
    Common(#[error(source)] CommonError),
    Common(#[from] CommonError),

    // Category: cannot process
    /// The API access key does not exist
    #[error(display = "Access key not found: {}", _0)]
    #[error("Access key not found: {0}")]
    NoSuchAccessKey(String),

    /// In Import key, the key already exists
    #[error(
        display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
        _0
    )]
    #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
    KeyAlreadyExists(String),
}
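The err-derive to thiserror migration above follows one mechanical pattern that recurs in every error enum touched by this changeset. A minimal sketch of the mapping, using an illustrative enum rather than Garage's own types:

use thiserror::Error;

#[derive(Debug, Error)]
pub enum AppError {
    // err-derive: #[error(display = "I/O error: {}", _0)] + #[error(source)]
    // thiserror:  #[error("I/O error: {0}")] + #[from]
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    // Positional `{}` + `_0` arguments become inline `{0}` captures.
    #[error("Not found: {0}")]
    NotFound(String),
}

fn read_config(path: &str) -> Result<String, AppError> {
    // `?` converts automatically because #[from] derives From<std::io::Error>.
    Ok(std::fs::read_to_string(path)?)
}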
@ -1,12 +1,12 @@
[package]
name = "garage_api_common"
version = "1.0.1"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
readme = "../../../README.md"

[lib]
path = "lib.rs"
@ -18,16 +18,20 @@ garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true

base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
thiserror.workspace = true
hex.workspace = true
hmac.workspace = true
idna.workspace = true
md-5.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true

futures.workspace = true
@ -1,7 +1,7 @@
use std::convert::TryFrom;

use err_derive::Error;
use hyper::StatusCode;
use thiserror::Error;

use garage_util::error::Error as GarageError;
@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
pub enum CommonError {
    // ---- INTERNAL ERRORS ----
    /// Error related to deeper parts of Garage
    #[error(display = "Internal error: {}", _0)]
    InternalError(#[error(source)] GarageError),
    #[error("Internal error: {0}")]
    InternalError(#[from] GarageError),

    /// Error related to Hyper
    #[error(display = "Internal error (Hyper error): {}", _0)]
    Hyper(#[error(source)] hyper::Error),
    #[error("Internal error (Hyper error): {0}")]
    Hyper(#[from] hyper::Error),

    /// Error related to HTTP
    #[error(display = "Internal error (HTTP error): {}", _0)]
    Http(#[error(source)] http::Error),
    #[error("Internal error (HTTP error): {0}")]
    Http(#[from] http::Error),

    // ---- GENERIC CLIENT ERRORS ----
    /// Proper authentication was not provided
    #[error(display = "Forbidden: {}", _0)]
    #[error("Forbidden: {0}")]
    Forbidden(String),

    /// Generic bad request response with custom message
    #[error(display = "Bad request: {}", _0)]
    #[error("Bad request: {0}")]
    BadRequest(String),

    /// The client sent a header with invalid value
    #[error(display = "Invalid header value: {}", _0)]
    InvalidHeader(#[error(source)] hyper::header::ToStrError),
    #[error("Invalid header value: {0}")]
    InvalidHeader(#[from] hyper::header::ToStrError),

    // ---- SPECIFIC ERROR CONDITIONS ----
    // These have to be error codes referenced in the S3 spec here:
    // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
    /// The bucket requested don't exists
    #[error(display = "Bucket not found: {}", _0)]
    #[error("Bucket not found: {0}")]
    NoSuchBucket(String),

    /// Tried to create a bucket that already exist
    #[error(display = "Bucket already exists")]
    #[error("Bucket already exists")]
    BucketAlreadyExists,

    /// Tried to delete a non-empty bucket
    #[error(display = "Tried to delete a non-empty bucket")]
    #[error("Tried to delete a non-empty bucket")]
    BucketNotEmpty,

    // Category: bad request
    /// Bucket name is not valid according to AWS S3 specs
    #[error(display = "Invalid bucket name: {}", _0)]
    #[error("Invalid bucket name: {0}")]
    InvalidBucketName(String),
}
@ -14,9 +14,9 @@ use crate::common_error::{
};
use crate::helpers::*;

pub fn find_matching_cors_rule<'a>(
pub fn find_matching_cors_rule<'a, B>(
    bucket_params: &'a BucketParams,
    req: &Request<impl Body>,
    req: &Request<B>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
    if let Some(cors_config) = bucket_params.cors_config.get() {
        if let Some(origin) = req.headers().get("Origin") {
@ -132,8 +132,8 @@ pub async fn handle_options_api(
    }
}

pub fn handle_options_for_bucket(
    req: &Request<IncomingBody>,
pub fn handle_options_for_bucket<B>(
    req: &Request<B>,
    bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
    let origin = req
@ -58,6 +58,12 @@ pub trait ApiHandler: Send + Sync + 'static {
        req: Request<IncomingBody>,
        endpoint: Self::Endpoint,
    ) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;

    /// Returns the key id used to authenticate this request. The ID returned must be safe to
    /// log.
    fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
        None
    }
}

pub struct ApiServer<A: ApiHandler> {
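The new key_id_from_request hook lets each API server surface the authenticated key ID for access logging. A hedged sketch of what an override might look like inside an ApiHandler impl; the header parsing below is illustrative, not taken from Garage:

// Illustrative only: extract the AWSv4 Credential key ID for logging.
fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
    // Return only the key ID, never the secret: the result ends up in logs.
    req.headers()
        .get("authorization")?
        .to_str()
        .ok()?
        .split("Credential=")
        .nth(1)?
        .split('/')
        .next()
        .map(|key_id| key_id.to_string())
}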
@ -142,19 +148,20 @@ impl<A: ApiHandler> ApiServer<A> {
    ) -> Result<Response<BoxBody<A::Error>>, http::Error> {
        let uri = req.uri().clone();

        if let Ok(forwarded_for_ip_addr) =
        let source = if let Ok(forwarded_for_ip_addr) =
            forwarded_headers::handle_forwarded_for_headers(req.headers())
        {
            info!(
                "{} (via {}) {} {}",
                forwarded_for_ip_addr,
                addr,
                req.method(),
                uri
            );
            format!("{forwarded_for_ip_addr} (via {addr})")
        } else {
            info!("{} {} {}", addr, req.method(), uri);
        }
            format!("{addr}")
        };
        // we only do this to log the access key, so we can discard any error
        let key = self
            .api_handler
            .key_id_from_request(&req)
            .map(|k| format!("(key {k}) "))
            .unwrap_or_default();
        info!("{source} {key}{} {uri}", req.method());
        debug!("{:?}", req);

        let tracer = opentelemetry::global::tracer("garage");
@ -343,7 +350,11 @@ where

    while !*must_exit.borrow() {
        let (stream, client_addr) = tokio::select! {
            acc = listener.accept() => acc?,
            acc = listener.accept() => match acc {
                Ok(r) => r,
                Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
                Err(e) => return Err(e.into()),
            },
            _ = must_exit.changed() => continue,
        };
@ -8,7 +8,6 @@ use hyper::{
    body::{Body, Bytes},
    Request, Response,
};
use idna::domain_to_unicode;
use serde::{Deserialize, Serialize};

use garage_model::bucket_table::BucketParams;
@ -97,7 +96,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
            authority
        ))),
    };
    authority.map(|h| domain_to_unicode(h).0)
    authority.map(|h| h.to_ascii_lowercase())
}

/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
src/api/common/signature/body.rs (new file, 135 lines)
@ -0,0 +1,135 @@
use std::sync::Mutex;

use futures::prelude::*;
use futures::stream::BoxStream;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::{Bytes, Frame};
use serde::Deserialize;
use tokio::sync::mpsc;
use tokio::task;

use super::*;

use crate::signature::checksum::*;

pub struct ReqBody {
    // why need mutex to be sync??
    pub(crate) stream: Mutex<BoxStream<'static, Result<Frame<Bytes>, Error>>>,
    pub(crate) checksummer: Checksummer,
    pub(crate) expected_checksums: ExpectedChecksums,
    pub(crate) trailer_algorithm: Option<ChecksumAlgorithm>,
}

pub type StreamingChecksumReceiver = task::JoinHandle<Result<Checksums, Error>>;

impl ReqBody {
    pub fn add_expected_checksums(&mut self, more: ExpectedChecksums) {
        if more.md5.is_some() {
            self.expected_checksums.md5 = more.md5;
        }
        if more.sha256.is_some() {
            self.expected_checksums.sha256 = more.sha256;
        }
        if more.extra.is_some() {
            self.expected_checksums.extra = more.extra;
        }
        self.checksummer.add_expected(&self.expected_checksums);
    }

    pub fn add_md5(&mut self) {
        self.checksummer.add_md5();
    }

    // ============ non-streaming =============

    pub async fn json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> {
        let body = self.collect().await?;
        let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
        Ok(resp)
    }

    pub async fn collect(self) -> Result<Bytes, Error> {
        self.collect_with_checksums().await.map(|(b, _)| b)
    }

    pub async fn collect_with_checksums(mut self) -> Result<(Bytes, Checksums), Error> {
        let stream: BoxStream<_> = self.stream.into_inner().unwrap();
        let bytes = BodyExt::collect(StreamBody::new(stream)).await?.to_bytes();

        self.checksummer.update(&bytes);
        let checksums = self.checksummer.finalize();
        checksums.verify(&self.expected_checksums)?;

        Ok((bytes, checksums))
    }

    // ============ streaming =============

    pub fn streaming_with_checksums(
        self,
    ) -> (
        BoxStream<'static, Result<Bytes, Error>>,
        StreamingChecksumReceiver,
    ) {
        let Self {
            stream,
            mut checksummer,
            mut expected_checksums,
            trailer_algorithm,
        } = self;

        let (frame_tx, mut frame_rx) = mpsc::channel::<Frame<Bytes>>(5);

        let join_checksums = tokio::spawn(async move {
            while let Some(frame) = frame_rx.recv().await {
                match frame.into_data() {
                    Ok(data) => {
                        checksummer = tokio::task::spawn_blocking(move || {
                            checksummer.update(&data);
                            checksummer
                        })
                        .await
                        .unwrap()
                    }
                    Err(frame) => {
                        let trailers = frame.into_trailers().unwrap();
                        let algo = trailer_algorithm.unwrap();
                        expected_checksums.extra = Some(extract_checksum_value(&trailers, algo)?);
                        break;
                    }
                }
            }

            if trailer_algorithm.is_some() && expected_checksums.extra.is_none() {
                return Err(Error::bad_request("trailing checksum was not sent"));
            }

            let checksums = checksummer.finalize();
            checksums.verify(&expected_checksums)?;

            Ok(checksums)
        });

        let stream: BoxStream<_> = stream.into_inner().unwrap();
        let stream = stream.filter_map(move |x| {
            let frame_tx = frame_tx.clone();
            async move {
                match x {
                    Err(e) => Some(Err(e)),
                    Ok(frame) => {
                        if frame.is_data() {
                            let data = frame.data_ref().unwrap().clone();
                            let _ = frame_tx.send(frame).await;
                            Some(Ok(data))
                        } else {
                            let _ = frame_tx.send(frame).await;
                            None
                        }
                    }
                }
            }
        });

        (stream.boxed(), join_checksums)
    }
}
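The design splits each body frame two ways: data frames are forwarded to the caller's stream while a copy of every frame feeds a background checksum task, so verification overlaps with storage writes. A hedged sketch of a consumer, with store_chunk standing in for whatever sink a handler actually uses (it is not a Garage function):

use futures::StreamExt;

// Hypothetical consumer of ReqBody::streaming_with_checksums().
async fn put_object(body: ReqBody) -> Result<(), Error> {
    let (mut data, checksums) = body.streaming_with_checksums();

    // Forward data chunks to storage while digests are computed concurrently.
    while let Some(chunk) = data.next().await {
        store_chunk(chunk?).await?;
    }

    // The join handle fails if an announced trailer checksum is missing
    // or if any computed digest disagrees with what the client declared.
    let _verified = checksums
        .await
        .map_err(|_| Error::internal_error("checksum task panicked"))??;
    Ok(())
}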
@ -11,11 +11,12 @@ use sha2::Sha256;
use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;
use garage_util::error::OkOrMessage;

use garage_model::s3::object_table::*;
use super::*;

use crate::error::*;
pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue};

pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
    HeaderName::from_static("x-amz-checksum-algorithm");
@ -31,8 +32,8 @@ pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
#[derive(Debug, Default, Clone)]
pub struct ExpectedChecksums {
    // base64-encoded md5 (content-md5 header)
    pub md5: Option<String>,
    // content_sha256 (as a Hash / FixedBytes32)
@ -41,7 +42,7 @@ pub(crate) struct ExpectedChecksums {
    pub extra: Option<ChecksumValue>,
}

pub(crate) struct Checksummer {
pub struct Checksummer {
    pub crc32: Option<Crc32>,
    pub crc32c: Option<Crc32c>,
    pub md5: Option<Md5>,
@ -50,7 +51,7 @@ pub(crate) struct Checksummer {
}

#[derive(Default)]
pub(crate) struct Checksums {
pub struct Checksums {
    pub crc32: Option<Crc32Checksum>,
    pub crc32c: Option<Crc32cChecksum>,
    pub md5: Option<Md5Checksum>,
@ -59,34 +60,48 @@ pub(crate) struct Checksums {
}

impl Checksummer {
    pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
        let mut ret = Self {
    pub fn new() -> Self {
        Self {
            crc32: None,
            crc32c: None,
            md5: None,
            sha1: None,
            sha256: None,
        };
        }
    }

        if expected.md5.is_some() || require_md5 {
            ret.md5 = Some(Md5::new());
        }
        if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
            ret.sha256 = Some(Sha256::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
            ret.crc32 = Some(Crc32::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
            ret.crc32c = Some(Crc32c::default());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
            ret.sha1 = Some(Sha1::new());
    pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self {
        let mut ret = Self::new();
        ret.add_expected(expected);
        if add_md5 {
            ret.add_md5();
        }
        ret
    }

    pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
    pub fn add_md5(&mut self) {
        self.md5 = Some(Md5::new());
    }

    pub fn add_expected(&mut self, expected: &ExpectedChecksums) {
        if expected.md5.is_some() {
            self.md5 = Some(Md5::new());
        }
        if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
            self.sha256 = Some(Sha256::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
            self.crc32 = Some(Crc32::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
            self.crc32c = Some(Crc32c::default());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
            self.sha1 = Some(Sha1::new());
        }
    }

    pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
        match algo {
            Some(ChecksumAlgorithm::Crc32) => {
                self.crc32 = Some(Crc32::new());
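Taken together, the refactored constructors compose as below; a minimal usage sketch that relies only on the functions visible in the hunks above and in the new body.rs:

fn checksum_roundtrip(expected: &ExpectedChecksums, data: &[u8]) -> Result<Checksums, Error> {
    // init() is now new() + add_expected() + an optional add_md5().
    let mut checksummer = Checksummer::init(expected, /* add_md5 */ true);

    // Feed the payload; streaming callers invoke update() once per chunk.
    checksummer.update(data);

    // finalize() yields the digests; verify() compares them against the
    // values the client declared in its headers.
    let checksums = checksummer.finalize();
    checksums.verify(expected)?;
    Ok(checksums)
}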
@ -105,7 +120,7 @@ impl Checksummer {
        self
    }

    pub(crate) fn update(&mut self, bytes: &[u8]) {
    pub fn update(&mut self, bytes: &[u8]) {
        if let Some(crc32) = &mut self.crc32 {
            crc32.update(bytes);
        }
@ -123,7 +138,7 @@ impl Checksummer {
        }
    }

    pub(crate) fn finalize(self) -> Checksums {
    pub fn finalize(self) -> Checksums {
        Checksums {
            crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
            crc32c: self
@ -183,153 +198,56 @@ impl Checksums {

// ----

#[derive(Default)]
pub(crate) struct MultipartChecksummer {
    pub md5: Md5,
    pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
    Crc32(Crc32),
    Crc32c(Crc32c),
    Sha1(Sha1),
    Sha256(Sha256),
}

impl MultipartChecksummer {
    pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
        Self {
            md5: Md5::new(),
            extra: match algo {
                None => None,
                Some(ChecksumAlgorithm::Crc32) => {
                    Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
                }
                Some(ChecksumAlgorithm::Crc32c) => {
                    Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
                }
                Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
                Some(ChecksumAlgorithm::Sha256) => {
                    Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
                }
            },
        }
    }

    pub(crate) fn update(
        &mut self,
        etag: &str,
        checksum: Option<ChecksumValue>,
    ) -> Result<(), Error> {
        self.md5
            .update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
        match (&mut self.extra, checksum) {
            (None, _) => (),
            (
                Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
                Some(ChecksumValue::Crc32(x)),
            ) => {
                crc32.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
                Some(ChecksumValue::Crc32c(x)),
            ) => {
                crc32c.write(&x);
            }
            (Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
                sha1.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
                Some(ChecksumValue::Sha256(x)),
            ) => {
                sha256.update(&x);
            }
            (Some(_), b) => {
                return Err(Error::internal_error(format!(
                    "part checksum was not computed correctly, got: {:?}",
                    b
                )))
            }
        }
        Ok(())
    }

    pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
        let md5 = self.md5.finalize()[..].try_into().unwrap();
        let extra = match self.extra {
            None => None,
            Some(MultipartExtraChecksummer::Crc32(crc32)) => {
                Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
            }
            Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
                u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
            )),
            Some(MultipartExtraChecksummer::Sha1(sha1)) => {
                Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
            }
            Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
                sha256.finalize()[..].try_into().unwrap(),
            )),
        };
        (md5, extra)
pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error> {
    match algo {
        "CRC32" => Ok(ChecksumAlgorithm::Crc32),
        "CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
        "SHA1" => Ok(ChecksumAlgorithm::Sha1),
        "SHA256" => Ok(ChecksumAlgorithm::Sha256),
        _ => Err(Error::bad_request("invalid checksum algorithm")),
    }
}

// ----

/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
pub fn request_checksum_algorithm(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        None => Ok(None),
        Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
        Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
        Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
        Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
        Some(x) => parse_checksum_algorithm(x.to_str()?).map(Some),
    }
}

pub fn request_trailer_checksum_algorithm(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
    match headers.get(X_AMZ_TRAILER).map(|x| x.to_str()).transpose()? {
        None => Ok(None),
        Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
        Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
        Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
        Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
        _ => Err(Error::bad_request("invalid checksum algorithm")),
    }
}

/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
pub fn request_checksum_value(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
    let mut ret = vec![];

    if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
        let crc32 = BASE64_STANDARD
            .decode(&crc32_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
        ret.push(ChecksumValue::Crc32(crc32))
    if headers.contains_key(X_AMZ_CHECKSUM_CRC32) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32)?);
    }
    if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
        let crc32c = BASE64_STANDARD
            .decode(&crc32c_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
        ret.push(ChecksumValue::Crc32c(crc32c))
    if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
    }
    if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
        let sha1 = BASE64_STANDARD
            .decode(&sha1_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
        ret.push(ChecksumValue::Sha1(sha1))
    if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
    }
    if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
        let sha256 = BASE64_STANDARD
            .decode(&sha256_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
        ret.push(ChecksumValue::Sha256(sha256))
    if headers.contains_key(X_AMZ_CHECKSUM_SHA256) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha256)?);
    }

    if ret.len() > 1 {
@ -342,48 +260,47 @@ pub(crate) fn request_checksum_value(

/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
pub fn extract_checksum_value(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        Some(x) if x == "CRC32" => {
    algo: ChecksumAlgorithm,
) -> Result<ChecksumValue, Error> {
    match algo {
        ChecksumAlgorithm::Crc32 => {
            let crc32 = headers
                .get(X_AMZ_CHECKSUM_CRC32)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
            Ok(Some(ChecksumValue::Crc32(crc32)))
            Ok(ChecksumValue::Crc32(crc32))
        }
        Some(x) if x == "CRC32C" => {
        ChecksumAlgorithm::Crc32c => {
            let crc32c = headers
                .get(X_AMZ_CHECKSUM_CRC32C)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
            Ok(Some(ChecksumValue::Crc32c(crc32c)))
            Ok(ChecksumValue::Crc32c(crc32c))
        }
        Some(x) if x == "SHA1" => {
        ChecksumAlgorithm::Sha1 => {
            let sha1 = headers
                .get(X_AMZ_CHECKSUM_SHA1)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
            Ok(Some(ChecksumValue::Sha1(sha1)))
            Ok(ChecksumValue::Sha1(sha1))
        }
        Some(x) if x == "SHA256" => {
        ChecksumAlgorithm::Sha256 => {
            let sha256 = headers
                .get(X_AMZ_CHECKSUM_SHA256)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
            Ok(Some(ChecksumValue::Sha256(sha256)))
            Ok(ChecksumValue::Sha256(sha256))
        }
        Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
        None => Ok(None),
    }
}

pub(crate) fn add_checksum_response_headers(
pub fn add_checksum_response_headers(
    checksum: &Option<ChecksumValue>,
    mut resp: http::response::Builder,
) -> http::response::Builder {
@ -1,4 +1,4 @@
use err_derive::Error;
use thiserror::Error;

use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
@ -6,18 +6,22 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
    #[error(display = "{}", _0)]
    #[error("{0}")]
    /// Error from common error
    Common(CommonError),

    /// Authorization Header Malformed
    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
    #[error("Authorization header malformed, unexpected scope: {0}")]
    AuthorizationHeaderMalformed(String),

    // Category: bad request
    /// The request contained an invalid UTF-8 sequence in its path or in other parameters
    #[error(display = "Invalid UTF-8: {}", _0)]
    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
    #[error("Invalid UTF-8: {0}")]
    InvalidUtf8Str(#[from] std::str::Utf8Error),

    /// The provided digest (checksum) value was invalid
    #[error("Invalid digest: {0}")]
    InvalidDigest(String),
}

impl<T> From<T> for Error
@ -2,6 +2,7 @@ use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;

use hyper::header::HeaderName;
use hyper::{body::Incoming as IncomingBody, Request};

use garage_model::garage::Garage;
@ -10,6 +11,8 @@ use garage_util::data::{sha256sum, Hash};

use error::*;

pub mod body;
pub mod checksum;
pub mod error;
pub mod payload;
pub mod streaming;
@ -17,36 +20,73 @@ pub mod streaming;
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";

// ---- Constants used in AWSv4 signatures ----

pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SHA256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
pub const X_AMZ_TRAILER: HeaderName = HeaderName::from_static("x-amz-trailer");

/// Result of `sha256("")`
pub(crate) const EMPTY_STRING_HEX_DIGEST: &str =
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

// Signature calculation algorithm
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
type HmacSha256 = Hmac<Sha256>;

// Possible values for x-amz-content-sha256, in addition to the actual sha256
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

// Used in the computation of StringToSign
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

// ---- enums to describe stuff going on in signature calculation ----

#[derive(Debug)]
pub enum ContentSha256Header {
    UnsignedPayload,
    Sha256Checksum(Hash),
    StreamingPayload { trailer: bool, signed: bool },
}

// ---- top-level functions ----

pub struct VerifiedRequest {
    pub request: Request<streaming::ReqBody>,
    pub access_key: Key,
    pub content_sha256_header: ContentSha256Header,
}

pub async fn verify_request(
    garage: &Garage,
    mut req: Request<IncomingBody>,
    service: &'static str,
) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
    let (api_key, mut content_sha256) =
        payload::check_payload_signature(&garage, &mut req, service).await?;
    let api_key =
        api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
) -> Result<VerifiedRequest, Error> {
    let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;

    let req = streaming::parse_streaming_body(
        &api_key,
    let request = streaming::parse_streaming_body(
        req,
        &mut content_sha256,
        &checked_signature,
        &garage.config.s3_api.s3_region,
        service,
    )?;

    Ok((req, api_key, content_sha256))
}
    let access_key = checked_signature
        .key
        .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
    if expected_sha256 != sha256sum(body) {
        return Err(Error::bad_request(
            "Request content hash does not match signed hash".to_string(),
        ));
    }
    Ok(())
    Ok(VerifiedRequest {
        request,
        access_key,
        content_sha256_header: checked_signature.content_sha256_header,
    })
}

pub fn signing_hmac(
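Call sites change shape accordingly: instead of destructuring a (request, key, hash) tuple, handlers now receive a VerifiedRequest. A hedged sketch of the new call pattern; the surrounding handler function is illustrative, not part of this diff:

async fn handle(garage: &Garage, req: Request<IncomingBody>) -> Result<(), Error> {
    let verified = verify_request(garage, req, "s3").await?;

    // The parsed x-amz-content-sha256 mode now travels with the request.
    match verified.content_sha256_header {
        ContentSha256Header::UnsignedPayload => { /* nothing to check later */ }
        ContentSha256Header::Sha256Checksum(_) => { /* verified when the body is collected */ }
        ContentSha256Header::StreamingPayload { signed, trailer } => {
            let _ = (signed, trailer); // handled by aws-chunked decoding
        }
    }

    let _body: Request<streaming::ReqBody> = verified.request;
    let _key = verified.access_key;
    Ok(())
}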
@ -13,23 +13,9 @@ use garage_util::data::Hash;
use garage_model::garage::Garage;
use garage_model::key_table::*;

use super::LONG_DATETIME;
use super::{compute_scope, signing_hmac};
use super::*;

use crate::encoding::uri_encode;
use crate::signature::error::*;

pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");

pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

pub type QueryMap = HeaderMap<QueryValue>;
pub struct QueryValue {
@ -39,11 +25,18 @@ pub struct QueryValue {
    value: String,
}

#[derive(Debug)]
pub struct CheckedSignature {
    pub key: Option<Key>,
    pub content_sha256_header: ContentSha256Header,
    pub signature_header: Option<String>,
}

pub async fn check_payload_signature(
    garage: &Garage,
    request: &mut Request<IncomingBody>,
    service: &'static str,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
    let query = parse_query_map(request.uri())?;

    if query.contains_key(&X_AMZ_ALGORITHM) {
@ -57,17 +50,46 @@ pub async fn check_payload_signature(
        // Unsigned (anonymous) request
        let content_sha256 = request
            .headers()
            .get("x-amz-content-sha256")
            .filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
        if let Some(content_sha256) = content_sha256 {
            let sha256 = hex::decode(content_sha256)
                .ok()
                .and_then(|bytes| Hash::try_from(&bytes))
                .ok_or_bad_request("Invalid content sha256 hash")?;
            Ok((None, Some(sha256)))
            .get(X_AMZ_CONTENT_SHA256)
            .map(|x| x.to_str())
            .transpose()?;
        Ok(CheckedSignature {
            key: None,
            content_sha256_header: parse_x_amz_content_sha256(content_sha256)?,
            signature_header: None,
        })
    }
}

fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Header, Error> {
    let header = match header {
        Some(x) => x,
        None => return Ok(ContentSha256Header::UnsignedPayload),
    };
    if header == UNSIGNED_PAYLOAD {
        Ok(ContentSha256Header::UnsignedPayload)
    } else if let Some(rest) = header.strip_prefix("STREAMING-") {
        let (trailer, algo) = if let Some(rest2) = rest.strip_suffix("-TRAILER") {
            (true, rest2)
        } else {
            Ok((None, None))
        }
            (false, rest)
        };
        let signed = match algo {
            AWS4_HMAC_SHA256_PAYLOAD => true,
            UNSIGNED_PAYLOAD => false,
            _ => {
                return Err(Error::bad_request(
                    "invalid or unsupported x-amz-content-sha256",
                ))
            }
        };
        Ok(ContentSha256Header::StreamingPayload { trailer, signed })
    } else {
        let sha256 = hex::decode(header)
            .ok()
            .and_then(|bytes| Hash::try_from(&bytes))
            .ok_or_bad_request("Invalid content sha256 hash")?;
        Ok(ContentSha256Header::Sha256Checksum(sha256))
    }
}
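For quick reference, the branches above map x-amz-content-sha256 values to ContentSha256Header as follows; this is a restated summary of the parser's behavior, not an additional code path:

// (header absent)                              -> UnsignedPayload
// "UNSIGNED-PAYLOAD"                           -> UnsignedPayload
// "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"         -> StreamingPayload { signed: true,  trailer: false }
// "STREAMING-UNSIGNED-PAYLOAD-TRAILER"         -> StreamingPayload { signed: false, trailer: true }
// "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" -> StreamingPayload { signed: true,  trailer: true }
// 64 hex characters                            -> Sha256Checksum(hash)
// anything else                                -> Error::bad_request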
@ -76,13 +98,13 @@ async fn check_standard_signature(
    service: &'static str,
    request: &Request<IncomingBody>,
    query: QueryMap,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
    let authorization = Authorization::parse_header(request.headers())?;

    // Verify that all necessary request headers are included in signed_headers
    // The following must be included for all signatures:
    // - the Host header (mandatory)
    // - all x-amz-* headers used in the request
    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
    // AWS also indicates that the Content-Type header should be signed if
    // it is used, but Minio client doesn't sign it so we don't check it for compatibility.
    let signed_headers = split_signed_headers(&authorization)?;
@ -108,18 +130,13 @@ async fn check_standard_signature(

    let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;

    let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
        None
    } else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
        let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
        Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
    } else {
        let bytes = hex::decode(authorization.content_sha256)
            .ok_or_bad_request("Invalid content sha256 hash")?;
        Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid content sha256 hash")?)
    };
    let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;

    Ok((Some(key), content_sha256))
    Ok(CheckedSignature {
        key: Some(key),
        content_sha256_header,
        signature_header: Some(authorization.signature),
    })
}

async fn check_presigned_signature(
@ -127,14 +144,14 @@ async fn check_presigned_signature(
    service: &'static str,
    request: &mut Request<IncomingBody>,
    mut query: QueryMap,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
    let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
    let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;

    // Verify that all necessary request headers are included in signed_headers
    // For AWSv4 pre-signed URLs, the following must be included:
    // - the Host header (mandatory)
    // - all x-amz-* headers used in the request
    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
    let signed_headers = split_signed_headers(&authorization)?;
    verify_signed_headers(request.headers(), &signed_headers)?;
@ -193,7 +210,11 @@ async fn check_presigned_signature(

    // Presigned URLs always use UNSIGNED-PAYLOAD,
    // so there is no sha256 hash to return.
    Ok((Some(key), None))
    Ok(CheckedSignature {
        key: Some(key),
        content_sha256_header: ContentSha256Header::UnsignedPayload,
        signature_header: Some(authorization.signature),
    })
}

pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
@ -247,7 +268,9 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
        return Err(Error::bad_request("Header `Host` should be signed"));
    }
    for (name, _) in headers.iter() {
        if name.as_str().starts_with("x-amz-") {
        // Enforce signature of all x-amz-* headers, except x-amz-content-sha256
        // because it is included in the canonical request in all cases
        if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
            if !signed_headers.contains(name) {
                return Err(Error::bad_request(format!(
                    "Header `{}` should be signed",
@ -396,7 +419,7 @@ pub async fn verify_v4(
// ============ Authorization header, or X-Amz-* query params =========

pub struct Authorization {
    key_id: String,
    pub key_id: String,
    scope: String,
    signed_headers: String,
    signature: String,
@ -405,7 +428,7 @@ pub struct Authorization {
}

impl Authorization {
    fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
    pub fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
        let authorization = headers
            .get(AUTHORIZATION)
            .ok_or_bad_request("Missing authorization header")?
@ -442,13 +465,12 @@ impl Authorization {
            .to_string();

        let content_sha256 = headers
            .get(X_AMZ_CONTENT_SH256)
            .get(X_AMZ_CONTENT_SHA256)
            .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;

        let date = headers
            .get(X_AMZ_DATE)
            .ok_or_bad_request("Missing X-Amz-Date field")
            .map_err(Error::from)?
            .ok_or_bad_request("Missing X-Amz-Date field")?
            .to_str()?;
        let date = parse_date(date)?;
@ -1,84 +1,157 @@
use std::pin::Pin;
use std::sync::Mutex;

use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use garage_model::key_table::Key;
use hmac::Mac;
use http_body_util::StreamBody;
use hyper::body::{Bytes, Incoming as IncomingBody};
use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING};
use hyper::body::{Bytes, Frame, Incoming as IncomingBody};
use hyper::Request;

use garage_util::data::Hash;

use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
use super::*;

use crate::helpers::*;
use crate::signature::error::*;
use crate::signature::payload::{
    STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
};
use crate::helpers::body_stream;
use crate::signature::checksum::*;
use crate::signature::payload::CheckedSignature;

pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

pub type ReqBody = BoxBody<Error>;
pub use crate::signature::body::ReqBody;

pub fn parse_streaming_body(
    api_key: &Key,
    req: Request<IncomingBody>,
    content_sha256: &mut Option<Hash>,
    mut req: Request<IncomingBody>,
    checked_signature: &CheckedSignature,
    region: &str,
    service: &str,
) -> Result<Request<ReqBody>, Error> {
    match req.headers().get(X_AMZ_CONTENT_SH256) {
        Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
            let signature = content_sha256
                .take()
                .ok_or_bad_request("No signature provided")?;
    debug!(
        "Content signature mode: {:?}",
        checked_signature.content_sha256_header
    );

            let secret_key = &api_key
                .state
                .as_option()
                .ok_or_internal_error("Deleted key state")?
                .secret_key;
    match checked_signature.content_sha256_header {
        ContentSha256Header::StreamingPayload { signed, trailer } => {
            // Sanity checks
            if !signed && !trailer {
                return Err(Error::bad_request(
                    "STREAMING-UNSIGNED-PAYLOAD without trailer is not a valid combination",
                ));
            }

            let date = req
                .headers()
                .get(X_AMZ_DATE)
                .ok_or_bad_request("Missing X-Amz-Date field")?
                .to_str()?;
            let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
                .ok_or_bad_request("Invalid date")?;
            let date: DateTime<Utc> = Utc.from_utc_datetime(&date);
            // Remove the aws-chunked component in the content-encoding: header
            // Note: this header is not properly sent by minio client, so don't fail
            // if it is absent from the request.
            if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) {
                if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") {
                    req.headers_mut()
                        .insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap());
                } else if content_encoding != "aws-chunked" {
                    return Err(Error::bad_request(
                        "content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD",
                    ));
                }
            }

            let scope = compute_scope(&date, region, service);
            let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
                .ok_or_internal_error("Unable to build signing HMAC")?;
            // If trailer header is announced, add the calculation of the requested checksum
            let mut checksummer = Checksummer::init(&Default::default(), false);
            let trailer_algorithm = if trailer {
                let algo = Some(
                    request_trailer_checksum_algorithm(req.headers())?
                        .ok_or_bad_request("Missing x-amz-trailer header")?,
                );
                checksummer = checksummer.add(algo);
                algo
            } else {
                None
            };

            // For signed variants, determine signing parameters
            let sign_params = if signed {
                let signature = checked_signature
                    .signature_header
                    .clone()
                    .ok_or_bad_request("No signature provided")?;
                let signature = hex::decode(signature)
                    .ok()
                    .and_then(|bytes| Hash::try_from(&bytes))
                    .ok_or_bad_request("Invalid signature")?;

                let secret_key = checked_signature
                    .key
                    .as_ref()
                    .ok_or_bad_request("Cannot sign streaming payload without signing key")?
                    .state
                    .as_option()
                    .ok_or_internal_error("Deleted key state")?
                    .secret_key
                    .to_string();

                let date = req
                    .headers()
                    .get(X_AMZ_DATE)
                    .ok_or_bad_request("Missing X-Amz-Date field")?
                    .to_str()?;
                let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
                    .ok_or_bad_request("Invalid date")?;
                let date: DateTime<Utc> = Utc.from_utc_datetime(&date);

                let scope = compute_scope(&date, region, service);
                let signing_hmac =
                    crate::signature::signing_hmac(&date, &secret_key, region, service)
                        .ok_or_internal_error("Unable to build signing HMAC")?;

                Some(SignParams {
                    datetime: date,
                    scope,
                    signing_hmac,
                    previous_signature: signature,
                })
            } else {
                None
            };

            Ok(req.map(move |body| {
                let stream = body_stream::<_, Error>(body);

                let signed_payload_stream =
                    SignedPayloadStream::new(stream, signing_hmac, date, &scope, signature)
                        .map(|x| x.map(hyper::body::Frame::data))
                        .map_err(Error::from);
                ReqBody::new(StreamBody::new(signed_payload_stream))
                    StreamingPayloadStream::new(stream, sign_params, trailer).map_err(Error::from);
                ReqBody {
                    stream: Mutex::new(signed_payload_stream.boxed()),
                    checksummer,
                    expected_checksums: Default::default(),
                    trailer_algorithm,
                }
            }))
        }
        _ => Ok(req.map(|body| ReqBody::new(http_body_util::BodyExt::map_err(body, Error::from)))),
        _ => Ok(req.map(|body| {
            let expected_checksums = ExpectedChecksums {
                sha256: match &checked_signature.content_sha256_header {
                    ContentSha256Header::Sha256Checksum(sha256) => Some(*sha256),
                    _ => None,
                },
                ..Default::default()
            };
            let checksummer = Checksummer::init(&expected_checksums, false);

            let stream = http_body_util::BodyStream::new(body).map_err(Error::from);
            ReqBody {
                stream: Mutex::new(stream.boxed()),
                checksummer,
                expected_checksums,
                trailer_algorithm: None,
            }
        })),
    }
}

/// Result of `sha256("")`
const EMPTY_STRING_HEX_DIGEST: &str =
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

fn compute_streaming_payload_signature(
    signing_hmac: &HmacSha256,
    date: DateTime<Utc>,
    scope: &str,
    previous_signature: Hash,
    content_sha256: Hash,
) -> Result<Hash, Error> {
) -> Result<Hash, StreamingPayloadError> {
    let string_to_sign = [
        AWS4_HMAC_SHA256_PAYLOAD,
        &date.format(LONG_DATETIME).to_string(),
@ -92,12 +165,49 @@ fn compute_streaming_payload_signature(
|
|||
let mut hmac = signing_hmac.clone();
|
||||
hmac.update(string_to_sign.as_bytes());
|
||||
|
||||
Ok(Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")?)
|
||||
Hash::try_from(&hmac.finalize().into_bytes())
|
||||
.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
|
||||
}
|
||||
|
||||
fn compute_streaming_trailer_signature(
|
||||
signing_hmac: &HmacSha256,
|
||||
date: DateTime<Utc>,
|
||||
scope: &str,
|
||||
previous_signature: Hash,
|
||||
trailer_sha256: Hash,
|
||||
) -> Result<Hash, StreamingPayloadError> {
|
||||
let string_to_sign = [
|
||||
AWS4_HMAC_SHA256_PAYLOAD,
|
||||
&date.format(LONG_DATETIME).to_string(),
|
||||
scope,
|
||||
&hex::encode(previous_signature),
|
||||
&hex::encode(trailer_sha256),
|
||||
]
|
||||
.join("\n");
|
||||
|
||||
let mut hmac = signing_hmac.clone();
|
||||
hmac.update(string_to_sign.as_bytes());
|
||||
|
||||
Hash::try_from(&hmac.finalize().into_bytes())
|
||||
.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
|
||||
}
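
// --- Editor's sketch (illustrative, not part of the diff above) ---
// The string-to-sign built by these helpers is just a newline-joined list.
// All values below are hypothetical placeholders; a real request supplies
// its own datetime, credential scope, previous signature and SHA-256 digest.
fn example_string_to_sign() -> String {
	[
		"AWS4-HMAC-SHA256-PAYLOAD",
		"20150830T123600Z",                // request datetime (LONG_DATETIME format)
		"20150830/garage/s3/aws4_request", // credential scope
		"<previous-signature-hex>",        // seed signature, or the previous chunk's
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // sha256 of the payload (empty here)
	]
	.join("\n")
	// HMAC-SHA256 over this string, keyed with the SigV4 signing key,
	// yields the signature of the next chunk or trailer.
}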

mod payload {
	use http::{HeaderName, HeaderValue};

	use garage_util::data::Hash;

	use nom::bytes::streaming::{tag, take_while};
	use nom::character::streaming::hex_digit1;
	use nom::combinator::{map_res, opt};
	use nom::number::streaming::hex_u32;

	macro_rules! try_parse {
		($expr:expr) => {
			$expr.map_err(|e| e.map(Error::Parser))?
		};
	}

	pub enum Error<I> {
		Parser(nom::error::Error<I>),
		BadSignature,

@@ -113,24 +223,13 @@ mod payload {
	}

	#[derive(Debug, Clone)]
	pub struct Header {
	pub struct ChunkHeader {
		pub size: usize,
		pub signature: Hash,
		pub signature: Option<Hash>,
	}

	impl Header {
		pub fn parse(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			use nom::bytes::streaming::tag;
			use nom::character::streaming::hex_digit1;
			use nom::combinator::map_res;
			use nom::number::streaming::hex_u32;

			macro_rules! try_parse {
				($expr:expr) => {
					$expr.map_err(|e| e.map(Error::Parser))?
				};
			}

	impl ChunkHeader {
		pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag(";")(input));

@@ -140,96 +239,172 @@ mod payload {

			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = Header {
			let header = ChunkHeader {
				size: size as usize,
				signature,
				signature: Some(signature),
			};

			Ok((input, header))
		}

		pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = ChunkHeader {
				size: size as usize,
				signature: None,
			};

			Ok((input, header))
		}
	}
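
	// --- Editor's sketch (illustrative, not part of the diff above) ---
	// The two header shapes that parse_signed and parse_unsigned accept: the
	// chunk size is hex-encoded, and the signature is 64 hex characters.
	// Values here are hypothetical.
	const SIGNED_CHUNK_HEADER: &[u8] =
		b"400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n";
	const UNSIGNED_CHUNK_HEADER: &[u8] = b"400\r\n";
	// Either header is followed by exactly 0x400 bytes of data and a "\r\n";
	// a chunk of size 0 marks the end of the payload.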

	#[derive(Debug, Clone)]
	pub struct TrailerChunk {
		pub header_name: HeaderName,
		pub header_value: HeaderValue,
		pub signature: Option<Hash>,
	}

	impl TrailerChunk {
		fn parse_content(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, header_name) = try_parse!(map_res(
				take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'),
				HeaderName::from_bytes
			)(input));
			let (input, _) = try_parse!(tag(b":")(input));
			let (input, header_value) = try_parse!(map_res(
				take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)),
				HeaderValue::from_bytes
			)(input));

			// Possible '\n' after the header value, depends on clients
			// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
			let (input, _) = try_parse!(opt(tag(b"\n"))(input));

			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((
				input,
				TrailerChunk {
					header_name,
					header_value,
					signature: None,
				},
			))
		}
		pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, trailer) = Self::parse_content(input)?;

			let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input));
			let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
			let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((
				input,
				TrailerChunk {
					signature: Some(signature),
					..trailer
				},
			))
		}
		pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, trailer) = Self::parse_content(input)?;
			let (input, _) = try_parse!(tag(b"\r\n")(input));

			Ok((input, trailer))
		}
	}
}
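
// --- Editor's sketch (illustrative, not part of the diff above) ---
// Wire shape of the trailer chunk consumed after the final 0-sized chunk,
// matching parse_unsigned and parse_signed above. The checksum value and
// trailer signature are hypothetical placeholders.
const UNSIGNED_TRAILER: &[u8] = b"x-amz-checksum-crc32c:wdBDMA==\r\n\r\n";
const SIGNED_TRAILER: &[u8] = b"x-amz-checksum-crc32c:wdBDMA==\r\nx-amz-trailer-signature:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\r\n";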

#[derive(Debug)]
pub enum SignedPayloadStreamError {
pub enum StreamingPayloadError {
	Stream(Error),
	InvalidSignature,
	Message(String),
}

impl SignedPayloadStreamError {
impl StreamingPayloadError {
	fn message(msg: &str) -> Self {
		SignedPayloadStreamError::Message(msg.into())
		StreamingPayloadError::Message(msg.into())
	}
}

impl From<SignedPayloadStreamError> for Error {
	fn from(err: SignedPayloadStreamError) -> Self {
impl From<StreamingPayloadError> for Error {
	fn from(err: StreamingPayloadError) -> Self {
		match err {
			SignedPayloadStreamError::Stream(e) => e,
			SignedPayloadStreamError::InvalidSignature => {
			StreamingPayloadError::Stream(e) => e,
			StreamingPayloadError::InvalidSignature => {
				Error::bad_request("Invalid payload signature")
			}
			SignedPayloadStreamError::Message(e) => {
			StreamingPayloadError::Message(e) => {
				Error::bad_request(format!("Chunk format error: {}", e))
			}
		}
	}
}

impl<I> From<payload::Error<I>> for SignedPayloadStreamError {
impl<I> From<payload::Error<I>> for StreamingPayloadError {
	fn from(err: payload::Error<I>) -> Self {
		Self::message(err.description())
	}
}

impl<I> From<nom::error::Error<I>> for SignedPayloadStreamError {
impl<I> From<nom::error::Error<I>> for StreamingPayloadError {
	fn from(err: nom::error::Error<I>) -> Self {
		Self::message(err.code.description())
	}
}

struct SignedPayload {
	header: payload::Header,
	data: Bytes,
enum StreamingPayloadChunk {
	Chunk {
		header: payload::ChunkHeader,
		data: Bytes,
	},
	Trailer(payload::TrailerChunk),
}

#[pin_project::pin_project]
pub struct SignedPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>>,
{
	#[pin]
	stream: S,
	buf: bytes::BytesMut,
struct SignParams {
	datetime: DateTime<Utc>,
	scope: String,
	signing_hmac: HmacSha256,
	previous_signature: Hash,
}

impl<S> SignedPayloadStream<S>
#[pin_project::pin_project]
pub struct StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>>,
{
	pub fn new(
		stream: S,
		signing_hmac: HmacSha256,
		datetime: DateTime<Utc>,
		scope: &str,
		seed_signature: Hash,
	) -> Self {
	#[pin]
	stream: S,
	buf: bytes::BytesMut,
	signing: Option<SignParams>,
	has_trailer: bool,
	done: bool,
}

impl<S> StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>>,
{
	fn new(stream: S, signing: Option<SignParams>, has_trailer: bool) -> Self {
		Self {
			stream,
			buf: bytes::BytesMut::new(),
			datetime,
			scope: scope.into(),
			signing_hmac,
			previous_signature: seed_signature,
			signing,
			has_trailer,
			done: false,
		}
	}

	fn parse_next(input: &[u8]) -> nom::IResult<&[u8], SignedPayload, SignedPayloadStreamError> {
	fn parse_next(
		input: &[u8],
		is_signed: bool,
		has_trailer: bool,
	) -> nom::IResult<&[u8], StreamingPayloadChunk, StreamingPayloadError> {
		use nom::bytes::streaming::{tag, take};

		macro_rules! try_parse {

@@ -238,17 +413,30 @@ where
			};
		}

		let (input, header) = try_parse!(payload::Header::parse(input));
		let (input, header) = if is_signed {
			try_parse!(payload::ChunkHeader::parse_signed(input))
		} else {
			try_parse!(payload::ChunkHeader::parse_unsigned(input))
		};

		// 0-sized chunk is the last
		if header.size == 0 {
			return Ok((
				input,
				SignedPayload {
					header,
					data: Bytes::new(),
				},
			));
			if has_trailer {
				let (input, trailer) = if is_signed {
					try_parse!(payload::TrailerChunk::parse_signed(input))
				} else {
					try_parse!(payload::TrailerChunk::parse_unsigned(input))
				};
				return Ok((input, StreamingPayloadChunk::Trailer(trailer)));
			} else {
				return Ok((
					input,
					StreamingPayloadChunk::Chunk {
						header,
						data: Bytes::new(),
					},
				));
			}
		}

		let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));

@@ -256,15 +444,15 @@ where

		let data = Bytes::from(data.to_vec());

		Ok((input, SignedPayload { header, data }))
		Ok((input, StreamingPayloadChunk::Chunk { header, data }))
	}
}

impl<S> Stream for SignedPayloadStream<S>
impl<S> Stream for StreamingPayloadStream<S>
where
	S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
	type Item = Result<Bytes, SignedPayloadStreamError>;
	type Item = Result<Frame<Bytes>, StreamingPayloadError>;

	fn poll_next(
		self: Pin<&mut Self>,

@@ -274,56 +462,105 @@ where

		let mut this = self.project();

		if *this.done {
			return Poll::Ready(None);
		}

		loop {
			let (input, payload) = match Self::parse_next(this.buf) {
				Ok(res) => res,
				Err(nom::Err::Incomplete(_)) => {
					match futures::ready!(this.stream.as_mut().poll_next(cx)) {
						Some(Ok(bytes)) => {
							this.buf.extend(bytes);
							continue;
						}
						Some(Err(e)) => {
							return Poll::Ready(Some(Err(SignedPayloadStreamError::Stream(e))))
						}
						None => {
							return Poll::Ready(Some(Err(SignedPayloadStreamError::message(
								"Unexpected EOF",
							))));
			let (input, payload) =
				match Self::parse_next(this.buf, this.signing.is_some(), *this.has_trailer) {
					Ok(res) => res,
					Err(nom::Err::Incomplete(_)) => {
						match futures::ready!(this.stream.as_mut().poll_next(cx)) {
							Some(Ok(bytes)) => {
								this.buf.extend(bytes);
								continue;
							}
							Some(Err(e)) => {
								return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e))))
							}
							None => {
								return Poll::Ready(Some(Err(StreamingPayloadError::message(
									"Unexpected EOF",
								))));
							}
						}
					}
					Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
						return Poll::Ready(Some(Err(e)))
					}
				};

			match payload {
				StreamingPayloadChunk::Chunk { data, header } => {
					if let Some(signing) = this.signing.as_mut() {
						let data_sha256sum = sha256sum(&data);

						let expected_signature = compute_streaming_payload_signature(
							&signing.signing_hmac,
							signing.datetime,
							&signing.scope,
							signing.previous_signature,
							data_sha256sum,
						)?;

						if header.signature.unwrap() != expected_signature {
							return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
						}

						signing.previous_signature = header.signature.unwrap();
					}

					*this.buf = input.into();

					// 0-sized chunk is the last
					if data.is_empty() {
						// if there was a trailer, it would have been returned by the parser
						assert!(!*this.has_trailer);
						*this.done = true;
						return Poll::Ready(None);
					}

					return Poll::Ready(Some(Ok(Frame::data(data))));
				}
				Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
					return Poll::Ready(Some(Err(e)))
				StreamingPayloadChunk::Trailer(trailer) => {
					trace!(
						"In StreamingPayloadStream::poll_next: got trailer {:?}",
						trailer
					);

					if let Some(signing) = this.signing.as_mut() {
						let data = [
							trailer.header_name.as_ref(),
							&b":"[..],
							trailer.header_value.as_ref(),
							&b"\n"[..],
						]
						.concat();
						let trailer_sha256sum = sha256sum(&data);

						let expected_signature = compute_streaming_trailer_signature(
							&signing.signing_hmac,
							signing.datetime,
							&signing.scope,
							signing.previous_signature,
							trailer_sha256sum,
						)?;

						if trailer.signature.unwrap() != expected_signature {
							return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
						}
					}

					*this.buf = input.into();
					*this.done = true;

					let mut trailers_map = HeaderMap::new();
					trailers_map.insert(trailer.header_name, trailer.header_value);

					return Poll::Ready(Some(Ok(Frame::trailers(trailers_map))));
				}
			};

			// 0-sized chunk is the last
			if payload.data.is_empty() {
				return Poll::Ready(None);
			}

			let data_sha256sum = sha256sum(&payload.data);

			let expected_signature = compute_streaming_payload_signature(
				this.signing_hmac,
				*this.datetime,
				this.scope,
				*this.previous_signature,
				data_sha256sum,
			)
			.map_err(|e| {
				SignedPayloadStreamError::Message(format!("Could not build signature: {}", e))
			})?;

			if payload.header.signature != expected_signature {
				return Poll::Ready(Some(Err(SignedPayloadStreamError::InvalidSignature)));
			}

			*this.buf = input.into();
			*this.previous_signature = payload.header.signature;

			return Poll::Ready(Some(Ok(payload.data)));
		}
	}

@@ -336,7 +573,7 @@ where
mod tests {
	use futures::prelude::*;

	use super::{SignedPayloadStream, SignedPayloadStreamError};
	use super::{SignParams, StreamingPayloadError, StreamingPayloadStream};

	#[tokio::test]
	async fn test_interrupted_signed_payload_stream() {

@@ -358,12 +595,20 @@ mod tests {

		let seed_signature = Hash::default();

		let mut stream =
			SignedPayloadStream::new(body, signing_hmac, datetime, &scope, seed_signature);
		let mut stream = StreamingPayloadStream::new(
			body,
			Some(SignParams {
				signing_hmac,
				datetime,
				scope,
				previous_signature: seed_signature,
			}),
			false,
		);

		assert!(stream.try_next().await.is_err());
		match stream.try_next().await {
			Err(SignedPayloadStreamError::Message(msg)) if msg == "Unexpected EOF" => {}
			Err(StreamingPayloadError::Message(msg)) if msg == "Unexpected EOF" => {}
			item => panic!(
				"Unexpected result, expected early EOF error, got {:?}",
				item

@@ -1,12 +1,12 @@
[package]
name = "garage_api_k2v"
version = "1.0.1"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
readme = "../../../README.md"

[lib]
path = "lib.rs"

@@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true

base64.workspace = true
err-derive.workspace = true
thiserror.workspace = true
tracing.workspace = true

futures.workspace = true

@@ -81,7 +81,9 @@ impl ApiHandler for K2VApiServer {
			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
		}

		let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
		let verified_request = verify_request(&garage, req, "k2v").await?;
		let req = verified_request.request;
		let api_key = verified_request.access_key;

		let bucket_id = garage
			.bucket_helper()

@@ -174,6 +176,12 @@ impl ApiHandler for K2VApiServer {

		Ok(resp_ok)
	}

	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
			.map(|auth| auth.key_id)
			.ok()
	}
}
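
// --- Editor's sketch (illustrative, not part of the diff above) ---
// key_id_from_request only needs the access key id, which a SigV4
// Authorization header carries in its Credential component. A hypothetical,
// self-contained version of that extraction (the real parsing lives in
// garage_api_common::signature::payload::Authorization::parse_header):
fn key_id_from_authorization(header: &str) -> Option<String> {
	// "AWS4-HMAC-SHA256 Credential=<key-id>/<date>/<region>/<service>/aws4_request, ..."
	header
		.split([' ', ','])
		.find_map(|part| part.strip_prefix("Credential="))
		.and_then(|cred| cred.split('/').next())
		.map(str::to_string)
}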

impl ApiEndpoint for K2VApiEndpoint {

@@ -20,7 +20,7 @@ pub async fn handle_insert_batch(
	let ReqCtx {
		garage, bucket_id, ..
	} = &ctx;
	let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
	let items = req.into_body().json::<Vec<InsertBatchItem>>().await?;

	let mut items2 = vec![];
	for it in items {

@@ -47,7 +47,7 @@ pub async fn handle_read_batch(
	ctx: ReqCtx,
	req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
	let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
	let queries = req.into_body().json::<Vec<ReadBatchQuery>>().await?;

	let resp_results = futures::future::join_all(
		queries

@@ -141,7 +141,7 @@ pub async fn handle_delete_batch(
	ctx: ReqCtx,
	req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
	let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
	let queries = req.into_body().json::<Vec<DeleteBatchQuery>>().await?;

	let resp_results = futures::future::join_all(
		queries

@@ -262,7 +262,7 @@ pub(crate) async fn handle_poll_range(
	} = ctx;
	use garage_model::k2v::sub::PollRange;

	let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
	let query = req.into_body().json::<PollRangeQuery>().await?;

	let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;

@@ -1,6 +1,6 @@
use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use thiserror::Error;

use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};

@@ -14,34 +14,38 @@ use garage_api_common::signature::error::Error as SignatureError;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
	#[error(display = "{}", _0)]
	#[error("{0}")]
	/// Error from common error
	Common(#[error(source)] CommonError),
	Common(#[from] CommonError),

	// Category: cannot process
	/// Authorization Header Malformed
	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
	#[error("Authorization header malformed, unexpected scope: {0}")]
	AuthorizationHeaderMalformed(String),

	/// The provided digest (checksum) value was invalid
	#[error("Invalid digest: {0}")]
	InvalidDigest(String),

	/// The requested object doesn't exist
	#[error(display = "Key not found")]
	#[error("Key not found")]
	NoSuchKey,

	/// Some base64 encoded data was badly encoded
	#[error(display = "Invalid base64: {}", _0)]
	InvalidBase64(#[error(source)] base64::DecodeError),
	#[error("Invalid base64: {0}")]
	InvalidBase64(#[from] base64::DecodeError),

	/// Invalid causality token
	#[error(display = "Invalid causality token")]
	#[error("Invalid causality token")]
	InvalidCausalityToken,

	/// The client asked for an invalid return format (invalid Accept header)
	#[error(display = "Not acceptable: {}", _0)]
	#[error("Not acceptable: {0}")]
	NotAcceptable(String),

	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8Str(#[from] std::str::Utf8Error),
}

commonErrorDerivative!(Error);
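
// --- Editor's sketch (illustrative, not part of the diff above) ---
// The err-derive to thiserror migration in this hunk is mechanical:
// `#[error("...")]` replaces `#[error(display = "...")]` and `#[from]`
// replaces `#[error(source)]`, keeping `?`-based conversions working.
// A minimal standalone form with a hypothetical error type:
use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8(#[from] std::str::Utf8Error),
}

fn parse_utf8(bytes: &[u8]) -> Result<&str, DemoError> {
	Ok(std::str::from_utf8(bytes)?) // `#[from]` powers this implicit conversion
}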

@@ -54,6 +58,7 @@ impl From<SignatureError> for Error {
				Self::AuthorizationHeaderMalformed(c)
			}
			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
			SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
		}
	}
}

@@ -71,6 +76,7 @@ impl Error {
			Error::InvalidBase64(_) => "InvalidBase64",
			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
			Error::InvalidCausalityToken => "CausalityToken",
			Error::InvalidDigest(_) => "InvalidDigest",
		}
	}
}

@@ -85,6 +91,7 @@ impl ApiError for Error {
			Error::AuthorizationHeaderMalformed(_)
			| Error::InvalidBase64(_)
			| Error::InvalidUtf8Str(_)
			| Error::InvalidDigest(_)
			| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
		}
	}
}

@@ -144,9 +144,7 @@ pub async fn handle_insert_item(
		.map(parse_causality_token)
		.transpose()?;

	let body = http_body_util::BodyExt::collect(req.into_body())
		.await?
		.to_bytes();
	let body = req.into_body().collect().await?;

	let value = DvvsValue::Value(body.to_vec());

@@ -1,12 +1,12 @@
[package]
name = "garage_api_s3"
version = "1.0.1"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "S3 API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
readme = "../../../README.md"

[lib]
path = "lib.rs"

@@ -29,7 +29,7 @@ bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
err-derive.workspace = true
thiserror.workspace = true
hex.workspace = true
tracing.workspace = true
md-5.workspace = true

@@ -121,7 +121,9 @@ impl ApiHandler for S3ApiServer {
			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
		}

		let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
		let verified_request = verify_request(&garage, req, "s3").await?;
		let req = verified_request.request;
		let api_key = verified_request.access_key;

		let bucket_name = match bucket_name {
			None => {

@@ -134,14 +136,7 @@ impl ApiHandler for S3ApiServer {

		// Special code path for CreateBucket API endpoint
		if let Endpoint::CreateBucket {} = endpoint {
			return handle_create_bucket(
				&garage,
				req,
				content_sha256,
				&api_key.key_id,
				bucket_name,
			)
			.await;
			return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
		}

		let bucket_id = garage

@@ -179,7 +174,7 @@ impl ApiHandler for S3ApiServer {
		let resp = match endpoint {
			Endpoint::HeadObject {
				key, part_number, ..
			} => handle_head(ctx, &req, &key, part_number).await,
			} => handle_head(ctx, &req.map(|_| ()), &key, part_number).await,
			Endpoint::GetObject {
				key,
				part_number,

@@ -199,20 +194,20 @@ impl ApiHandler for S3ApiServer {
					response_content_type,
					response_expires,
				};
				handle_get(ctx, &req, &key, part_number, overrides).await
				handle_get(ctx, &req.map(|_| ()), &key, part_number, overrides).await
			}
			Endpoint::UploadPart {
				key,
				part_number,
				upload_id,
			} => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
			} => handle_put_part(ctx, req, &key, part_number, &upload_id).await,
			Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
			Endpoint::UploadPartCopy {
				key,
				part_number,
				upload_id,
			} => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
			Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
			Endpoint::PutObject { key } => handle_put(ctx, req, &key).await,
			Endpoint::AbortMultipartUpload { key, upload_id } => {
				handle_abort_multipart_upload(ctx, &key, &upload_id).await
			}

@@ -221,7 +216,7 @@ impl ApiHandler for S3ApiServer {
				handle_create_multipart_upload(ctx, &req, &key).await
			}
			Endpoint::CompleteMultipartUpload { key, upload_id } => {
				handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
				handle_complete_multipart_upload(ctx, req, &key, &upload_id).await
			}
			Endpoint::CreateBucket {} => unreachable!(),
			Endpoint::HeadBucket {} => {

@@ -231,6 +226,7 @@ impl ApiHandler for S3ApiServer {
			Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
			Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
			Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
			Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
			Endpoint::ListObjects {
				delimiter,
				encoding_type,

@@ -324,17 +320,15 @@ impl ApiHandler for S3ApiServer {
				};
				handle_list_parts(ctx, req, &query).await
			}
			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req).await,
			Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req).await,
			Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
			Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req).await,
			Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
			Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
			Endpoint::PutBucketLifecycleConfiguration {} => {
				handle_put_lifecycle(ctx, req, content_sha256).await
			}
			Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await,
			Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
			endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
		};

@@ -349,6 +343,12 @@ impl ApiHandler for S3ApiServer {

		Ok(resp_ok)
	}

	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
			.map(|auth| auth.key_id)
			.ok()
	}
}

impl ApiEndpoint for S3ApiEndpoint {

@@ -1,21 +1,18 @@
use std::collections::HashMap;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::key_table::{Key, KeyParams};
use garage_model::permission::BucketKeyPerm;
use garage_table::util::*;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::time::*;

use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -47,6 +44,55 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
		.body(string_body(xml))?)
}

pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		bucket_id, api_key, ..
	} = ctx;
	let key_p = api_key.params().ok_or_internal_error(
		"Key should not be in deleted state at this point (in handle_get_bucket_acl)",
	)?;

	let mut grants: Vec<s3_xml::Grant> = vec![];
	let kp = api_key.bucket_permissions(&bucket_id);

	if kp.allow_owner {
		grants.push(s3_xml::Grant {
			grantee: create_grantee(&key_p, &api_key),
			permission: s3_xml::Value("FULL_CONTROL".to_string()),
		});
	} else {
		if kp.allow_read {
			grants.push(s3_xml::Grant {
				grantee: create_grantee(&key_p, &api_key),
				permission: s3_xml::Value("READ".to_string()),
			});
			grants.push(s3_xml::Grant {
				grantee: create_grantee(&key_p, &api_key),
				permission: s3_xml::Value("READ_ACP".to_string()),
			});
		}
		if kp.allow_write {
			grants.push(s3_xml::Grant {
				grantee: create_grantee(&key_p, &api_key),
				permission: s3_xml::Value("WRITE".to_string()),
			});
		}
	}

	let access_control_policy = s3_xml::AccessControlPolicy {
		xmlns: (),
		owner: None,
		acl: s3_xml::AccessControlList { entries: grants },
	};

	let xml = s3_xml::to_xml_with_header(&access_control_policy)?;
	trace!("xml: {}", xml);

	Ok(Response::builder()
		.header("Content-Type", "application/xml")
		.body(string_body(xml))?)
}
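
// --- Editor's sketch (illustrative, not part of the diff above) ---
// Roughly the response body this handler serializes for a key with
// allow_owner set. The key id and display name are hypothetical, and the
// exact output is produced by the s3_xml module:
const EXAMPLE_ACL_RESPONSE: &str = r#"<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <AccessControlList>
    <Grant>
      <Grantee xsi:type="CanonicalUser">
        <DisplayName>example-key</DisplayName>
        <ID>GK31c2f218a2e44f485b94239e</ID>
      </Grantee>
      <Permission>FULL_CONTROL</Permission>
    </Grant>
  </AccessControlList>
</AccessControlPolicy>"#;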

pub async fn handle_list_buckets(
	garage: &Garage,
	api_key: &Key,

@@ -122,15 +168,10 @@ pub async fn handle_list_buckets(
pub async fn handle_create_bucket(
	garage: &Garage,
	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
	api_key_id: &String,
	bucket_name: String,
) -> Result<Response<ResBody>, Error> {
	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;
	}
	let body = req.into_body().collect().await?;

	let cmd =
		parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?;

@@ -180,7 +221,7 @@ pub async fn handle_create_bucket(
	}

	// Create the bucket!
	if !is_valid_bucket_name(&bucket_name) {
	if !is_valid_bucket_name(&bucket_name, garage.config.allow_punycode) {
		return Err(Error::bad_request(format!(
			"{}: {}",
			bucket_name, INVALID_BUCKET_NAME_MESSAGE

@@ -249,11 +290,11 @@ pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Erro
	// 1. delete bucket alias
	if is_local_alias {
		helper
			.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
			.purge_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
			.await?;
	} else {
		helper
			.unset_global_bucket_alias(*bucket_id, bucket_name)
			.purge_global_bucket_alias(*bucket_id, bucket_name)
			.await?;
	}

@@ -319,6 +360,15 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
	Some(ret)
}

fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee {
	s3_xml::Grantee {
		xmlns_xsi: (),
		typ: "CanonicalUser".to_string(),
		display_name: Some(s3_xml::Value(key_params.name.get().to_string())),
		id: Some(s3_xml::Value(api_key.key_id.to_string())),
	}
}

#[cfg(test)]
mod tests {
	use super::*;

@@ -1,9 +1,9 @@
use std::pin::Pin;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use futures::{stream, stream::Stream, StreamExt, TryStreamExt};

use bytes::Bytes;
use http::header::HeaderName;
use hyper::{Request, Response};
use serde::Serialize;

@@ -21,16 +21,26 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;

use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::get::full_object_byte_stream;
use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
use crate::multipart;
use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
use crate::xml::{self as s3_xml, xmlns_tag};

pub const X_AMZ_COPY_SOURCE_IF_MATCH: HeaderName =
	HeaderName::from_static("x-amz-copy-source-if-match");
pub const X_AMZ_COPY_SOURCE_IF_NONE_MATCH: HeaderName =
	HeaderName::from_static("x-amz-copy-source-if-none-match");
pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: HeaderName =
	HeaderName::from_static("x-amz-copy-source-if-modified-since");
pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: HeaderName =
	HeaderName::from_static("x-amz-copy-source-if-unmodified-since");

// -------- CopyObject ---------

pub async fn handle_copy(

@@ -38,7 +48,7 @@ pub async fn handle_copy(
	req: &Request<ReqBody>,
	dest_key: &str,
) -> Result<Response<ResBody>, Error> {
	let copy_precondition = CopyPreconditionHeaders::parse(req)?;
	let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;

	let checksum_algorithm = request_checksum_algorithm(req.headers())?;

@@ -48,7 +58,7 @@ pub async fn handle_copy(
		extract_source_info(&source_object)?;

	// Check precondition, e.g. x-amz-copy-source-if-match
	copy_precondition.check(source_version, &source_version_meta.etag)?;
	copy_precondition.check_copy_source(source_version, &source_version_meta.etag)?;

	// Determine encryption parameters
	let (source_encryption, source_object_meta_inner) =

@@ -73,9 +83,20 @@ pub async fn handle_copy(
	let dest_object_meta = ObjectVersionMetaInner {
		headers: match req.headers().get("x-amz-metadata-directive") {
			Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
				get_headers(req.headers())?
				extract_metadata_headers(req.headers())?
			}
			_ => {
				// The x-amz-website-redirect-location header is not copied, instead
				// it is replaced by the value from the request (or removed if no
				// value was specified)
				let is_redirect =
					|(key, _): &(String, String)| key == X_AMZ_WEBSITE_REDIRECT_LOCATION.as_str();
				let mut headers: Vec<_> = source_object_meta_inner.headers.clone();
				headers.retain(|h| !is_redirect(h));
				let new_headers = extract_metadata_headers(req.headers())?;
				headers.extend(new_headers.into_iter().filter(is_redirect));
				headers
			}
			_ => source_object_meta_inner.into_owned().headers,
		},
		checksum: source_checksum,
	};

@@ -216,6 +237,7 @@ async fn handle_copy_metaonly(
		.get(&source_version.uuid, &EmptyKey)
		.await?;
	let source_version = source_version.ok_or(Error::NoSuchKey)?;
	check_version_not_deleted(&source_version)?;

	// Write an "uploading" marker in Object table
	// This holds a reference to the object in the Version table

@@ -335,7 +357,7 @@ pub async fn handle_upload_part_copy(
	part_number: u64,
	upload_id: &str,
) -> Result<Response<ResBody>, Error> {
	let copy_precondition = CopyPreconditionHeaders::parse(req)?;
	let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;

	let dest_upload_id = multipart::decode_upload_id(upload_id)?;

@@ -351,7 +373,7 @@ pub async fn handle_upload_part_copy(
		extract_source_info(&source_object)?;

	// Check precondition on source, e.g. x-amz-copy-source-if-match
	copy_precondition.check(source_object_version, &source_version_meta.etag)?;
	copy_precondition.check_copy_source(source_object_version, &source_version_meta.etag)?;

	// Determine encryption parameters
	let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(

@@ -407,6 +429,7 @@ pub async fn handle_upload_part_copy(
		.get(&source_object_version.uuid, &EmptyKey)
		.await?
		.ok_or(Error::NoSuchKey)?;
	check_version_not_deleted(&source_version)?;

	// We want to reuse blocks from the source version as much as possible.
	// However, we still need to get the data from these blocks

@@ -538,6 +561,7 @@ pub async fn handle_upload_part_copy(

	let mut current_offset = 0;
	let mut next_block = defragmenter.next().await?;
	let mut blocks_to_dup = dest_version.clone();

	// TODO this could be optimized similarly to read_and_put_blocks
	// low priority because uploadpartcopy is rarely used

@@ -567,8 +591,7 @@ pub async fn handle_upload_part_copy(
			.unwrap()?;
		checksummer = checksummer_updated;

		dest_version.blocks.clear();
		dest_version.blocks.put(
		let (version_block_key, version_block) = (
			VersionBlockKey {
				part_number,
				offset: current_offset,

@@ -580,37 +603,56 @@ pub async fn handle_upload_part_copy(
		);
		current_offset += data_len;

		let block_ref = BlockRef {
			block: final_hash,
			version: dest_version_id,
			deleted: false.into(),
		let next = if let Some(final_data) = data_to_upload {
			dest_version.blocks.clear();
			dest_version.blocks.put(version_block_key, version_block);
			let block_ref = BlockRef {
				block: final_hash,
				version: dest_version_id,
				deleted: false.into(),
			};
			let (_, _, _, next) = futures::try_join!(
				// Thing 1: if the block is not exactly a block that existed before,
				// we need to insert that data as a new block.
				garage.block_manager.rpc_put_block(
					final_hash,
					final_data,
					dest_encryption.is_encrypted(),
					None
				),
				// Thing 2: we need to insert the block in the version
				garage.version_table.insert(&dest_version),
				// Thing 3: we need to add a block reference
				garage.block_ref_table.insert(&block_ref),
				// Thing 4: we need to read the next block
				defragmenter.next(),
			)?;
			next
		} else {
			blocks_to_dup.blocks.put(version_block_key, version_block);
			defragmenter.next().await?
		};

		let (_, _, _, next) = futures::try_join!(
			// Thing 1: if the block is not exactly a block that existed before,
			// we need to insert that data as a new block.
			async {
				if let Some(final_data) = data_to_upload {
					garage
						.block_manager
						.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
						.await
				} else {
					Ok(())
				}
			},
			// Thing 2: we need to insert the block in the version
			garage.version_table.insert(&dest_version),
			// Thing 3: we need to add a block reference
			garage.block_ref_table.insert(&block_ref),
			// Thing 4: we need to read the next block
			defragmenter.next(),
		)?;
		next_block = next;
	}

	assert_eq!(current_offset, source_range.length);

	// Put the duplicated blocks into the version & block_refs tables
	let block_refs_to_put = blocks_to_dup
		.blocks
		.items()
		.iter()
		.map(|b| BlockRef {
			block: b.1.hash,
			version: dest_version_id,
			deleted: false.into(),
		})
		.collect::<Vec<_>>();
	futures::try_join!(
		garage.version_table.insert(&blocks_to_dup),
		garage.block_ref_table.insert_many(&block_refs_to_put[..]),
	)?;

	let checksums = checksummer.finalize();
	let etag = dest_encryption.etag_from_md5(&checksums.md5);
	let checksum = checksums.extract(dest_object_checksum_algorithm);

@@ -703,97 +745,6 @@ fn extract_source_info(
	Ok((source_version, source_version_data, source_version_meta))
}

struct CopyPreconditionHeaders {
	copy_source_if_match: Option<Vec<String>>,
	copy_source_if_modified_since: Option<SystemTime>,
	copy_source_if_none_match: Option<Vec<String>>,
	copy_source_if_unmodified_since: Option<SystemTime>,
}

impl CopyPreconditionHeaders {
	fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
		Ok(Self {
			copy_source_if_match: req
				.headers()
				.get("x-amz-copy-source-if-match")
				.map(|x| x.to_str())
				.transpose()?
				.map(|x| {
					x.split(',')
						.map(|m| m.trim().trim_matches('"').to_string())
						.collect::<Vec<_>>()
				}),
			copy_source_if_modified_since: req
				.headers()
				.get("x-amz-copy-source-if-modified-since")
				.map(|x| x.to_str())
				.transpose()?
				.map(httpdate::parse_http_date)
				.transpose()
				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?,
			copy_source_if_none_match: req
				.headers()
				.get("x-amz-copy-source-if-none-match")
				.map(|x| x.to_str())
				.transpose()?
				.map(|x| {
					x.split(',')
						.map(|m| m.trim().trim_matches('"').to_string())
						.collect::<Vec<_>>()
				}),
			copy_source_if_unmodified_since: req
				.headers()
				.get("x-amz-copy-source-if-unmodified-since")
				.map(|x| x.to_str())
				.transpose()?
				.map(httpdate::parse_http_date)
				.transpose()
				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?,
		})
	}

	fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
		let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);

		let ok = match (
			&self.copy_source_if_match,
			&self.copy_source_if_unmodified_since,
			&self.copy_source_if_none_match,
			&self.copy_source_if_modified_since,
		) {
			// TODO I'm not sure all of the conditions are evaluated correctly here

			// If we have both if-match and if-unmodified-since,
			// basically we don't care about if-unmodified-since,
			// because in the spec it says that if if-match evaluates to
			// true but if-unmodified-since evaluates to false,
			// the copy is still done.
			(Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"),
			(None, Some(ius), None, None) => v_date <= *ius,

			// If we have both if-none-match and if-modified-since,
			// then both of the two conditions must evaluate to true
			(None, None, Some(inm), Some(ims)) => {
				!inm.iter().any(|x| x == etag || x == "*") && v_date > *ims
			}
			(None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"),
			(None, None, None, Some(ims)) => v_date > *ims,
			(None, None, None, None) => true,
			_ => {
				return Err(Error::bad_request(
					"Invalid combination of x-amz-copy-source-if-xxxxx headers",
				))
			}
		};

		if ok {
			Ok(())
		} else {
			Err(Error::PreconditionFailed)
		}
	}
}

type BlockStreamItemOk = (Bytes, Option<Hash>);
type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
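
// --- Editor's sketch (illustrative, not part of the diff above) ---
// The concurrency pattern used in both branches of the hunk above:
// futures::try_join! polls every branch at once and short-circuits on the
// first error, so the block upload, the table inserts and the read of the
// next block all overlap. A minimal standalone form:
async fn try_join_demo() -> Result<(), std::io::Error> {
	let (a, b) = futures::try_join!(
		async { Ok::<_, std::io::Error>(1) }, // e.g. a block write
		async { Ok::<_, std::io::Error>(2) }, // e.g. a table insert
	)?;
	assert_eq!(a + b, 3);
	Ok(())
}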

@@ -2,15 +2,11 @@ use quick_xml::de::from_reader;

use hyper::{header::HeaderName, Method, Request, Response, StatusCode};

use http_body_util::BodyExt;

use serde::{Deserialize, Serialize};

use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
use garage_util::data::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -59,7 +55,6 @@ pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
pub async fn handle_put_cors(
	ctx: ReqCtx,
	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		garage,

@@ -68,11 +63,7 @@ pub async fn handle_put_cors(
		..
	} = ctx;

	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;
	}
	let body = req.into_body().collect().await?;

	let conf: CorsConfiguration = from_reader(&body as &[u8])?;
	conf.validate()?;

@@ -97,7 +88,9 @@ pub async fn handle_put_cors(
pub struct CorsConfiguration {
	#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
	pub xmlns: (),
	#[serde(rename = "CORSRule")]
	// "default" is required to be able to parse an empty list of rules,
	// cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
	#[serde(rename = "CORSRule", default)]
	pub cors_rules: Vec<CorsRule>,
}

@@ -279,4 +272,26 @@ mod tests {

		Ok(())
	}

	#[test]
	fn test_deserialize_norules() -> Result<(), Error> {
		let message = r#"<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
		let conf: CorsConfiguration = from_str(message).unwrap();
		let ref_value = CorsConfiguration {
			xmlns: (),
			cors_rules: vec![],
		};
		assert_eq! {
			ref_value,
			conf
		};

		let message2 = to_xml_with_header(&ref_value)?;

		let cleanup = |c: &str| c.replace(char::is_whitespace, "");
		assert_eq!(cleanup(message), cleanup(&message2));

		Ok(())
	}
}

@@ -1,4 +1,3 @@
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use garage_util::data::*;

@@ -6,7 +5,6 @@ use garage_util::data::*;
use garage_model::s3::object_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -68,13 +66,8 @@ pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>,
pub async fn handle_delete_objects(
	ctx: ReqCtx,
	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;
	}
	let body = req.into_body().collect().await?;

	let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
	let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?;

@@ -29,8 +29,8 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};

use garage_api_common::common_error::*;
use garage_api_common::signature::checksum::Md5Checksum;

use crate::checksum::Md5Checksum;
use crate::error::Error;

const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =

@@ -1,8 +1,8 @@
use std::convert::TryInto;

use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use thiserror::Error;

use garage_model::helper::error::Error as HelperError;

@@ -25,67 +25,67 @@ use crate::xml as s3_xml;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
	#[error(display = "{}", _0)]
	#[error("{0}")]
	/// Error from common error
	Common(#[error(source)] CommonError),
	Common(#[from] CommonError),

	// Category: cannot process
	/// Authorization Header Malformed
	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
	#[error("Authorization header malformed, unexpected scope: {0}")]
	AuthorizationHeaderMalformed(String),

	/// The requested object doesn't exist
	#[error(display = "Key not found")]
	#[error("Key not found")]
	NoSuchKey,

	/// The requested multipart upload doesn't exist
	#[error(display = "Upload not found")]
	#[error("Upload not found")]
	NoSuchUpload,

	/// Precondition failed (e.g. x-amz-copy-source-if-match)
	#[error(display = "At least one of the preconditions you specified did not hold")]
	#[error("At least one of the preconditions you specified did not hold")]
	PreconditionFailed,

	/// Parts specified in CMU request do not match parts actually uploaded
	#[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
	#[error("Parts given to CompleteMultipartUpload do not match uploaded parts")]
	InvalidPart,

	/// Parts given to CompleteMultipartUpload were not in ascending order
	#[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
	#[error("Parts given to CompleteMultipartUpload were not in ascending order")]
	InvalidPartOrder,

	/// In CompleteMultipartUpload: not enough data
	/// (here we are more lenient than AWS S3)
	#[error(display = "Proposed upload is smaller than the minimum allowed object size")]
	#[error("Proposed upload is smaller than the minimum allowed object size")]
	EntityTooSmall,

	// Category: bad request
	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8Str(#[from] std::str::Utf8Error),

	/// The request used an invalid path
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8String(#[from] std::string::FromUtf8Error),

	/// The client sent invalid XML data
	#[error(display = "Invalid XML: {}", _0)]
	#[error("Invalid XML: {0}")]
	InvalidXml(String),

	/// The client sent a range header with invalid value
	#[error(display = "Invalid HTTP range: {:?}", _0)]
	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
	#[error("Invalid HTTP range: {0:?}")]
	InvalidRange((http_range::HttpRangeParseError, u64)),

	/// The client sent an invalid encryption algorithm
	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
	#[error("Invalid encryption algorithm: {0:?}, should be AES256")]
	InvalidEncryptionAlgorithm(String),

	/// The provided digest (checksum) value was invalid
	#[error(display = "Invalid digest: {}", _0)]
	#[error("Invalid digest: {0}")]
	InvalidDigest(String),

	/// The client sent a request for an action not supported by garage
	#[error(display = "Unimplemented action: {}", _0)]
	#[error("Unimplemented action: {0}")]
	NotImplemented(String),
}

@@ -99,6 +99,12 @@ impl From<HelperError> for Error {
	}
}

impl From<(http_range::HttpRangeParseError, u64)> for Error {
	fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
		Error::InvalidRange(err)
	}
}

impl From<roxmltree::Error> for Error {
	fn from(err: roxmltree::Error) -> Self {
		Self::InvalidXml(format!("{}", err))

@@ -119,6 +125,7 @@ impl From<SignatureError> for Error {
				Self::AuthorizationHeaderMalformed(c)
			}
			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
			SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
		}
	}
}

@@ -2,37 +2,39 @@
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH};
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use bytes::Bytes;
use futures::future;
use futures::stream::{self, Stream, StreamExt};
use http::header::{
	ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
	CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
	LAST_MODIFIED, RANGE,
	HeaderMap, HeaderName, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING,
	CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MATCH,
	IF_MODIFIED_SINCE, IF_NONE_MATCH, IF_UNMODIFIED_SINCE, LAST_MODIFIED, RANGE,
};
use hyper::{body::Body, Request, Response, StatusCode};
use hyper::{Request, Response, StatusCode};
use tokio::sync::mpsc;

use garage_net::stream::ByteStream;
use garage_rpc::rpc_helper::OrderTag;
use garage_table::EmptyKey;
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_util::error::{Error as UtilError, OkOrMessage};

use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};

use crate::api_server::ResBody;
use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::copy::*;
use crate::encryption::EncryptionParams;
use crate::error::*;

const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
const X_AMZ_MP_PARTS_COUNT: HeaderName = HeaderName::from_static("x-amz-mp-parts-count");

#[derive(Default)]
pub struct GetObjectOverrides {

@@ -115,49 +117,29 @@ fn getobject_override_headers(
	Ok(())
}

fn try_answer_cached(
fn handle_http_precondition(
	version: &ObjectVersion,
	version_meta: &ObjectVersionMeta,
	req: &Request<impl Body>,
) -> Option<Response<ResBody>> {
	// <trinity> It is possible, and is even usually the case, [that both If-None-Match and
	// If-Modified-Since] are present in a request. In this situation If-None-Match takes
	// precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). The
	// rationale is that etag-based matching is more accurate: it has no issue with sub-second
	// precision, for instance (in case of very fast updates).
	let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) {
		let none_match = none_match.to_str().ok()?;
		let expected = format!("\"{}\"", version_meta.etag);
		let found = none_match
			.split(',')
			.map(str::trim)
			.any(|etag| etag == expected || etag == "\"*\"");
		found
	} else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) {
		let modified_since = modified_since.to_str().ok()?;
		let client_date = httpdate::parse_http_date(modified_since).ok()?;
		let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
		client_date >= server_date
	} else {
		false
	};
	req: &Request<()>,
) -> Result<Option<Response<ResBody>>, Error> {
	let precondition_headers = PreconditionHeaders::parse(req)?;

	if cached {
		Some(
	if let Some(status_code) = precondition_headers.check(&version, &version_meta.etag)? {
		Ok(Some(
			Response::builder()
				.status(StatusCode::NOT_MODIFIED)
				.status(status_code)
				.body(empty_body())
				.unwrap(),
		)
		))
	} else {
		None
		Ok(None)
	}
}
|
||||
|
||||
/// Handle HEAD request
|
||||
pub async fn handle_head(
|
||||
ctx: ReqCtx,
|
||||
req: &Request<impl Body>,
|
||||
req: &Request<()>,
|
||||
key: &str,
|
||||
part_number: Option<u64>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
|
|
@ -167,7 +149,7 @@ pub async fn handle_head(
|
|||
/// Handle HEAD request for website
|
||||
pub async fn handle_head_without_ctx(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<impl Body>,
|
||||
req: &Request<()>,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
part_number: Option<u64>,
|
||||
|
|
@ -196,8 +178,8 @@ pub async fn handle_head_without_ctx(
|
|||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
if let Some(cached) = try_answer_cached(object_version, version_meta, req) {
|
||||
return Ok(cached);
|
||||
if let Some(res) = handle_http_precondition(object_version, version_meta, req)? {
|
||||
return Ok(res);
|
||||
}
|
||||
|
||||
let (encryption, headers) =
|
||||
|
|
@ -234,6 +216,7 @@ pub async fn handle_head_without_ctx(
|
|||
.get(&object_version.uuid, &EmptyKey)
|
||||
.await?
|
||||
.ok_or(Error::NoSuchKey)?;
|
||||
check_version_not_deleted(&version)?;
|
||||
|
||||
let (part_offset, part_end) =
|
||||
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
|
||||
|
|
@ -278,7 +261,7 @@ pub async fn handle_head_without_ctx(
|
|||
/// Handle GET request
|
||||
pub async fn handle_get(
|
||||
ctx: ReqCtx,
|
||||
req: &Request<impl Body>,
|
||||
req: &Request<()>,
|
||||
key: &str,
|
||||
part_number: Option<u64>,
|
||||
overrides: GetObjectOverrides,
|
||||
|
|
@ -289,7 +272,7 @@ pub async fn handle_get(
|
|||
/// Handle GET request
|
||||
pub async fn handle_get_without_ctx(
|
||||
garage: Arc<Garage>,
|
||||
req: &Request<impl Body>,
|
||||
req: &Request<()>,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
part_number: Option<u64>,
|
||||
|
|
@ -318,8 +301,8 @@ pub async fn handle_get_without_ctx(
|
|||
ObjectVersionData::FirstBlock(meta, _) => meta,
|
||||
};
|
||||
|
||||
if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) {
|
||||
return Ok(cached);
|
||||
if let Some(res) = handle_http_precondition(last_v, last_v_meta, req)? {
|
||||
return Ok(res);
|
||||
}
|
||||
|
||||
let (enc, headers) =
|
||||
|
|
@ -340,7 +323,12 @@ pub async fn handle_get_without_ctx(
|
|||
enc,
|
||||
&headers,
|
||||
pn,
|
||||
checksum_mode,
|
||||
ChecksumMode {
|
||||
// TODO: for multipart uploads, checksums of each part should be stored
|
||||
// so that we can return the corresponding checksum here
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||
enabled: false,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
@ -354,7 +342,12 @@ pub async fn handle_get_without_ctx(
|
|||
&headers,
|
||||
range.start,
|
||||
range.start + range.length,
|
||||
checksum_mode,
|
||||
ChecksumMode {
|
||||
// TODO: for range queries that align with part boundaries,
|
||||
// we should return the saved checksum of the part
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||
enabled: false,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
|
@ -374,6 +367,21 @@ pub async fn handle_get_without_ctx(
|
|||
}
|
||||
}
|
||||
|
||||
pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> {
|
||||
if version.deleted.get() {
|
||||
// the version was deleted between when the object_table was consulted
|
||||
// and now, this could mean the object was deleted, or overriden.
|
||||
// Rather than say the key doesn't exist, return a transient error
|
||||
// to signal the client to try again.
|
||||
return Err(CommonError::InternalError(UtilError::Message(
|
||||
"conflict/inconsistency between object and version state, version is deleted"
|
||||
.to_string(),
|
||||
))
|
||||
.into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_get_full(
|
||||
garage: Arc<Garage>,
|
||||
version: &ObjectVersion,
|
||||
|
|
@ -440,6 +448,7 @@ pub fn full_object_byte_stream(
|
|||
.ok_or_message("channel closed")?;
|
||||
|
||||
let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
|
||||
check_version_not_deleted(&version)?;
|
||||
for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
|
||||
let stream_block_i = encryption
|
||||
.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
|
||||
|
|
@ -455,6 +464,14 @@ pub fn full_object_byte_stream(
|
|||
{
|
||||
Ok(()) => (),
|
||||
Err(e) => {
|
||||
// TODO i think this is a bad idea, we should log
|
||||
// an error and stop there. If the error happens to
|
||||
// be exactly the size of what hasn't been streamed
|
||||
// yet, the client will see the request as a
|
||||
// success
|
||||
// instead truncating the output notify the client
|
||||
// something happened with their download, so that
|
||||
// they can retry it
|
||||
let _ = tx.send(error_stream_item(e)).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -506,7 +523,7 @@ async fn handle_get_range(
|
|||
.get(&version.uuid, &EmptyKey)
|
||||
.await?
|
||||
.ok_or(Error::NoSuchKey)?;
|
||||
|
||||
check_version_not_deleted(&version)?;
|
||||
let body =
|
||||
body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
|
||||
Ok(resp_builder.body(body)?)
|
||||
|
|
@ -557,6 +574,8 @@ async fn handle_get_part(
|
|||
.await?
|
||||
.ok_or(Error::NoSuchKey)?;
|
||||
|
||||
check_version_not_deleted(&version)?;
|
||||
|
||||
let (begin, end) =
|
||||
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
|
||||
|
||||
|
|
@ -577,7 +596,7 @@ async fn handle_get_part(
|
|||
}
|
||||
|
||||
fn parse_range_header(
|
||||
req: &Request<impl Body>,
|
||||
req: &Request<()>,
|
||||
total_size: u64,
|
||||
) -> Result<Option<http_range::HttpRange>, Error> {
|
||||
let range = match req.headers().get(RANGE) {
|
||||
|
|
@ -618,7 +637,7 @@ struct ChecksumMode {
|
|||
enabled: bool,
|
||||
}
|
||||
|
||||
fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
|
||||
fn checksum_mode(req: &Request<()>) -> ChecksumMode {
|
||||
ChecksumMode {
|
||||
enabled: req
|
||||
.headers()
|
||||
|
|
@ -751,3 +770,118 @@ fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
|
|||
format!("Error while reading object data: {}", e),
|
||||
)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
pub struct PreconditionHeaders {
|
||||
if_match: Option<Vec<String>>,
|
||||
if_modified_since: Option<SystemTime>,
|
||||
if_none_match: Option<Vec<String>>,
|
||||
if_unmodified_since: Option<SystemTime>,
|
||||
}
|
||||
|
||||
impl PreconditionHeaders {
|
||||
fn parse<B>(req: &Request<B>) -> Result<Self, Error> {
|
||||
Self::parse_with(
|
||||
req.headers(),
|
||||
&IF_MATCH,
|
||||
&IF_NONE_MATCH,
|
||||
&IF_MODIFIED_SINCE,
|
||||
&IF_UNMODIFIED_SINCE,
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn parse_copy_source<B>(req: &Request<B>) -> Result<Self, Error> {
|
||||
Self::parse_with(
|
||||
req.headers(),
|
||||
&X_AMZ_COPY_SOURCE_IF_MATCH,
|
||||
&X_AMZ_COPY_SOURCE_IF_NONE_MATCH,
|
||||
&X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE,
|
||||
&X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
|
||||
)
|
||||
}
|
||||
|
||||
fn parse_with(
|
||||
headers: &HeaderMap,
|
||||
hdr_if_match: &HeaderName,
|
||||
hdr_if_none_match: &HeaderName,
|
||||
hdr_if_modified_since: &HeaderName,
|
||||
hdr_if_unmodified_since: &HeaderName,
|
||||
) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
if_match: headers
|
||||
.get(hdr_if_match)
|
||||
.map(|x| x.to_str())
|
||||
.transpose()?
|
||||
.map(|x| {
|
||||
x.split(',')
|
||||
.map(|m| m.trim().trim_matches('"').to_string())
|
||||
.collect::<Vec<_>>()
|
||||
}),
|
||||
if_none_match: headers
|
||||
.get(hdr_if_none_match)
|
||||
.map(|x| x.to_str())
|
||||
.transpose()?
|
||||
.map(|x| {
|
||||
x.split(',')
|
||||
.map(|m| m.trim().trim_matches('"').to_string())
|
||||
.collect::<Vec<_>>()
|
||||
}),
|
||||
if_modified_since: headers
|
||||
.get(hdr_if_modified_since)
|
||||
.map(|x| x.to_str())
|
||||
.transpose()?
|
||||
.map(httpdate::parse_http_date)
|
||||
.transpose()
|
||||
.ok_or_bad_request("Invalid date in if-modified-since")?,
|
||||
if_unmodified_since: headers
|
||||
.get(hdr_if_unmodified_since)
|
||||
.map(|x| x.to_str())
|
||||
.transpose()?
|
||||
.map(httpdate::parse_http_date)
|
||||
.transpose()
|
||||
.ok_or_bad_request("Invalid date in if-unmodified-since")?,
|
||||
})
|
||||
}
|
||||
|
||||
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
|
||||
// we store date with ms precision, but headers are precise to the second: truncate
|
||||
// the timestamp to handle the same-second edge case
|
||||
let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
|
||||
|
||||
// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
|
||||
|
||||
if let Some(im) = &self.if_match {
|
||||
// Step 1: if-match is present
|
||||
if !im.iter().any(|x| x == etag || x == "*") {
|
||||
return Ok(Some(StatusCode::PRECONDITION_FAILED));
|
||||
}
|
||||
} else if let Some(ius) = &self.if_unmodified_since {
|
||||
// Step 2: if-unmodified-since is present, and if-match is absent
|
||||
if v_date > *ius {
|
||||
return Ok(Some(StatusCode::PRECONDITION_FAILED));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(inm) = &self.if_none_match {
|
||||
// Step 3: if-none-match is present
|
||||
if inm.iter().any(|x| x == etag || x == "*") {
|
||||
return Ok(Some(StatusCode::NOT_MODIFIED));
|
||||
}
|
||||
} else if let Some(ims) = &self.if_modified_since {
|
||||
// Step 4: if-modified-since is present, and if-none-match is absent
|
||||
if v_date <= *ims {
|
||||
return Ok(Some(StatusCode::NOT_MODIFIED));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
|
||||
match self.check(v, etag)? {
|
||||
Some(_) => Err(Error::PreconditionFailed),
|
||||
None => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
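The new `PreconditionHeaders::check` above follows the step ordering of RFC 7232 §6: `If-Match` first, `If-Unmodified-Since` only when `If-Match` is absent, then `If-None-Match` (answering 304 for GET/HEAD), and `If-Modified-Since` only when `If-None-Match` is absent. A standalone sketch of just the ETag half of that ladder (date handling omitted; function and argument names are ours, not Garage's):

```rust
use http::StatusCode;

// Returns the early-exit status if a precondition fails, or None to proceed.
fn evaluate_etag_preconditions(
    if_match: Option<&[String]>,
    if_none_match: Option<&[String]>,
    etag: &str,
) -> Option<StatusCode> {
    if let Some(im) = if_match {
        // Step 1: If-Match must list the current ETag (or "*").
        if !im.iter().any(|x| x == etag || x == "*") {
            return Some(StatusCode::PRECONDITION_FAILED);
        }
    }
    if let Some(inm) = if_none_match {
        // Step 3: If-None-Match must NOT list the current ETag.
        if inm.iter().any(|x| x == etag || x == "*") {
            return Some(StatusCode::NOT_MODIFIED);
        }
    }
    None
}
```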
@@ -14,9 +14,8 @@ mod list;
 mod multipart;
 mod post_object;
 mod put;
-mod website;
+pub mod website;
 
-mod checksum;
 mod encryption;
 mod router;
 pub mod xml;
@@ -1,12 +1,10 @@
 use quick_xml::de::from_reader;
 
-use http_body_util::BodyExt;
 use hyper::{Request, Response, StatusCode};
 
 use serde::{Deserialize, Serialize};
 
 use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
 
 use crate::api_server::{ReqBody, ResBody};
 use crate::error::*;

@@ -16,7 +14,6 @@ use garage_model::bucket_table::{
     parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
     LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
 };
-use garage_util::data::*;
 
 pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
     let ReqCtx { bucket_params, .. } = ctx;

@@ -30,7 +27,7 @@ pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Erro
             .body(string_body(xml))?)
     } else {
         Ok(Response::builder()
-            .status(StatusCode::NO_CONTENT)
+            .status(StatusCode::NOT_FOUND)
             .body(empty_body())?)
     }
 }

@@ -56,7 +53,6 @@ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, E
 pub async fn handle_put_lifecycle(
     ctx: ReqCtx,
     req: Request<ReqBody>,
-    content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
     let ReqCtx {
         garage,

@@ -65,11 +61,7 @@ pub async fn handle_put_lifecycle(
         ..
     } = ctx;
 
-    let body = BodyExt::collect(req.into_body()).await?.to_bytes();
-
-    if let Some(content_sha256) = content_sha256 {
-        verify_signed_content(content_sha256, &body[..])?;
-    }
+    let body = req.into_body().collect().await?;
 
     let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
     let config = conf
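The status change for GetBucketLifecycle on a bucket with no rules (204 No Content → 404 Not Found) matches AWS S3, which answers such requests with a 404 `NoSuchLifecycleConfiguration` error. A minimal sketch of the new empty response, assuming hyper's response builder as used above (Garage's `empty_body()` helper is replaced by `String::new()` here):

```rust
use hyper::{Response, StatusCode};

// Sketch: the no-configuration branch now returns 404 with an empty body.
fn no_lifecycle_response() -> Response<String> {
    Response::builder()
        .status(StatusCode::NOT_FOUND)
        .body(String::new())
        .unwrap()
}
```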
@@ -1,13 +1,20 @@
 use std::collections::HashMap;
-use std::convert::TryInto;
+use std::convert::{TryFrom, TryInto};
+use std::hash::Hasher;
 use std::sync::Arc;
 
 use base64::prelude::*;
+use crc32c::Crc32cHasher as Crc32c;
+use crc32fast::Hasher as Crc32;
 use futures::prelude::*;
 use hyper::{Request, Response};
+use md5::{Digest, Md5};
+use sha1::Sha1;
+use sha2::Sha256;
 
 use garage_table::*;
 use garage_util::data::*;
+use garage_util::error::OkOrMessage;
 
 use garage_model::garage::Garage;
 use garage_model::s3::block_ref_table::*;

@@ -16,10 +23,9 @@ use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
 use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
+use garage_api_common::signature::checksum::*;
 
 use crate::api_server::{ReqBody, ResBody};
-use crate::checksum::*;
 use crate::encryption::EncryptionParams;
 use crate::error::*;
 use crate::put::*;

@@ -43,7 +49,7 @@ pub async fn handle_create_multipart_upload(
     let upload_id = gen_uuid();
     let timestamp = next_timestamp(existing_object.as_ref());
 
-    let headers = get_headers(req.headers())?;
+    let headers = extract_metadata_headers(req.headers())?;
     let meta = ObjectVersionMetaInner {
         headers,
         checksum: None,

@@ -94,7 +100,6 @@ pub async fn handle_put_part(
     key: &str,
     part_number: u64,
     upload_id: &str,
-    content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
     let ReqCtx { garage, .. } = &ctx;
 

@@ -105,17 +110,30 @@ pub async fn handle_put_part(
             Some(x) => Some(x.to_str()?.to_string()),
             None => None,
         },
-        sha256: content_sha256,
+        sha256: None,
         extra: request_checksum_value(req.headers())?,
     };
 
-    // Read first chuck, and at the same time try to get object to see if it exists
     let key = key.to_string();
 
-    let (req_head, req_body) = req.into_parts();
-    let stream = body_stream(req_body);
+    let (req_head, mut req_body) = req.into_parts();
+
+    // Before we stream the body, configure the needed checksums.
+    req_body.add_expected_checksums(expected_checksums.clone());
+    // TODO: avoid parsing encryption headers twice...
+    if !EncryptionParams::new_from_headers(&garage, &req_head.headers)?.is_encrypted() {
+        // For non-encrypted objects, we need to compute the md5sum in all cases
+        // (even if content-md5 is not set), because it is used as an etag of the
+        // part, which is in turn used in the etag computation of the whole object
+        req_body.add_md5();
+    }
+
+    let (stream, stream_checksums) = req_body.streaming_with_checksums();
+    let stream = stream.map_err(Error::from);
+
     let mut chunker = StreamChunker::new(stream, garage.config.block_size);
 
+    // Read first chuck, and at the same time try to get object to see if it exists
     let ((_, object_version, mut mpu), first_block) =
         futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
 

@@ -172,21 +190,21 @@ pub async fn handle_put_part(
     garage.version_table.insert(&version).await?;
 
     // Copy data to version
-    let checksummer =
-        Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
-    let (total_size, checksums, _) = read_and_put_blocks(
+    let (total_size, _, _) = read_and_put_blocks(
         &ctx,
         &version,
         encryption,
         part_number,
         first_block,
-        &mut chunker,
-        checksummer,
+        chunker,
+        Checksummer::new(),
     )
     .await?;
 
-    // Verify that checksums map
-    checksums.verify(&expected_checksums)?;
+    // Verify that checksums match
+    let checksums = stream_checksums
+        .await
+        .ok_or_internal_error("checksum calculation")??;
 
     // Store part etag in version
     let etag = encryption.etag_from_md5(&checksums.md5);

@@ -248,7 +266,6 @@ pub async fn handle_complete_multipart_upload(
     req: Request<ReqBody>,
     key: &str,
     upload_id: &str,
-    content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
     let ReqCtx {
         garage,

@@ -260,11 +277,7 @@ pub async fn handle_complete_multipart_upload(
 
     let expected_checksum = request_checksum_value(&req_head.headers)?;
 
-    let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
-
-    if let Some(content_sha256) = content_sha256 {
-        verify_signed_content(content_sha256, &body[..])?;
-    }
+    let body = req_body.collect().await?;
 
     let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
     let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)

@@ -602,3 +615,99 @@ fn parse_complete_multipart_upload_body(
 
     Some(parts)
 }
+
+// ====== checksummer ====
+
+#[derive(Default)]
+pub(crate) struct MultipartChecksummer {
+    pub md5: Md5,
+    pub extra: Option<MultipartExtraChecksummer>,
+}
+
+pub(crate) enum MultipartExtraChecksummer {
+    Crc32(Crc32),
+    Crc32c(Crc32c),
+    Sha1(Sha1),
+    Sha256(Sha256),
+}
+
+impl MultipartChecksummer {
+    pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
+        Self {
+            md5: Md5::new(),
+            extra: match algo {
+                None => None,
+                Some(ChecksumAlgorithm::Crc32) => {
+                    Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
+                }
+                Some(ChecksumAlgorithm::Crc32c) => {
+                    Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
+                }
+                Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
+                Some(ChecksumAlgorithm::Sha256) => {
+                    Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
+                }
+            },
+        }
+    }
+
+    pub(crate) fn update(
+        &mut self,
+        etag: &str,
+        checksum: Option<ChecksumValue>,
+    ) -> Result<(), Error> {
+        self.md5
+            .update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
+        match (&mut self.extra, checksum) {
+            (None, _) => (),
+            (
+                Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
+                Some(ChecksumValue::Crc32(x)),
+            ) => {
+                crc32.update(&x);
+            }
+            (
+                Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
+                Some(ChecksumValue::Crc32c(x)),
+            ) => {
+                crc32c.write(&x);
+            }
+            (Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
+                sha1.update(&x);
+            }
+            (
+                Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
+                Some(ChecksumValue::Sha256(x)),
+            ) => {
+                sha256.update(&x);
+            }
+            (Some(_), b) => {
+                return Err(Error::internal_error(format!(
+                    "part checksum was not computed correctly, got: {:?}",
+                    b
+                )))
+            }
+        }
+        Ok(())
+    }
+
+    pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
+        let md5 = self.md5.finalize()[..].try_into().unwrap();
+        let extra = match self.extra {
+            None => None,
+            Some(MultipartExtraChecksummer::Crc32(crc32)) => {
+                Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
+            }
+            Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
+                u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
+            )),
+            Some(MultipartExtraChecksummer::Sha1(sha1)) => {
+                Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
+            }
+            Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
+                sha256.finalize()[..].try_into().unwrap(),
+            )),
+        };
+        (md5, extra)
+    }
+}
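`MultipartChecksummer::update` above feeds the raw bytes of each part's md5 (obtained by hex-decoding the part's ETag) into a whole-object md5, which is the standard S3 multipart ETag construction. A self-contained sketch; the `-N` part-count suffix is S3's convention, and Garage's exact formatting lives elsewhere:

```rust
use md5::{Digest, Md5};

// Compute an S3-style multipart ETag from hex part ETags: md5 over the
// concatenated raw digests, suffixed with the number of parts.
fn multipart_etag(part_etags: &[&str]) -> Option<String> {
    let mut md5 = Md5::new();
    for etag in part_etags {
        // each part ETag is the hex md5 of that part's data
        md5.update(hex::decode(etag).ok()?);
    }
    Some(format!("{}-{}", hex::encode(&md5.finalize()[..]), part_etags.len()))
}
```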
@@ -18,13 +18,13 @@ use garage_model::s3::object_table::*;
 
 use garage_api_common::cors::*;
 use garage_api_common::helpers::*;
+use garage_api_common::signature::checksum::*;
 use garage_api_common::signature::payload::{verify_v4, Authorization};
 
 use crate::api_server::ResBody;
-use crate::checksum::*;
 use crate::encryption::EncryptionParams;
 use crate::error::*;
-use crate::put::{get_headers, save_stream, ChecksumMode};
+use crate::put::{extract_metadata_headers, save_stream, ChecksumMode};
 use crate::xml as s3_xml;
 
 pub async fn handle_post_object(

@@ -141,10 +141,26 @@ pub async fn handle_post_object(
 
     let mut conditions = decoded_policy.into_conditions()?;
 
+    // If there are conditions on the bucket name, check these against the actual bucket_name rather
+    // than the one in params, which is allowed to be absent.
+    if let Some(conds) = conditions.params.remove("bucket") {
+        for cond in conds {
+            let ok = match cond {
+                Operation::Equal(s) => s.as_str() == bucket_name,
+                Operation::StartsWith(s) => bucket_name.starts_with(&s),
+            };
+            if !ok {
+                return Err(Error::bad_request(
+                    "Key 'bucket' has value not allowed in policy",
+                ));
+            }
+        }
+    }
+
     for (param_key, value) in params.iter() {
         let param_key = param_key.as_str();
         match param_key {
-            "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
+            "policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields
             "content-type" => {
                 let conds = conditions.params.remove("content-type").ok_or_else(|| {
                     Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))

@@ -216,8 +232,9 @@ pub async fn handle_post_object(
 
     // if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
     // around here to make sure the rest of the machinery takes our acl into account.
-    let headers = get_headers(&params)?;
+    let headers = extract_metadata_headers(&params)?;
 
+    let checksum_algorithm = request_checksum_algorithm(&params)?;
     let expected_checksums = ExpectedChecksums {
         md5: params
             .get("content-md5")

@@ -225,7 +242,9 @@ pub async fn handle_post_object(
             .transpose()?
             .map(str::to_string),
         sha256: None,
-        extra: request_checksum_algorithm_value(&params)?,
+        extra: checksum_algorithm
+            .map(|algo| extract_checksum_value(&params, algo))
+            .transpose()?,
     };
 
     let meta = ObjectVersionMetaInner {
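The added bucket check evaluates each policy condition attached to the `bucket` key against the bucket name from the request URL rather than the form field, which is allowed to be absent. A sketch of the matching rule in isolation, reusing the two condition shapes visible above (the helper name is ours):

```rust
// Same two operations as in the diff above.
enum Operation {
    Equal(String),
    StartsWith(String),
}

// All conditions on the "bucket" key must hold for the request to pass.
fn bucket_matches_policy(conds: &[Operation], bucket_name: &str) -> bool {
    conds.iter().all(|cond| match cond {
        Operation::Equal(s) => s.as_str() == bucket_name,
        Operation::StartsWith(s) => bucket_name.starts_with(s.as_str()),
    })
}
```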
@@ -31,13 +31,13 @@ use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
 use garage_api_common::helpers::*;
+use garage_api_common::signature::body::StreamingChecksumReceiver;
+use garage_api_common::signature::checksum::*;
 
 use crate::api_server::{ReqBody, ResBody};
-use crate::checksum::*;
 use crate::encryption::EncryptionParams;
 use crate::error::*;
 
-const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
+use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
 
 pub(crate) struct SaveStreamResult {
     pub(crate) version_uuid: Uuid,

@@ -48,6 +48,10 @@ pub(crate) struct SaveStreamResult {
 
 pub(crate) enum ChecksumMode<'a> {
     Verify(&'a ExpectedChecksums),
+    VerifyFrom {
+        checksummer: StreamingChecksumReceiver,
+        trailer_algo: Option<ChecksumAlgorithm>,
+    },
     Calculate(Option<ChecksumAlgorithm>),
 }
 

@@ -55,10 +59,9 @@ pub async fn handle_put(
     ctx: ReqCtx,
     req: Request<ReqBody>,
     key: &String,
-    content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
     // Retrieve interesting headers from request
-    let headers = get_headers(req.headers())?;
+    let headers = extract_metadata_headers(req.headers())?;
     debug!("Object headers: {:?}", headers);
 
     let expected_checksums = ExpectedChecksums {

@@ -66,9 +69,10 @@ pub async fn handle_put(
             Some(x) => Some(x.to_str()?.to_string()),
             None => None,
         },
-        sha256: content_sha256,
+        sha256: None,
         extra: request_checksum_value(req.headers())?,
     };
+    let trailer_checksum_algorithm = request_trailer_checksum_algorithm(req.headers())?;
 
     let meta = ObjectVersionMetaInner {
         headers,

@@ -78,7 +82,19 @@ pub async fn handle_put(
     // Determine whether object should be encrypted, and if so the key
     let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
 
-    let stream = body_stream(req.into_body());
+    // The request body is a special ReqBody object (see garage_api_common::signature::body)
+    // which supports calculating checksums while streaming the data.
+    // Before we start streaming, we configure it to calculate all the checksums we need.
+    let mut req_body = req.into_body();
+    req_body.add_expected_checksums(expected_checksums.clone());
+    if !encryption.is_encrypted() {
+        // For non-encrypted objects, we need to compute the md5sum in all cases
+        // (even if content-md5 is not set), because it is used as the object etag
+        req_body.add_md5();
+    }
+
+    let (stream, checksummer) = req_body.streaming_with_checksums();
+    let stream = stream.map_err(Error::from);
 
     let res = save_stream(
         &ctx,

@@ -86,7 +102,10 @@ pub async fn handle_put(
         encryption,
         stream,
         key,
-        ChecksumMode::Verify(&expected_checksums),
+        ChecksumMode::VerifyFrom {
+            checksummer,
+            trailer_algo: trailer_checksum_algorithm,
+        },
     )
     .await?;
 

@@ -122,10 +141,15 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
     let version_uuid = gen_uuid();
     let version_timestamp = next_timestamp(existing_object.as_ref());
 
-    let mut checksummer = match checksum_mode {
+    let mut checksummer = match &checksum_mode {
         ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
         ChecksumMode::Calculate(algo) => {
-            Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
+            Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(*algo)
         }
+        ChecksumMode::VerifyFrom { .. } => {
+            // Checksums are calculated by the garage_api_common::signature module
+            // so here we can just have an empty checksummer that does nothing
+            Checksummer::new()
+        }
     };
 

@@ -133,7 +157,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
     // as "inline data". We can then return immediately.
     if first_block.len() < INLINE_THRESHOLD {
         checksummer.update(&first_block);
-        let checksums = checksummer.finalize();
+        let mut checksums = checksummer.finalize();
 
         match checksum_mode {
             ChecksumMode::Verify(expected) => {

@@ -142,6 +166,18 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
             ChecksumMode::Calculate(algo) => {
                 meta.checksum = checksums.extract(algo);
             }
+            ChecksumMode::VerifyFrom {
+                checksummer,
+                trailer_algo,
+            } => {
+                drop(chunker);
+                checksums = checksummer
+                    .await
+                    .ok_or_internal_error("checksum calculation")??;
+                if let Some(algo) = trailer_algo {
+                    meta.checksum = checksums.extract(Some(algo));
+                }
+            }
         };
 
         let size = first_block.len() as u64;

@@ -213,13 +249,13 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
     garage.version_table.insert(&version).await?;
 
     // Transfer data
-    let (total_size, checksums, first_block_hash) = read_and_put_blocks(
+    let (total_size, mut checksums, first_block_hash) = read_and_put_blocks(
         ctx,
         &version,
         encryption,
         1,
         first_block,
-        &mut chunker,
+        chunker,
         checksummer,
     )
     .await?;

@@ -232,6 +268,17 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
         ChecksumMode::Calculate(algo) => {
             meta.checksum = checksums.extract(algo);
         }
+        ChecksumMode::VerifyFrom {
+            checksummer,
+            trailer_algo,
+        } => {
+            checksums = checksummer
+                .await
+                .ok_or_internal_error("checksum calculation")??;
+            if let Some(algo) = trailer_algo {
+                meta.checksum = checksums.extract(Some(algo));
+            }
+        }
     };
 
     // Verify quotas are respsected

@@ -332,7 +379,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
     encryption: EncryptionParams,
     part_number: u64,
     first_block: Bytes,
-    chunker: &mut StreamChunker<S>,
+    mut chunker: StreamChunker<S>,
     checksummer: Checksummer,
 ) -> Result<(u64, Checksums, Hash), Error> {
     let tracer = opentelemetry::global::tracer("garage");

@@ -444,7 +491,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
     };
     let recv_next = async {
         // If more than a maximum number of writes are in progress, don't add more for now
-        if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
+        if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request {
             futures::future::pending().await
         } else {
             block_rx3.recv().await

@@ -601,7 +648,9 @@ impl Drop for InterruptedCleanup {
 
 // ============ helpers ============
 
-pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
+pub(crate) fn extract_metadata_headers(
+    headers: &HeaderMap<HeaderValue>,
+) -> Result<HeaderList, Error> {
     let mut ret = Vec::new();
 
     // Preserve standard headers

@@ -627,6 +676,18 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList
                 std::str::from_utf8(value.as_bytes())?.to_string(),
             ));
         }
+        if name == X_AMZ_WEBSITE_REDIRECT_LOCATION {
+            let value = std::str::from_utf8(value.as_bytes())?.to_string();
+            if !(value.starts_with("/")
+                || value.starts_with("http://")
+                || value.starts_with("https://"))
+            {
+                return Err(Error::bad_request(format!(
+                    "Invalid {X_AMZ_WEBSITE_REDIRECT_LOCATION} header",
+                )));
+            }
+            ret.push((X_AMZ_WEBSITE_REDIRECT_LOCATION.to_string(), value));
+        }
     }
 
     Ok(ret)
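The new `x-amz-website-redirect-location` branch in `extract_metadata_headers` only accepts site-local paths or absolute http(s) URLs before storing the header as object metadata, which keeps schemes like `javascript:` out of website redirects. The predicate in isolation, with a small test (the helper name is ours):

```rust
// Accept only site-local paths and absolute http(s) URLs as redirect targets.
fn is_valid_redirect_location(value: &str) -> bool {
    value.starts_with('/') || value.starts_with("http://") || value.starts_with("https://")
}

#[test]
fn redirect_location_validation() {
    assert!(is_valid_redirect_location("/other/page.html"));
    assert!(is_valid_redirect_location("https://example.com/"));
    assert!(!is_valid_redirect_location("javascript:alert(1)"));
    assert!(!is_valid_redirect_location("ftp://example.com/file"));
}
```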
@@ -1,19 +1,19 @@
 use quick_xml::de::from_reader;
 
-use http_body_util::BodyExt;
-use hyper::{Request, Response, StatusCode};
+use hyper::{header::HeaderName, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
 
 use garage_model::bucket_table::*;
-use garage_util::data::*;
 
 use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
 
 use crate::api_server::{ReqBody, ResBody};
 use crate::error::*;
 use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
 
+pub const X_AMZ_WEBSITE_REDIRECT_LOCATION: HeaderName =
+    HeaderName::from_static("x-amz-website-redirect-location");
+
 pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
     let ReqCtx { bucket_params, .. } = ctx;
     if let Some(website) = bucket_params.website_config.get() {

@@ -61,7 +61,6 @@ pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Err
 pub async fn handle_put_website(
     ctx: ReqCtx,
     req: Request<ReqBody>,
-    content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
     let ReqCtx {
         garage,

@@ -70,11 +69,7 @@ pub async fn handle_put_website(
         ..
     } = ctx;
 
-    let body = BodyExt::collect(req.into_body()).await?.to_bytes();
-
-    if let Some(content_sha256) = content_sha256 {
-        verify_signed_content(content_sha256, &body[..])?;
-    }
+    let body = req.into_body().collect().await?;
 
     let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
     conf.validate()?;
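Both this file and get.rs above replace bare `&str` header constants with `HeaderName` constants. A minimal sketch of the pattern with a hypothetical header name: `from_static` validates the (lowercase) name when the const is evaluated, so later lookups no longer re-parse the string:

```rust
use http::header::HeaderName;

// Hypothetical header, used only to illustrate the constant pattern adopted
// in website.rs and get.rs; from_static panics on invalid (e.g. uppercase)
// names, at compile time when used in a const.
const X_EXAMPLE_HEADER: HeaderName = HeaderName::from_static("x-example-header");

fn has_example_header(headers: &http::HeaderMap) -> bool {
    headers.contains_key(&X_EXAMPLE_HEADER)
}
```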
@@ -13,6 +13,10 @@ pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
     s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/")
 }
 
+pub fn xmlns_xsi_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
+    s.serialize_str("http://www.w3.org/2001/XMLSchema-instance")
+}
+
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Value(#[serde(rename = "$value")] pub String);
 

@@ -319,6 +323,42 @@ pub struct PostObject {
     pub etag: Value,
 }
 
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct Grantee {
+    #[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")]
+    pub xmlns_xsi: (),
+    #[serde(rename = "xsi:type")]
+    pub typ: String,
+    #[serde(rename = "DisplayName")]
+    pub display_name: Option<Value>,
+    #[serde(rename = "ID")]
+    pub id: Option<Value>,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct Grant {
+    #[serde(rename = "Grantee")]
+    pub grantee: Grantee,
+    #[serde(rename = "Permission")]
+    pub permission: Value,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct AccessControlList {
+    #[serde(rename = "Grant")]
+    pub entries: Vec<Grant>,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct AccessControlPolicy {
+    #[serde(serialize_with = "xmlns_tag")]
+    pub xmlns: (),
+    #[serde(rename = "Owner")]
+    pub owner: Option<Owner>,
+    #[serde(rename = "AccessControlList")]
+    pub acl: AccessControlList,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -427,6 +467,43 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn get_bucket_acl_result() -> Result<(), ApiError> {
+        let grant = Grant {
+            grantee: Grantee {
+                xmlns_xsi: (),
+                typ: "CanonicalUser".to_string(),
+                display_name: Some(Value("owner_name".to_string())),
+                id: Some(Value("qsdfjklm".to_string())),
+            },
+            permission: Value("FULL_CONTROL".to_string()),
+        };
+
+        let get_bucket_acl = AccessControlPolicy {
+            xmlns: (),
+            owner: None,
+            acl: AccessControlList {
+                entries: vec![grant],
+            },
+        };
+        assert_eq!(
+            to_xml_with_header(&get_bucket_acl)?,
+            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
+<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
+<AccessControlList>\
+<Grant>\
+<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
+<DisplayName>owner_name</DisplayName>\
+<ID>qsdfjklm</ID>\
+</Grantee>\
+<Permission>FULL_CONTROL</Permission>\
+</Grant>\
+</AccessControlList>\
+</AccessControlPolicy>"
+        );
+        Ok(())
+    }
+
     #[test]
     fn delete_result() -> Result<(), ApiError> {
         let delete_result = DeleteResult {
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "1.0.1"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -50,6 +50,8 @@ pub const INLINE_THRESHOLD: usize = 3072;
 // to delete the block locally.
 pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);
 
+const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);
+
 /// RPC messages used to share blocks of data between nodes
 #[derive(Debug, Serialize, Deserialize)]
 pub enum BlockRpc {

@@ -87,6 +89,7 @@ pub struct BlockManager {
     disable_scrub: bool,
 
     mutation_lock: Vec<Mutex<BlockManagerLocked>>,
+    read_semaphore: Semaphore,
 
     pub rc: BlockRc,
     pub resync: BlockResyncManager,

@@ -176,6 +179,8 @@ impl BlockManager {
                 .iter()
                 .map(|_| Mutex::new(BlockManagerLocked()))
                 .collect::<Vec<_>>(),
+
+            read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
             rc,
             resync,
             system,

@@ -408,8 +413,8 @@ impl BlockManager {
     }
 
     /// Get number of items in the refcount table
-    pub fn rc_len(&self) -> Result<usize, Error> {
-        Ok(self.rc.rc_table.len()?)
+    pub fn rc_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.rc.rc_table.approximate_len()?)
     }
 
     /// Send command to start/stop/manager scrub worker

@@ -427,7 +432,7 @@ impl BlockManager {
 
     /// List all resync errors
     pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
-        let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
+        let mut blocks = Vec::with_capacity(self.resync.errors.approximate_len()?);
         for ent in self.resync.errors.iter()? {
             let (hash, cnt) = ent?;
             let cnt = ErrorCounter::decode(&cnt);

@@ -557,9 +562,6 @@ impl BlockManager {
         match self.find_block(hash).await {
             Some(p) => self.read_block_from(hash, &p).await,
             None => {
-                // Not found but maybe we should have had it ??
-                self.resync
-                    .put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
                 return Err(Error::Message(format!(
                     "block {:?} not found on node",
                     hash

@@ -581,6 +583,15 @@ impl BlockManager {
     ) -> Result<DataBlock, Error> {
         let (header, path) = block_path.as_parts_ref();
 
+        let permit = tokio::select! {
+            sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
+            _ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
+                self.metrics.block_read_semaphore_timeouts.add(1);
+                debug!("read block {:?}: read_semaphore acquire timeout", hash);
+                return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
+            }
+        };
+
         let mut f = fs::File::open(&path).await?;
         let mut data = vec![];
         f.read_to_end(&mut data).await?;

@@ -605,6 +616,8 @@ impl BlockManager {
             return Err(Error::CorruptData(*hash));
         }
 
+        drop(permit);
+
         Ok(data)
     }
 

@@ -770,6 +783,7 @@ impl BlockManagerLocked {
 
         let mut f = fs::File::create(&path_tmp).await?;
         f.write_all(data).await?;
+        f.flush().await?;
         mgr.metrics.bytes_written.add(data.len() as u64);
 
         if mgr.data_fsync {
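The new read path bounds concurrent block reads with a `tokio::sync::Semaphore` and refuses to queue indefinitely: if no permit arrives within `BLOCK_READ_SEMAPHORE_TIMEOUT`, the read fails fast and a metric is bumped. A standalone sketch of the same acquire-with-timeout pattern (names and error type are ours):

```rust
use std::time::Duration;
use tokio::sync::{Semaphore, SemaphorePermit};

// Try to acquire a read permit, but give up after `timeout` instead of
// letting reads pile up behind a saturated semaphore.
async fn acquire_read_permit(
    sem: &Semaphore,
    timeout: Duration,
) -> Result<SemaphorePermit<'_>, &'static str> {
    tokio::select! {
        permit = sem.acquire() => permit.map_err(|_| "semaphore closed"),
        _ = tokio::time::sleep(timeout) => Err("read permit acquire timeout"),
    }
}
```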
@@ -22,6 +22,7 @@ pub struct BlockManagerMetrics {
 
     pub(crate) bytes_read: BoundCounter<u64>,
     pub(crate) block_read_duration: BoundValueRecorder<f64>,
+    pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
     pub(crate) bytes_written: BoundCounter<u64>,
     pub(crate) block_write_duration: BoundValueRecorder<f64>,
     pub(crate) delete_counter: BoundCounter<u64>,

@@ -50,7 +51,7 @@ impl BlockManagerMetrics {
             .init(),
         _rc_size: meter
             .u64_value_observer("block.rc_size", move |observer| {
-                if let Ok(value) = rc_tree.len() {
+                if let Ok(value) = rc_tree.approximate_len() {
                     observer.observe(value as u64, &[])
                 }
             })

@@ -58,7 +59,7 @@ impl BlockManagerMetrics {
             .init(),
         _resync_queue_len: meter
             .u64_value_observer("block.resync_queue_length", move |observer| {
-                if let Ok(value) = resync_queue.len() {
+                if let Ok(value) = resync_queue.approximate_len() {
                     observer.observe(value as u64, &[]);
                 }
             })

@@ -68,7 +69,7 @@ impl BlockManagerMetrics {
             .init(),
         _resync_errored_blocks: meter
            .u64_value_observer("block.resync_errored_blocks", move |observer| {
-                if let Ok(value) = resync_errors.len() {
+                if let Ok(value) = resync_errors.approximate_len() {
                     observer.observe(value as u64, &[]);
                 }
             })

@@ -119,6 +120,11 @@ impl BlockManagerMetrics {
             .with_description("Duration of block read operations")
             .init()
             .bind(&[]),
+        block_read_semaphore_timeouts: meter
+            .u64_counter("block.read_semaphore_timeouts")
+            .with_description("Number of block reads that failed due to semaphore acquire timeout")
+            .init()
+            .bind(&[]),
         bytes_written: meter
             .u64_counter("block.bytes_written")
             .with_description("Number of bytes written to disk")
@@ -106,13 +106,13 @@ impl BlockResyncManager {
     }
 
     /// Get length of resync queue
-    pub fn queue_len(&self) -> Result<usize, Error> {
-        Ok(self.queue.len()?)
+    pub fn queue_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.queue.approximate_len()?)
     }
 
     /// Get number of blocks that have an error
-    pub fn errors_len(&self) -> Result<usize, Error> {
-        Ok(self.errors.len()?)
+    pub fn errors_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.errors.approximate_len()?)
     }
 
     /// Clear the error counter for a block and put it in queue immediately

@@ -133,6 +133,14 @@ impl BlockResyncManager {
         )))
     }
 
+    /// Clear the entire resync queue and list of errored blocks
+    /// Corresponds to `garage repair clear-resync-queue`
+    pub fn clear_resync_queue(&self) -> Result<(), Error> {
+        self.queue.clear()?;
+        self.errors.clear()?;
+        Ok(())
+    }
+
     pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
         let notify = self.notify.clone();
         vars.register_rw(

@@ -548,9 +556,11 @@ impl Worker for ResyncWorker {
         }
 
         WorkerStatus {
-            queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
+            queue_length: Some(self.manager.resync.queue_approximate_len().unwrap_or(0) as u64),
             tranquility: Some(tranquility),
-            persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
+            persistent_errors: Some(
+                self.manager.resync.errors_approximate_len().unwrap_or(0) as u64
+            ),
             ..Default::default()
         }
     }
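The `len` → `approximate_len` renames here and in the metrics module mirror a renamed accessor on garage_db trees: with the LSM-based fjall engine added below, an exact count would require a full scan, so the API now advertises the number as an estimate. A hypothetical sketch of the accessor's shape (this trait is illustrative, not garage_db's actual interface):

```rust
// Illustrative only. B-tree style engines (LMDB, sqlite) can return exact
// counts cheaply; LSM engines can only return an estimate without a scan.
pub trait ApproximateCount {
    type Error;

    fn approximate_len(&self) -> Result<usize, Self::Error>;
}
```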
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "1.0.1"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -12,14 +12,18 @@ readme = "../../README.md"
 path = "lib.rs"
 
 [dependencies]
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true
 
 heed = { workspace = true, optional = true }
 
 rusqlite = { workspace = true, optional = true, features = ["backup"] }
 r2d2 = { workspace = true, optional = true }
 r2d2_sqlite = { workspace = true, optional = true }
 
+fjall = { workspace = true, optional = true }
+parking_lot = { workspace = true, optional = true }
+
 [dev-dependencies]
 mktemp.workspace = true
 

@@ -27,4 +31,5 @@ mktemp.workspace = true
 default = [ "lmdb", "sqlite" ]
 bundled-libs = [ "rusqlite?/bundled" ]
 lmdb = [ "heed" ]
+fjall = [ "dep:fjall", "dep:parking_lot" ]
 sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
src/db/fjall_adapter.rs
Normal file
453
src/db/fjall_adapter.rs
Normal file
|
|
@ -0,0 +1,453 @@
|
|||
use core::ops::Bound;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
|
||||
|
||||
use fjall::{
|
||||
PartitionCreateOptions, PersistMode, TransactionalKeyspace, TransactionalPartitionHandle,
|
||||
WriteTransaction,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
open::{Engine, OpenOpt},
|
||||
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
|
||||
TxResult, TxValueIter, Value, ValueIter,
|
||||
};
|
||||
|
||||
pub use fjall;
|
||||
|
||||
// --
|
||||
|
||||
pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
|
||||
info!("Opening Fjall database at: {}", path.display());
|
||||
if opt.fsync {
|
||||
return Err(Error(
|
||||
"metadata_fsync is not supported with the Fjall database engine".into(),
|
||||
));
|
||||
}
|
||||
let mut config = fjall::Config::new(path);
|
||||
if let Some(block_cache_size) = opt.fjall_block_cache_size {
|
||||
config = config.cache_size(block_cache_size as u64);
|
||||
}
|
||||
let keyspace = config.open_transactional()?;
|
||||
Ok(FjallDb::init(keyspace))
|
||||
}
|
||||
|
||||
// -- err
|
||||
|
||||
impl From<fjall::Error> for Error {
|
||||
fn from(e: fjall::Error) -> Error {
|
||||
Error(format!("fjall: {}", e).into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<fjall::LsmError> for Error {
|
||||
fn from(e: fjall::LsmError) -> Error {
|
||||
Error(format!("fjall lsm_tree: {}", e).into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<fjall::Error> for TxOpError {
|
||||
fn from(e: fjall::Error) -> TxOpError {
|
||||
TxOpError(e.into())
|
||||
}
|
||||
}
|
||||
|
||||
// -- db
|
||||
|
||||
pub struct FjallDb {
|
||||
keyspace: TransactionalKeyspace,
|
||||
trees: RwLock<Vec<(String, TransactionalPartitionHandle)>>,
|
||||
}
|
||||
|
||||
type ByteRefRangeBound<'r> = (Bound<&'r [u8]>, Bound<&'r [u8]>);
|
||||
|
||||
impl FjallDb {
|
||||
pub fn init(keyspace: TransactionalKeyspace) -> Db {
|
||||
let s = Self {
|
||||
keyspace,
|
||||
trees: RwLock::new(Vec::new()),
|
||||
};
|
||||
Db(Arc::new(s))
|
||||
}
|
||||
|
||||
fn get_tree(
|
||||
&self,
|
||||
i: usize,
|
||||
) -> Result<MappedRwLockReadGuard<'_, TransactionalPartitionHandle>> {
|
||||
RwLockReadGuard::try_map(self.trees.read(), |trees: &Vec<_>| {
|
||||
trees.get(i).map(|tup| &tup.1)
|
||||
})
|
||||
.map_err(|_| Error("invalid tree id".into()))
|
||||
}
|
||||
}
|
||||
|
||||
impl IDb for FjallDb {
|
||||
fn engine(&self) -> String {
|
||||
"Fjall (EXPERIMENTAL!)".into()
|
||||
}
|
||||
|
||||
fn open_tree(&self, name: &str) -> Result<usize> {
|
||||
let mut trees = self.trees.write();
|
||||
let safe_name = encode_name(name)?;
|
||||
if let Some(i) = trees.iter().position(|(name, _)| *name == safe_name) {
|
||||
Ok(i)
|
||||
} else {
|
||||
let tree = self
|
||||
.keyspace
|
||||
.open_partition(&safe_name, PartitionCreateOptions::default())?;
|
||||
let i = trees.len();
|
||||
trees.push((safe_name, tree));
|
||||
Ok(i)
|
||||
}
|
||||
}
|
||||
|
||||
fn list_trees(&self) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.keyspace
|
||||
.list_partitions()
|
||||
.iter()
|
||||
.map(|n| decode_name(&n))
|
||||
.collect::<Result<Vec<_>>>()?)
|
||||
}
|
||||
|
||||
fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
|
||||
std::fs::create_dir_all(base_path)?;
|
||||
let path = Engine::Fjall.db_path(base_path);
|
||||
|
||||
let source_state = self.keyspace.read_tx();
|
||||
let copy_keyspace = fjall::Config::new(path).open()?;
|
||||
|
||||
for partition_name in self.keyspace.list_partitions() {
|
||||
let source_partition = self
|
||||
.keyspace
|
||||
.open_partition(&partition_name, PartitionCreateOptions::default())?;
|
||||
let copy_partition =
|
||||
copy_keyspace.open_partition(&partition_name, PartitionCreateOptions::default())?;
|
||||
|
||||
for entry in source_state.iter(&source_partition) {
|
||||
let (key, value) = entry?;
|
||||
copy_partition.insert(key, value)?;
|
||||
}
|
||||
}
|
||||
|
||||
copy_keyspace.persist(PersistMode::SyncAll)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
fn get(&self, tree_idx: usize, key: &[u8]) -> Result<Option<Value>> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
let val = tx.get(&tree, key)?;
|
||||
match val {
|
||||
None => Ok(None),
|
||||
Some(v) => Ok(Some(v.to_vec())),
|
||||
}
|
||||
}
|
||||
|
||||
fn approximate_len(&self, tree_idx: usize) -> Result<usize> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
Ok(tree.approximate_len())
|
||||
}
|
||||
fn is_empty(&self, tree_idx: usize) -> Result<bool> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
Ok(tx.is_empty(&tree)?)
|
||||
}
|
||||
|
||||
fn insert(&self, tree_idx: usize, key: &[u8], value: &[u8]) -> Result<()> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let mut tx = self.keyspace.write_tx();
|
||||
tx.insert(&tree, key, value);
|
||||
tx.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove(&self, tree_idx: usize, key: &[u8]) -> Result<()> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let mut tx = self.keyspace.write_tx();
|
||||
tx.remove(&tree, key);
|
||||
tx.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn clear(&self, tree_idx: usize) -> Result<()> {
|
||||
let mut trees = self.trees.write();
|
||||
|
||||
if tree_idx >= trees.len() {
|
||||
return Err(Error("invalid tree id".into()));
|
||||
}
|
||||
let (name, tree) = trees.remove(tree_idx);
|
||||
|
||||
self.keyspace.delete_partition(tree)?;
|
||||
let tree = self
|
||||
.keyspace
|
||||
.open_partition(&name, PartitionCreateOptions::default())?;
|
||||
trees.insert(tree_idx, (name, tree));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn iter(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
Ok(Box::new(tx.iter(&tree).map(iterator_remap)))
|
||||
}
|
||||
|
||||
fn iter_rev(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
Ok(Box::new(tx.iter(&tree).rev().map(iterator_remap)))
|
||||
}
|
||||
|
||||
fn range<'r>(
|
||||
&self,
|
||||
tree_idx: usize,
|
||||
low: Bound<&'r [u8]>,
|
||||
high: Bound<&'r [u8]>,
|
||||
) -> Result<ValueIter<'_>> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
Ok(Box::new(
|
||||
tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
|
||||
.map(iterator_remap),
|
||||
))
|
||||
}
|
||||
fn range_rev<'r>(
|
||||
&self,
|
||||
tree_idx: usize,
|
||||
low: Bound<&'r [u8]>,
|
||||
high: Bound<&'r [u8]>,
|
||||
) -> Result<ValueIter<'_>> {
|
||||
let tree = self.get_tree(tree_idx)?;
|
||||
let tx = self.keyspace.read_tx();
|
||||
Ok(Box::new(
|
||||
tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
|
||||
.rev()
|
||||
.map(iterator_remap),
|
||||
))
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
|
||||
let trees = self.trees.read();
|
||||
let mut tx = FjallTx {
|
||||
trees: &trees[..],
|
||||
tx: self.keyspace.write_tx(),
|
||||
};
|
||||
|
||||
let res = f.try_on(&mut tx);
|
||||
match res {
|
||||
TxFnResult::Ok(on_commit) => {
|
||||
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
|
||||
Ok(on_commit)
|
||||
}
|
||||
TxFnResult::Abort => {
|
||||
tx.tx.rollback();
|
||||
Err(TxError::Abort(()))
|
||||
}
|
||||
TxFnResult::DbErr => {
|
||||
tx.tx.rollback();
|
||||
Err(TxError::Db(Error(
|
||||
"(this message will be discarded)".into(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
struct FjallTx<'a> {
|
||||
trees: &'a [(String, TransactionalPartitionHandle)],
|
||||
tx: WriteTransaction<'a>,
|
||||
}
|
||||
|
||||
impl<'a> FjallTx<'a> {
|
||||
fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalPartitionHandle> {
|
||||
self.trees.get(i).map(|tup| &tup.1).ok_or_else(|| {
|
||||
TxOpError(Error(
|
||||
"invalid tree id (it might have been openned after the transaction started)".into(),
            ))
        })
    }
}

impl<'a> ITx for FjallTx<'a> {
    fn get(&self, tree_idx: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
        let tree = self.get_tree(tree_idx)?;
        match self.tx.get(tree, key)? {
            Some(v) => Ok(Some(v.to_vec())),
            None => Ok(None),
        }
    }
    fn len(&self, tree_idx: usize) -> TxOpResult<usize> {
        let tree = self.get_tree(tree_idx)?;
        Ok(self.tx.len(tree)? as usize)
    }

    fn insert(&mut self, tree_idx: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
        let tree = self.get_tree(tree_idx)?.clone();
        self.tx.insert(&tree, key, value);
        Ok(())
    }
    fn remove(&mut self, tree_idx: usize, key: &[u8]) -> TxOpResult<()> {
        let tree = self.get_tree(tree_idx)?.clone();
        self.tx.remove(&tree, key);
        Ok(())
    }
    fn clear(&mut self, _tree_idx: usize) -> TxOpResult<()> {
        unimplemented!("LSM tree clearing in cross-partition transaction is not supported")
    }

    fn iter(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?.clone();
        Ok(Box::new(self.tx.iter(&tree).map(iterator_remap_tx)))
    }
    fn iter_rev(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?.clone();
        Ok(Box::new(self.tx.iter(&tree).rev().map(iterator_remap_tx)))
    }

    fn range<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let low = clone_bound(low);
        let high = clone_bound(high);
        Ok(Box::new(
            self.tx
                .range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
                .map(iterator_remap_tx),
        ))
    }
    fn range_rev<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let low = clone_bound(low);
        let high = clone_bound(high);
        Ok(Box::new(
            self.tx
                .range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
                .rev()
                .map(iterator_remap_tx),
        ))
    }
}

// -- maps fjall's (k, v) to ours

fn iterator_remap(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> Result<(Value, Value)> {
    r.map(|(k, v)| (k.to_vec(), v.to_vec()))
        .map_err(|e| e.into())
}

fn iterator_remap_tx(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> TxOpResult<(Value, Value)> {
    r.map(|(k, v)| (k.to_vec(), v.to_vec()))
        .map_err(|e| e.into())
}

// -- utils to deal with Garage's tightness on Bound lifetimes

type ByteVecBound = Bound<Vec<u8>>;
type ByteVecRangeBounds = (ByteVecBound, ByteVecBound);

fn clone_bound(bound: Bound<&[u8]>) -> ByteVecBound {
    let value = match bound {
        Bound::Excluded(v) | Bound::Included(v) => v.to_vec(),
        Bound::Unbounded => vec![],
    };

    match bound {
        Bound::Included(_) => Bound::Included(value),
        Bound::Excluded(_) => Bound::Excluded(value),
        Bound::Unbounded => Bound::Unbounded,
    }
}
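
// Illustrative note (not part of the diff): clone_bound detaches the bound
// from the borrowed key so it can be stored in the transaction's range
// iterator independently of the caller's slice, e.g.
//   clone_bound(Bound::Included(&b"ab"[..])) == Bound::Included(b"ab".to_vec())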

// -- utils to encode table names --

fn encode_name(s: &str) -> Result<String> {
    let base = 'A' as u32;

    let mut ret = String::with_capacity(s.len() + 10);
    for c in s.chars() {
        if c.is_alphanumeric() || c == '_' || c == '-' || c == '#' {
            ret.push(c);
        } else if c <= u8::MAX as char {
            ret.push('$');
            let c_hi = c as u32 / 16;
            let c_lo = c as u32 % 16;
            ret.push(char::from_u32(base + c_hi).unwrap());
            ret.push(char::from_u32(base + c_lo).unwrap());
        } else {
            return Err(Error(
                format!("table name {} could not be safely encoded", s).into(),
            ));
        }
    }
    Ok(ret)
}

fn decode_name(s: &str) -> Result<String> {
    use std::convert::TryFrom;

    let errfn = || Error(format!("encoded table name {} is invalid", s).into());
    let c_map = |c: char| {
        let c = c as u32;
        let base = 'A' as u32;
        if (base..base + 16).contains(&c) {
            Some(c - base)
        } else {
            None
        }
    };

    let mut ret = String::with_capacity(s.len());
    let mut it = s.chars();
    while let Some(c) = it.next() {
        if c == '$' {
            let c_hi = it.next().and_then(c_map).ok_or_else(errfn)?;
            let c_lo = it.next().and_then(c_map).ok_or_else(errfn)?;
            let c_dec = char::try_from(c_hi * 16 + c_lo).map_err(|_| errfn())?;
            ret.push(c_dec);
        } else {
            ret.push(c);
        }
    }
    Ok(ret)
}
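
// Worked example (illustrative, not part of the diff): with base = 'A', an
// escaped byte becomes '$' plus two characters in 'A'..='P' encoding its high
// and low hex nibbles. A space is 0x20, so:
//   encode_name("test name") == Ok("test$CAname".to_string())
//   decode_name("test$CAname") == Ok("test name".to_string())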

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_encdec_name() {
        for name in [
            "testname",
            "test_name",
            "test name",
            "test$name",
            "test:name@help.me$get/this**right",
        ] {
            let encname = encode_name(name).unwrap();
            assert!(!encname.contains(' '));
            assert!(!encname.contains('.'));
            assert!(!encname.contains('*'));
            assert_eq!(*name, decode_name(&encname).unwrap());
        }
    }
}

@ -1,6 +1,8 @@
#[macro_use]
extern crate tracing;

#[cfg(feature = "fjall")]
pub mod fjall_adapter;
#[cfg(feature = "lmdb")]
pub mod lmdb_adapter;
#[cfg(feature = "sqlite")]

@ -18,7 +20,7 @@ use std::cell::Cell;
use std::path::PathBuf;
use std::sync::Arc;

use err_derive::Error;
use thiserror::Error;

pub use open::*;

@ -42,7 +44,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
// ----

#[derive(Debug, Error)]
#[error(display = "{}", _0)]
#[error("{0}")]
pub struct Error(pub Cow<'static, str>);

impl From<std::io::Error> for Error {

@ -54,7 +56,7 @@ impl From<std::io::Error> for Error {
pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Error)]
#[error(display = "{}", _0)]
#[error("{0}")]
pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>;

@ -104,32 +106,44 @@ impl Db {
            result: Cell::new(None),
        };
        let tx_res = self.0.transaction(&f);
        let ret = f
            .result
            .into_inner()
            .expect("Transaction did not store result");
        let fn_res = f.result.into_inner();

        match tx_res {
            Ok(on_commit) => match ret {
                Ok(value) => {
                    on_commit.into_iter().for_each(|f| f());
                    Ok(value)
                }
                _ => unreachable!(),
            },
            Err(TxError::Abort(())) => match ret {
                Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
                _ => unreachable!(),
            },
            Err(TxError::Db(e2)) => match ret {
                // Ok was stored -> the error occurred when finalizing
                // transaction
                Ok(_) => Err(TxError::Db(e2)),
                // An error was already stored: that's the one we want to
                // return
                Err(TxError::Db(e)) => Err(TxError::Db(e)),
                _ => unreachable!(),
            },
        match (tx_res, fn_res) {
            (Ok(on_commit), Some(Ok(value))) => {
                // Transaction succeeded
                // TxFn stored the value to return to the user in fn_res
                // tx_res contains the on_commit list of callbacks, run them now
                on_commit.into_iter().for_each(|f| f());
                Ok(value)
            }
            (Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => {
                // Transaction was aborted by user code
                // The abort error value is stored in fn_res
                Err(TxError::Abort(e))
            }
            (Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => {
                // Transaction encountered a DB error in user code
                // The error value encountered is the one in fn_res,
                // tx_res contains only a dummy error message
                Err(TxError::Db(fn_e))
            }
            (Err(TxError::Db(tx_e)), None) => {
                // Transaction encountered a DB error when initializing the transaction,
                // before user code was called
                Err(TxError::Db(tx_e))
            }
            (Err(TxError::Db(tx_e)), Some(Ok(_))) => {
                // Transaction encountered a DB error when committing the transaction,
                // after user code was called
                Err(TxError::Db(tx_e))
            }
            (tx_res, fn_res) => {
                panic!(
                    "unexpected error case: tx_res={:?}, fn_res={:?}",
                    tx_res.map(|_| "..."),
                    fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
                );
            }
        }
    }
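
// A minimal standalone sketch of the pattern implemented above (simplified
// types assumed for illustration; this is not the crate's real API): the
// closure smuggles its typed result out through a Cell while the engine only
// reports a coarse commit/abort status, and the two are re-joined afterwards
// exactly as in the match above.

use std::cell::Cell;

enum Status {
    Commit,
    Abort,
}

// The engine driver only sees the coarse status...
fn engine_drive(f: &dyn Fn() -> Status) -> Status {
    f()
}

// ...while the typed result travels through the Cell.
fn run() -> Result<u32, &'static str> {
    let result: Cell<Option<Result<u32, &'static str>>> = Cell::new(None);
    let status = engine_drive(&|| {
        let r: Result<u32, &'static str> = Ok(42);
        let s = if r.is_ok() { Status::Commit } else { Status::Abort };
        result.set(Some(r));
        s
    });
    match (status, result.into_inner()) {
        (Status::Commit, Some(Ok(v))) => Ok(v),
        (Status::Abort, Some(Err(e))) => Err(e),
        _ => unreachable!(),
    }
}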

@ -152,7 +166,7 @@ impl Db {
        let tree_names = other.list_trees()?;
        for name in tree_names {
            let tree = self.open_tree(&name)?;
            if tree.len()? > 0 {
            if !tree.is_empty()? {
                return Err(Error(format!("tree {} already contains data", name).into()));
            }

@ -194,8 +208,12 @@ impl Tree {
        self.0.get(self.1, key.as_ref())
    }
    #[inline]
    pub fn len(&self) -> Result<usize> {
        self.0.len(self.1)
    pub fn approximate_len(&self) -> Result<usize> {
        self.0.approximate_len(self.1)
    }
    #[inline]
    pub fn is_empty(&self) -> Result<bool> {
        self.0.is_empty(self.1)
    }

    #[inline]

@ -333,7 +351,8 @@ pub(crate) trait IDb: Send + Sync {
    fn snapshot(&self, path: &PathBuf) -> Result<()>;

    fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
    fn len(&self, tree: usize) -> Result<usize>;
    fn approximate_len(&self, tree: usize) -> Result<usize>;
    fn is_empty(&self, tree: usize) -> Result<bool>;

    fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
    fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;

@ -1,8 +1,8 @@
use core::ops::Bound;
use core::ptr::NonNull;

use std::collections::HashMap;
use std::convert::TryInto;
use std::marker::PhantomPinned;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::{Arc, RwLock};

@ -11,12 +11,55 @@ use heed::types::ByteSlice;
use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};

use crate::{
    open::{Engine, OpenOpt},
    Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
    TxResult, TxValueIter, Value, ValueIter,
};

pub use heed;

// ---- top-level open function

pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
    info!("Opening LMDB database at: {}", path.display());
    if let Err(e) = std::fs::create_dir_all(&path) {
        return Err(Error(
            format!("Unable to create LMDB data directory: {}", e).into(),
        ));
    }

    let map_size = match opt.lmdb_map_size {
        None => recommended_map_size(),
        Some(v) => v - (v % 4096),
    };

    let mut env_builder = heed::EnvOpenOptions::new();
    env_builder.max_dbs(100);
    env_builder.map_size(map_size);
    env_builder.max_readers(2048);
    unsafe {
        env_builder.flag(heed::flags::Flags::MdbNoRdAhead);
        env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
        if !opt.fsync {
            env_builder.flag(heed::flags::Flags::MdbNoSync);
        }
    }
    match env_builder.open(&path) {
        Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
            return Err(Error(
                "OutOfMemory error while trying to open LMDB database. This can happen \
                if your operating system is not allowing you to use sufficient virtual \
                memory address space. Please check that no limit is set (ulimit -v). \
                You may also try to set a smaller `lmdb_map_size` configuration parameter. \
                On 32-bit machines, you should probably switch to another database engine."
                    .into(),
            ))
        }
        Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
        Ok(db) => Ok(LmdbDb::init(db)),
    }
}
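
// Illustrative configuration sketch (an assumption about the TOML layout;
// exact key syntax may differ from your Garage version):
//
//   db_engine = "lmdb"
//   lmdb_map_size = "1T"   # rounded down above to a multiple of 4096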

// -- err

impl From<heed::Error> for Error {

@ -104,12 +147,11 @@ impl IDb for LmdbDb {
        Ok(ret2)
    }

    fn snapshot(&self, to: &PathBuf) -> Result<()> {
        std::fs::create_dir_all(to)?;
        let mut path = to.clone();
        path.push("data.mdb");
    fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
        std::fs::create_dir_all(base_path)?;
        let path = Engine::Lmdb.db_path(base_path);
        self.db
            .copy_to_path(path, heed::CompactionOption::Disabled)?;
            .copy_to_path(path, heed::CompactionOption::Enabled)?;
        Ok(())
    }

@ -126,11 +168,16 @@ impl IDb for LmdbDb {
        }
    }

    fn len(&self, tree: usize) -> Result<usize> {
    fn approximate_len(&self, tree: usize) -> Result<usize> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        Ok(tree.len(&tx)?.try_into().unwrap())
    }
    fn is_empty(&self, tree: usize) -> Result<bool> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        Ok(tree.is_empty(&tx)?)
    }

    fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
        let tree = self.get_tree(tree)?;

@ -159,13 +206,15 @@ impl IDb for LmdbDb {
    fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?))
        // Safety: the closure does not store its argument anywhere
        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?)) }
    }

    fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?))
        // Safety: the closure does not store its argument anywhere
        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?)) }
    }

    fn range<'r>(

@ -176,7 +225,8 @@ impl IDb for LmdbDb {
    ) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?))
        // Safety: the closure does not store its argument anywhere
        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?)) }
    }
    fn range_rev<'r>(
        &self,

@ -186,7 +236,8 @@ impl IDb for LmdbDb {
    ) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree)?;
        let tx = self.db.read_txn()?;
        TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?))
        // Safety: the closure does not store its argument anywhere
        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?)) }
    }

// ----

@ -316,28 +367,41 @@ where
{
    tx: RoTxn<'a>,
    iter: Option<I>,
    _pin: PhantomPinned,
}

impl<'a, I> TxAndIterator<'a, I>
where
    I: Iterator<Item = IteratorItem<'a>> + 'a,
{
    fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
    fn iter(self: Pin<&mut Self>) -> &mut Option<I> {
        // Safety: iter is not structural
        unsafe { &mut self.get_unchecked_mut().iter }
    }

    /// Safety: iterfun must not store its argument anywhere but in its result.
    unsafe fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
    where
        F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
    {
        let res = TxAndIterator { tx, iter: None };
        let res = TxAndIterator {
            tx,
            iter: None,
            _pin: PhantomPinned,
        };
        let mut boxed = Box::pin(res);

        // This unsafe allows us to bypass lifetime checks
        let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
        let iter = iterfun(tx)?;
        let tx_lifetime_overextended: &'a RoTxn<'a> = {
            let tx = &boxed.tx;
            // Safety: Artificially extending the lifetime because
            // this reference will only be stored and accessed from the
            // returned ValueIter which guarantees that it is destroyed
            // before the tx it is pointing to.
            unsafe { &*&raw const *tx }
        };
        let iter = iterfun(&tx_lifetime_overextended)?;

        let mut_ref = Pin::as_mut(&mut boxed);
        // This unsafe allows us to write in a field of the pinned struct
        unsafe {
            Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
        }
        *boxed.as_mut().iter() = Some(iter);

        Ok(Box::new(TxAndIteratorPin(boxed)))
    }

@ -348,8 +412,10 @@ where
    I: Iterator<Item = IteratorItem<'a>> + 'a,
{
    fn drop(&mut self) {
        // ensure the iterator is dropped before the RoTxn it references
        drop(self.iter.take());
        // Safety: `new_unchecked` is okay because we know this value is never
        // used again after being dropped.
        let this = unsafe { Pin::new_unchecked(self) };
        drop(this.iter().take());
    }
}

@ -365,13 +431,12 @@ where

    fn next(&mut self) -> Option<Self::Item> {
        let mut_ref = Pin::as_mut(&mut self.0);
        // This unsafe allows us to mutably access the iterator field
        let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
        match next {
            None => None,
            Some(Err(e)) => Some(Err(e.into())),
            Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
        }
        let next = mut_ref.iter().as_mut()?.next()?;
        let res = match next {
            Err(e) => Err(e.into()),
            Ok((k, v)) => Ok((k.to_vec(), v.to_vec())),
        };
        Some(res)
    }
}

@ -11,6 +11,7 @@ use crate::{Db, Error, Result};
pub enum Engine {
    Lmdb,
    Sqlite,
    Fjall,
}

impl Engine {

@ -19,8 +20,26 @@ impl Engine {
        match self {
            Self::Lmdb => "lmdb",
            Self::Sqlite => "sqlite",
            Self::Fjall => "fjall",
        }
    }

    /// Return engine-specific DB path from base path
    pub fn db_path(&self, base_path: &PathBuf) -> PathBuf {
        let mut ret = base_path.clone();
        match self {
            Self::Lmdb => {
                ret.push("db.lmdb");
            }
            Self::Sqlite => {
                ret.push("db.sqlite");
            }
            Self::Fjall => {
                ret.push("db.fjall");
            }
        }
        ret
    }
}
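
// For example (illustrative):
//   Engine::Lmdb.db_path(&PathBuf::from("/var/lib/garage/meta"))
// yields "/var/lib/garage/meta/db.lmdb"; the other engines get "db.sqlite"
// and "db.fjall" respectively, so snapshots of different engines can share
// one base directory.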

impl std::fmt::Display for Engine {

@ -36,10 +55,11 @@ impl std::str::FromStr for Engine {
        match text {
            "lmdb" | "heed" => Ok(Self::Lmdb),
            "sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
            "fjall" => Ok(Self::Fjall),
            "sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
            kind => Err(Error(
                format!(
                    "Invalid DB engine: {} (options are: lmdb, sqlite)",
                    "Invalid DB engine: {} (options are: lmdb, sqlite, fjall)",
                    kind
                )
                .into(),

@ -51,6 +71,7 @@ impl std::str::FromStr for Engine {
pub struct OpenOpt {
    pub fsync: bool,
    pub lmdb_map_size: Option<usize>,
    pub fjall_block_cache_size: Option<usize>,
}

impl Default for OpenOpt {

@ -58,6 +79,7 @@ impl Default for OpenOpt {
        Self {
            fsync: false,
            lmdb_map_size: None,
            fjall_block_cache_size: None,
        }
    }
}

@ -66,53 +88,15 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
    match engine {
        // ---- Sqlite DB ----
        #[cfg(feature = "sqlite")]
        Engine::Sqlite => {
            info!("Opening Sqlite database at: {}", path.display());
            let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
            Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
        }
        Engine::Sqlite => crate::sqlite_adapter::open_db(path, opt),

        // ---- LMDB DB ----
        #[cfg(feature = "lmdb")]
        Engine::Lmdb => {
            info!("Opening LMDB database at: {}", path.display());
            if let Err(e) = std::fs::create_dir_all(&path) {
                return Err(Error(
                    format!("Unable to create LMDB data directory: {}", e).into(),
                ));
            }
        Engine::Lmdb => crate::lmdb_adapter::open_db(path, opt),

            let map_size = match opt.lmdb_map_size {
                None => crate::lmdb_adapter::recommended_map_size(),
                Some(v) => v - (v % 4096),
            };

            let mut env_builder = heed::EnvOpenOptions::new();
            env_builder.max_dbs(100);
            env_builder.map_size(map_size);
            env_builder.max_readers(2048);
            unsafe {
                env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
                env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
                if !opt.fsync {
                    env_builder.flag(heed::flags::Flags::MdbNoSync);
                }
            }
            match env_builder.open(&path) {
                Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
                    return Err(Error(
                        "OutOfMemory error while trying to open LMDB database. This can happen \
                        if your operating system is not allowing you to use sufficient virtual \
                        memory address space. Please check that no limit is set (ulimit -v). \
                        You may also try to set a smaller `lmdb_map_size` configuration parameter. \
                        On 32-bit machines, you should probably switch to another database engine."
                            .into(),
                    ))
                }
                Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
                Ok(db) => Ok(crate::lmdb_adapter::LmdbDb::init(db)),
            }
        }
        // ---- Fjall DB ----
        #[cfg(feature = "fjall")]
        Engine::Fjall => crate::fjall_adapter::open_db(path, opt),

        // Pattern is unreachable when all supported DB engines are compiled into binary. The allow
        // attribute is added so that we won't have to change this match in case we stop building

@ -11,12 +11,23 @@ use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{params, Rows, Statement, Transaction};

use crate::{
    open::{Engine, OpenOpt},
    Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
    TxResult, TxValueIter, Value, ValueIter,
};

pub use rusqlite;

// ---- top-level open function

pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
    info!("Opening Sqlite database at: {}", path.display());
    let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
    Ok(SqliteDb::new(manager, opt.fsync)?)
}

// ----

type Connection = r2d2::PooledConnection<SqliteConnectionManager>;

// --- err

@ -139,17 +150,18 @@ impl IDb for SqliteDb {
        Ok(trees)
    }

    fn snapshot(&self, to: &PathBuf) -> Result<()> {
        fn progress(p: rusqlite::backup::Progress) {
            let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
            info!("Sqlite snapshot progress: {}%", percent);
        }
        std::fs::create_dir_all(to)?;
        let mut path = to.clone();
        path.push("db.sqlite");
        self.db
            .get()?
            .backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
    fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
        std::fs::create_dir_all(base_path)?;
        let path = Engine::Sqlite
            .db_path(&base_path)
            .into_os_string()
            .into_string()
            .map_err(|_| Error("invalid sqlite path string".into()))?;

        info!("Start sqlite VACUUM INTO `{}`", path);
        self.db.get()?.execute("VACUUM INTO ?1", params![path])?;
        info!("Finished sqlite VACUUM INTO `{}`", path);

        Ok(())
    }

@ -160,7 +172,7 @@ impl IDb for SqliteDb {
        self.internal_get(&self.db.get()?, &tree, key)
    }

    fn len(&self, tree: usize) -> Result<usize> {
    fn approximate_len(&self, tree: usize) -> Result<usize> {
        let tree = self.get_tree(tree)?;
        let db = self.db.get()?;

@ -172,6 +184,10 @@ impl IDb for SqliteDb {
        }
    }

    fn is_empty(&self, tree: usize) -> Result<bool> {
        Ok(self.approximate_len(tree)? == 0)
    }

    fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
        let tree = self.get_tree(tree)?;
        let db = self.db.get()?;

@ -1,7 +1,7 @@
use crate::*;

fn test_suite(db: Db) {
    let tree = db.open_tree("tree").unwrap();
    let tree = db.open_tree("tree:this_is_a_tree").unwrap();

    let ka: &[u8] = &b"test"[..];
    let kb: &[u8] = &b"zwello"[..];

@ -14,7 +14,7 @@ fn test_suite(db: Db) {

    assert!(tree.insert(ka, va).is_ok());
    assert_eq!(tree.get(ka).unwrap().unwrap(), va);
    assert_eq!(tree.len().unwrap(), 1);
    assert_eq!(tree.iter().unwrap().count(), 1);

    // ---- test transaction logic ----

@ -148,3 +148,15 @@ fn test_sqlite_db() {
    let db = SqliteDb::new(manager, false).unwrap();
    test_suite(db);
}

#[test]
#[cfg(feature = "fjall")]
fn test_fjall_db() {
    use crate::fjall_adapter::{fjall, FjallDb};

    let path = mktemp::Temp::new_dir().unwrap();
    let config = fjall::Config::new(path).temporary(true);
    let keyspace = config.open_transactional().unwrap();
    let db = FjallDb::init(keyspace);
    test_suite(db);
}

@ -1,6 +1,6 @@
[package]
name = "garage"
version = "1.0.1"
version = "1.3.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@ -57,11 +57,13 @@ opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
opentelemetry-otlp = { workspace = true, optional = true }
syslog-tracing = { workspace = true, optional = true }
tracing-journald = { workspace = true, optional = true }

[dev-dependencies]
garage_api_common.workspace = true

aws-sdk-s3.workspace = true
aws-smithy-runtime.workspace = true
chrono.workspace = true
http.workspace = true
hmac.workspace = true

@ -71,10 +73,12 @@ hyper-util.workspace = true
mktemp.workspace = true
sha2.workspace = true

static_init.workspace = true
assert-json-diff.workspace = true
serde_json.workspace = true
base64.workspace = true
crc32fast.workspace = true

k2v-client.workspace = true

@ -87,6 +91,7 @@ k2v = [ "garage_util/k2v", "garage_api_k2v" ]
# Database engines
lmdb = [ "garage_model/lmdb" ]
sqlite = [ "garage_model/sqlite" ]
fjall = [ "garage_model/fjall" ]

# Automatic registration and discovery via Consul API
consul-discovery = [ "garage_rpc/consul-discovery" ]

@ -98,6 +103,8 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
telemetry-otlp = [ "opentelemetry-otlp" ]
# Logging to syslog
syslog = [ "syslog-tracing" ]
# Logging to journald
journald = [ "tracing-journald" ]

# NOTE: bundled-libs and system-libs should be treated as mutually exclusive;
# exactly one of them should be enabled.

@ -101,6 +101,7 @@ impl AdminRpcHandler {
        let mut obj_dels = 0;
        let mut mpu_dels = 0;
        let mut ver_dels = 0;
        let mut br_dels = 0;

        for hash in blocks {
            let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;

@ -131,12 +132,19 @@ impl AdminRpcHandler {
                        ver_dels += 1;
                    }
                }
                if !br.deleted.get() {
                    let mut br = br;
                    br.deleted.set();
                    self.garage.block_ref_table.insert(&br).await?;
                    br_dels += 1;
                }
            }
        }

        Ok(AdminRpc::Ok(format!(
            "Purged {} blocks, {} versions, {} objects, {} multipart uploads",
            "Purged {} blocks: marked {} block refs, {} versions, {} objects and {} multipart uploads as deleted",
            blocks.len(),
            br_dels,
            ver_dels,
            obj_dels,
            mpu_dels,

@ -126,7 +126,7 @@ impl AdminRpcHandler {

    #[allow(clippy::ptr_arg)]
    async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
        if !is_valid_bucket_name(name) {
        if !is_valid_bucket_name(name, self.garage.config.allow_punycode) {
            return Err(Error::BadRequest(format!(
                "{}: {}",
                name, INVALID_BUCKET_NAME_MESSAGE

@ -219,7 +219,7 @@ impl AdminRpcHandler {

        // Gather block manager statistics
        writeln!(&mut ret, "\nBlock manager stats:").unwrap();
        let rc_len = self.garage.block_manager.rc_len()?.to_string();
        let rc_len = self.garage.block_manager.rc_approximate_len()?.to_string();

        writeln!(
            &mut ret,

@ -230,13 +230,13 @@ impl AdminRpcHandler {
        writeln!(
            &mut ret,
            " resync queue length: {}",
            self.garage.block_manager.resync.queue_len()?
            self.garage.block_manager.resync.queue_approximate_len()?
        )
        .unwrap();
        writeln!(
            &mut ret,
            " blocks with resync errors: {}",
            self.garage.block_manager.resync.errors_len()?
            self.garage.block_manager.resync.errors_approximate_len()?
        )
        .unwrap();

@ -346,16 +346,21 @@ impl AdminRpcHandler {
    F: TableSchema + 'static,
    R: TableReplication + 'static,
{
    let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
    let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();
    let data_len = t
        .data
        .store
        .approximate_len()
        .map_err(GarageError::from)?
        .to_string();
    let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string();

    Ok(format!(
        " {}\t{}\t{}\t{}\t{}",
        F::TABLE_NAME,
        data_len,
        mkl_len,
        t.merkle_updater.todo_len()?,
        t.data.gc_todo_len()?
        t.merkle_updater.todo_approximate_len()?,
        t.data.gc_todo_approximate_len()?
    ))
}

@ -466,6 +466,10 @@ pub enum RepairWhat {
    /// Repair (resync/rebalance) the set of stored blocks in the cluster
    #[structopt(name = "blocks", version = garage_version())]
    Blocks,
    /// Clear the block resync queue. The list of blocks in errored state
    /// is cleared as well. You MUST run `garage repair blocks` after invoking this.
    #[structopt(name = "clear-resync-queue", version = garage_version())]
    ClearResyncQueue,
    /// Repropagate object deletions to the version table
    #[structopt(name = "versions", version = garage_version())]
    Versions,

@ -478,6 +482,9 @@ pub enum RepairWhat {
    /// Recalculate block reference counters
    #[structopt(name = "block-rc", version = garage_version())]
    BlockRc,
    /// Fix inconsistency in bucket aliases (WARNING: EXPERIMENTAL)
    #[structopt(name = "aliases", version = garage_version())]
    Aliases,
    /// Verify integrity of all blocks on disc
    #[structopt(name = "scrub", version = garage_version())]
    Scrub {

@ -208,6 +208,43 @@ fn init_logging(opt: &Opt) {
        }
    }

    if std::env::var("GARAGE_LOG_TO_JOURNALD")
        .map(|x| x == "1" || x == "true")
        .unwrap_or(false)
    {
        #[cfg(feature = "journald")]
        {
            use tracing_journald::{Priority, PriorityMappings};
            use tracing_subscriber::layer::SubscriberExt;
            use tracing_subscriber::util::SubscriberInitExt;

            let registry = tracing_subscriber::registry()
                .with(tracing_subscriber::fmt::layer().with_writer(std::io::sink))
                .with(env_filter);
            match tracing_journald::layer() {
                Ok(layer) => {
                    registry
                        .with(layer.with_priority_mappings(PriorityMappings {
                            info: Priority::Informational,
                            debug: Priority::Debug,
                            ..PriorityMappings::new()
                        }))
                        .init();
                }
                Err(e) => {
                    eprintln!("Couldn't connect to journald: {}.", e);
                    std::process::exit(1);
                }
            }
            return;
        }
        #[cfg(not(feature = "journald"))]
        {
            eprintln!("Journald support is not enabled in this build.");
            std::process::exit(1);
        }
    }
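
    // Illustrative usage (an assumption beyond the env var name above):
    //   GARAGE_LOG_TO_JOURNALD=1 garage server
    // sends logs to journald instead of stderr, provided the binary was
    // built with the `journald` feature.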

    tracing_subscriber::fmt()
        .with_writer(std::io::stderr)
        .with_env_filter(env_filter)

@ -88,6 +88,15 @@ pub async fn launch_online_repair(
                garage.block_manager.clone(),
            ));
        }
        RepairWhat::Aliases => {
            info!("Repairing bucket aliases (foreground)");
            garage.locked_helper().await.repair_aliases().await?;
        }
        RepairWhat::ClearResyncQueue => {
            let garage = garage.clone();
            tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
                .await??
        }
    }
    Ok(())
}
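
// Illustrative usage (assuming the structopt names above and the usual
// confirmation flag): clear the queue, then rebuild it as the doc comment
// on ClearResyncQueue mandates:
//   garage repair --yes clear-resync-queue
//   garage repair --yes blocks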
@ -183,10 +183,21 @@ fn watch_shutdown_signal() -> watch::Receiver<bool> {
        let mut sigterm =
            signal(SignalKind::terminate()).expect("Failed to install SIGTERM handler");
        let mut sighup = signal(SignalKind::hangup()).expect("Failed to install SIGHUP handler");
        tokio::select! {
            _ = sigint.recv() => info!("Received SIGINT, shutting down."),
            _ = sigterm.recv() => info!("Received SIGTERM, shutting down."),
            _ = sighup.recv() => info!("Received SIGHUP, shutting down."),
        loop {
            tokio::select! {
                _ = sigint.recv() => {
                    info!("Received SIGINT, shutting down.");
                    break
                }
                _ = sigterm.recv() => {
                    info!("Received SIGTERM, shutting down.");
                    break
                }
                _ = sighup.recv() => {
                    info!("Received SIGHUP, reload not supported.");
                    continue
                }
            }
        }
        send_cancel.send(true).unwrap();
    });

@ -12,7 +12,7 @@ pub fn build_client(key: &Key) -> Client {
        .endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT))
        .region(super::REGION)
        .credentials_provider(credentials)
        .behavior_version(BehaviorVersion::v2023_11_09())
        .behavior_version(BehaviorVersion::v2024_03_28())
        .build();

    Client::from_conf(config)

@ -192,16 +192,13 @@ impl<'a> RequestBuilder<'a> {
            .collect::<HeaderMap>();

        let date = now.format(signature::LONG_DATETIME).to_string();
        all_headers.insert(
            signature::payload::X_AMZ_DATE,
            HeaderValue::from_str(&date).unwrap(),
        );
        all_headers.insert(signature::X_AMZ_DATE, HeaderValue::from_str(&date).unwrap());
        all_headers.insert(HOST, HeaderValue::from_str(&host).unwrap());

        let body_sha = match self.body_signature {
        let body_sha = match &self.body_signature {
            BodySignature::Unsigned => "UNSIGNED-PAYLOAD".to_owned(),
            BodySignature::Classic => hex::encode(garage_util::data::sha256sum(&self.body)),
            BodySignature::Streaming(size) => {
            BodySignature::Streaming { chunk_size } => {
                all_headers.insert(
                    CONTENT_ENCODING,
                    HeaderValue::from_str("aws-chunked").unwrap(),

@ -216,18 +213,59 @@ impl<'a> RequestBuilder<'a> {
                // code.
                all_headers.insert(
                    CONTENT_LENGTH,
                    to_streaming_body(&self.body, size, String::new(), signer.clone(), now, "")
                        .len()
                        .to_string()
                        .try_into()
                        .unwrap(),
                    to_streaming_body(
                        &self.body,
                        *chunk_size,
                        String::new(),
                        signer.clone(),
                        now,
                        "",
                    )
                    .len()
                    .to_string()
                    .try_into()
                    .unwrap(),
                );

                "STREAMING-AWS4-HMAC-SHA256-PAYLOAD".to_owned()
            }
            BodySignature::StreamingUnsignedTrailer {
                chunk_size,
                trailer_algorithm,
                trailer_value,
            } => {
                all_headers.insert(
                    CONTENT_ENCODING,
                    HeaderValue::from_str("aws-chunked").unwrap(),
                );
                all_headers.insert(
                    HeaderName::from_static("x-amz-decoded-content-length"),
                    HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
                );
                all_headers.insert(
                    HeaderName::from_static("x-amz-trailer"),
                    HeaderValue::from_str(&trailer_algorithm).unwrap(),
                );

                all_headers.insert(
                    CONTENT_LENGTH,
                    to_streaming_unsigned_trailer_body(
                        &self.body,
                        *chunk_size,
                        &trailer_algorithm,
                        &trailer_value,
                    )
                    .len()
                    .to_string()
                    .try_into()
                    .unwrap(),
                );

                "STREAMING-UNSIGNED-PAYLOAD-TRAILER".to_owned()
            }
        };
        all_headers.insert(
            signature::payload::X_AMZ_CONTENT_SH256,
            signature::X_AMZ_CONTENT_SHA256,
            HeaderValue::from_str(&body_sha).unwrap(),
        );

@ -276,10 +314,26 @@ impl<'a> RequestBuilder<'a> {
        let mut request = Request::builder();
        *request.headers_mut().unwrap() = all_headers;

        let body = if let BodySignature::Streaming(size) = self.body_signature {
            to_streaming_body(&self.body, size, signature, streaming_signer, now, &scope)
        } else {
            self.body.clone()
        let body = match &self.body_signature {
            BodySignature::Streaming { chunk_size } => to_streaming_body(
                &self.body,
                *chunk_size,
                signature,
                streaming_signer,
                now,
                &scope,
            ),
            BodySignature::StreamingUnsignedTrailer {
                chunk_size,
                trailer_algorithm,
                trailer_value,
            } => to_streaming_unsigned_trailer_body(
                &self.body,
                *chunk_size,
                &trailer_algorithm,
                &trailer_value,
            ),
            _ => self.body.clone(),
        };
        let request = request
            .uri(uri)

@ -308,7 +362,14 @@ impl<'a> RequestBuilder<'a> {
pub enum BodySignature {
    Unsigned,
    Classic,
    Streaming(usize),
    Streaming {
        chunk_size: usize,
    },
    StreamingUnsignedTrailer {
        chunk_size: usize,
        trailer_algorithm: String,
        trailer_value: String,
    },
}

fn query_param_to_string(params: &HashMap<String, Option<String>>) -> String {

@ -363,3 +424,26 @@ fn to_streaming_body(

    res
}

fn to_streaming_unsigned_trailer_body(
    body: &[u8],
    chunk_size: usize,
    trailer_algorithm: &str,
    trailer_value: &str,
) -> Vec<u8> {
    let mut res = Vec::with_capacity(body.len());
    for chunk in body.chunks(chunk_size) {
        let header = format!("{:x}\r\n", chunk.len());
        res.extend_from_slice(header.as_bytes());
        res.extend_from_slice(chunk);
        res.extend_from_slice(b"\r\n");
    }

    res.extend_from_slice(b"0\r\n");
    res.extend_from_slice(trailer_algorithm.as_bytes());
    res.extend_from_slice(b":");
    res.extend_from_slice(trailer_value.as_bytes());
    res.extend_from_slice(b"\n\r\n\r\n");

    res
}
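
// Illustrative wire format (derived from the function above): with
// body = b"hello", chunk_size = 16 and an "x-amz-checksum-crc32" trailer,
// the produced bytes are:
//   5\r\n
//   hello\r\n
//   0\r\n
//   x-amz-checksum-crc32:<base64 of the body's crc32>\n\r\n\r\n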
@ -63,6 +63,8 @@ rpc_bind_addr = "127.0.0.1:{rpc_port}"
rpc_public_addr = "127.0.0.1:{rpc_port}"
rpc_secret = "{secret}"

allow_punycode = true

[s3_api]
s3_region = "{region}"
api_bind_addr = "127.0.0.1:{s3_port}"

@ -99,7 +101,10 @@ api_bind_addr = "127.0.0.1:{admin_port}"
        .arg("server")
        .stdout(stdout)
        .stderr(stderr)
        .env("RUST_LOG", "garage=debug,garage_api=trace")
        .env(
            "RUST_LOG",
            "garage=debug,garage_api_common=trace,garage_api_s3=trace",
        )
        .spawn()
        .expect("Could not start garage");

@ -1,5 +1,6 @@
use crate::common;
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::primitives::{ByteStream, DateTime};
use aws_sdk_s3::types::{Delete, ObjectIdentifier};

const STD_KEY: &str = "hello world";

@ -125,6 +126,153 @@ async fn test_putobject() {
    }
}

#[tokio::test]
async fn test_precondition() {
    let ctx = common::context();
    let bucket = ctx.create_bucket("precondition");

    let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
    let etag2 = "\"ae4984b984cd984fe98d4efa954dce98\"";
    let data = ByteStream::from_static(BODY);

    let r = ctx
        .client
        .put_object()
        .bucket(&bucket)
        .key(STD_KEY)
        .body(data)
        .send()
        .await
        .unwrap();

    assert_eq!(r.e_tag.unwrap().as_str(), etag);

    let last_modified;
    {
        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_match(etag)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
        last_modified = o.last_modified.unwrap();

        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_match(etag2)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
        );
    }
    {
        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_none_match(etag2)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);

        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_none_match(etag)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
        );
    }
    let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
    let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
    let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
    {
        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_modified_since(newer_date)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
        );

        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_modified_since(same_date)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
        );

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_modified_since(older_date)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
    }
    {
        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_unmodified_since(older_date)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
        );

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_unmodified_since(same_date)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_unmodified_since(newer_date)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
    }
}

#[tokio::test]
async fn test_getobject() {
    let ctx = common::context();

@ -189,12 +337,14 @@ async fn test_getobject() {

#[tokio::test]
async fn test_metadata() {
    use aws_sdk_s3::primitives::{DateTime, DateTimeFormat};

    let ctx = common::context();
    let bucket = ctx.create_bucket("testmetadata");

    let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
    let exp = aws_sdk_s3::primitives::DateTime::from_secs(10000000000);
    let exp2 = aws_sdk_s3::primitives::DateTime::from_secs(10000500000);
    let exp = DateTime::from_secs(10000000000);
    let exp2 = DateTime::from_secs(10000500000);

    {
        // Note. The AWS client SDK adds a Content-Type header

@ -227,7 +377,7 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition, None);
        assert_eq!(o.content_encoding, None);
        assert_eq!(o.content_language, None);
        assert_eq!(o.expires, None);
        assert_eq!(o.expires_string, None);
        assert_eq!(o.metadata.unwrap_or_default().len(), 0);

        let o = ctx

@ -250,7 +400,10 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
        assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
        assert_eq!(o.expires.unwrap(), exp);
        assert_eq!(
            o.expires_string.unwrap(),
            exp.fmt(DateTimeFormat::HttpDate).unwrap()
        );
    }

    {

@ -288,7 +441,10 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cdtest");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cetest");
        assert_eq!(o.content_language.unwrap().as_str(), "cltest");
        assert_eq!(o.expires.unwrap(), exp2);
        assert_eq!(
            o.expires_string.unwrap(),
            exp2.fmt(DateTimeFormat::HttpDate).unwrap()
        );
        let mut meta = o.metadata.unwrap();
        assert_eq!(meta.remove("testmeta").unwrap(), "hello people");
        assert_eq!(meta.remove("nice-unicode-meta").unwrap(), "宅配便");

@ -314,7 +470,10 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
        assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
        assert_eq!(o.expires.unwrap(), exp);
        assert_eq!(
            o.expires_string.unwrap(),
            exp.fmt(DateTimeFormat::HttpDate).unwrap()
        );
    }
}

@ -1,5 +1,8 @@
use std::collections::HashMap;

use base64::prelude::*;
use crc32fast::Hasher as Crc32;

use crate::common;
use crate::common::ext::CommandExt;
use common::custom_requester::BodySignature;

@ -21,7 +24,7 @@ async fn test_putobject_streaming() {
    let content_type = "text/csv";
    let mut headers = HashMap::new();
    headers.insert("content-type".to_owned(), content_type.to_owned());
    let _ = ctx
    let res = ctx
        .custom_request
        .builder(bucket.clone())
        .method(Method::PUT)

@ -29,10 +32,11 @@ async fn test_putobject_streaming() {
        .signed_headers(headers)
        .vhost_style(true)
        .body(vec![])
        .body_signature(BodySignature::Streaming(10))
        .body_signature(BodySignature::Streaming { chunk_size: 10 })
        .send()
        .await
        .unwrap();
    assert!(res.status().is_success(), "got response: {:?}", res);

    // assert_eq!(r.e_tag.unwrap().as_str(), etag);
    // We return a version ID here

@ -65,7 +69,14 @@ async fn test_putobject_streaming() {
    {
        let etag = "\"46cf18a9b447991b450cad3facf5937e\"";

        let _ = ctx
        let mut crc32 = Crc32::new();
        crc32.update(&BODY[..]);
        let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);

        let mut headers = HashMap::new();
        headers.insert("x-amz-checksum-crc32".to_owned(), crc32.clone());

        let res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)

@ -73,11 +84,13 @@ async fn test_putobject_streaming() {
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .signed_headers(headers)
            .body(BODY.to_vec())
            .body_signature(BodySignature::Streaming(16))
            .body_signature(BodySignature::Streaming { chunk_size: 16 })
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // assert!(r.version_id.is_some());

@ -88,6 +101,7 @@ async fn test_putobject_streaming() {
            .bucket(&bucket)
            //.key(CTRL_KEY)
            .key("abc")
            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
            .send()
            .await
            .unwrap();

@ -98,6 +112,142 @@ async fn test_putobject_streaming() {
        assert_eq!(o.content_length.unwrap(), 62);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
        assert_eq!(o.checksum_crc32.unwrap(), crc32);
    }
}

#[tokio::test]
async fn test_putobject_streaming_unsigned_trailer() {
    let ctx = common::context();
    let bucket = ctx.create_bucket("putobject-streaming-unsigned-trailer");

    {
        // Send an empty object (can serve as a directory marker)
        // with a content type
        let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
        let content_type = "text/csv";
        let mut headers = HashMap::new();
        headers.insert("content-type".to_owned(), content_type.to_owned());

        let empty_crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(Crc32::new().finalize())[..]);

        let res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            .path(STD_KEY.to_owned())
            .signed_headers(headers)
            .vhost_style(true)
            .body(vec![])
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 10,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: empty_crc32,
            })
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // We return a version ID here
        // We should check if Amazon is returning one when versioning is not enabled
        // assert!(r.version_id.is_some());

        //let _version = r.version_id.unwrap();

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .send()
            .await
            .unwrap();

        assert_bytes_eq!(o.body, b"");
        assert_eq!(o.e_tag.unwrap(), etag);
        // We do not return version ID
        // We should check if Amazon is returning one when versioning is not enabled
        // assert_eq!(o.version_id.unwrap(), _version);
        assert_eq!(o.content_type.unwrap(), content_type);
        assert!(o.last_modified.is_some());
        assert_eq!(o.content_length.unwrap(), 0);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
    }

    {
        let etag = "\"46cf18a9b447991b450cad3facf5937e\"";

        let mut crc32 = Crc32::new();
        crc32.update(&BODY[..]);
        let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);

        // try sending with wrong crc32, check that it fails
        let err_res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .body(BODY.to_vec())
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 16,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: "2Yp9Yw==".into(),
            })
            .send()
            .await
            .unwrap();
        assert!(
            err_res.status().is_client_error(),
            "got response: {:?}",
            err_res
        );

        let res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .body(BODY.to_vec())
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 16,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: crc32.clone(),
            })
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // assert!(r.version_id.is_some());

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            //.key(CTRL_KEY)
            .key("abc")
            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
            .send()
            .await
            .unwrap();

        assert_bytes_eq!(o.body, BODY);
        assert_eq!(o.e_tag.unwrap(), etag);
        assert!(o.last_modified.is_some());
        assert_eq!(o.content_length.unwrap(), 62);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
        assert_eq!(o.checksum_crc32.unwrap(), crc32);
    }
}

@ -119,7 +269,7 @@ async fn test_create_bucket_streaming() {
        .custom_request
        .builder(bucket.to_owned())
        .method(Method::PUT)
        .body_signature(BodySignature::Streaming(10))
        .body_signature(BodySignature::Streaming { chunk_size: 10 })
        .send()
        .await
        .unwrap();

@ -174,7 +324,7 @@ async fn test_put_website_streaming() {
        .method(Method::PUT)
        .query_params(query)
        .body(website_config.as_bytes().to_vec())
        .body_signature(BodySignature::Streaming(10))
        .body_signature(BodySignature::Streaming { chunk_size: 10 })
        .send()
        .await
        .unwrap();

@ -11,6 +11,7 @@ use http::{Request, StatusCode};
use http_body_util::BodyExt;
use http_body_util::Full as FullBody;
use hyper::body::Bytes;
use hyper::header::LOCATION;
use hyper_util::client::legacy::Client;
use hyper_util::rt::TokioExecutor;
use serde_json::json;

@ -295,6 +296,33 @@ async fn test_website_s3_api() {
        );
    }

    // Test x-amz-website-redirect-location
    {
        ctx.client
            .put_object()
            .bucket(&bucket)
            .key("test-redirect.html")
            .website_redirect_location("https://perdu.com")
            .send()
            .await
            .unwrap();

        let req = Request::builder()
            .method("GET")
            .uri(format!(
                "http://127.0.0.1:{}/test-redirect.html",
                ctx.garage.web_port
            ))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .body(Body::new(Bytes::new()))
            .unwrap();

        let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::MOVED_PERMANENTLY);
        assert_eq!(resp.headers().get(LOCATION).unwrap(), "https://perdu.com");
    }

    // Test CORS with an allowed preflight request
    {
        let req = Request::builder()

@ -505,3 +533,118 @@ async fn test_website_check_domain() {
        })
    );
}

#[tokio::test]
async fn test_website_puny() {
    const BCKT_NAME: &str = "xn--pda.eu";
    let ctx = common::context();
    let bucket = ctx.create_bucket(BCKT_NAME);

    let data = ByteStream::from_static(BODY);

    ctx.client
        .put_object()
        .bucket(&bucket)
        .key("index.html")
        .body(data)
        .send()
        .await
        .unwrap();

    let client = Client::builder(TokioExecutor::new()).build_http();

    let req = |suffix| {
        Request::builder()
            .method("GET")
            .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
            .header("Host", format!("{}{}", BCKT_NAME, suffix))
            .body(Body::new(Bytes::new()))
            .unwrap()
    };

    ctx.garage
        .command()
        .args(["bucket", "website", "--allow", BCKT_NAME])
        .quiet()
        .expect_success_status("Could not allow website on bucket");

    let mut resp = client.request(req("")).await.unwrap();
    assert_eq!(resp.status(), StatusCode::OK);
    assert_eq!(
        resp.into_body().collect().await.unwrap().to_bytes(),
        BODY.as_ref()
    );

    resp = client.request(req(".web.garage")).await.unwrap();
    assert_eq!(resp.status(), StatusCode::OK);
    assert_eq!(
        resp.into_body().collect().await.unwrap().to_bytes(),
        BODY.as_ref()
    );

    for bname in [
        BCKT_NAME.to_string(),
        format!("{BCKT_NAME}.web.garage"),
        format!("{BCKT_NAME}.s3.garage"),
    ] {
        let admin_req = || {
            Request::builder()
                .method("GET")
                .uri(format!(
                    "http://127.0.0.1:{0}/check?domain={1}",
                    ctx.garage.admin_port, bname
                ))
                .body(Body::new(Bytes::new()))
                .unwrap()
        };

        let admin_resp = client.request(admin_req()).await.unwrap();
        assert_eq!(admin_resp.status(), StatusCode::OK);
        assert_eq!(
            admin_resp.into_body().collect().await.unwrap().to_bytes(),
            format!("Domain '{bname}' is managed by Garage").as_bytes()
        );
    }
}

#[tokio::test]
async fn test_website_object_not_found() {
    const BCKT_NAME: &str = "not-found";
    let ctx = common::context();
    let _bucket = ctx.create_bucket(BCKT_NAME);

    let client = Client::builder(TokioExecutor::new()).build_http();

    let req = |suffix| {
        Request::builder()
            .method("GET")
            .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
            .header("Host", format!("{}{}", BCKT_NAME, suffix))
            .body(Body::new(Bytes::new()))
            .unwrap()
    };

    ctx.garage
        .command()
        .args(["bucket", "website", "--allow", BCKT_NAME])
        .quiet()
        .expect_success_status("Could not allow website on bucket");

    let resp = client.request(req("")).await.unwrap();
    assert_eq!(resp.status(), StatusCode::NOT_FOUND);
// the error we return by default are *not* xml
|
||||
assert_eq!(
|
||||
resp.headers().get(http::header::CONTENT_TYPE).unwrap(),
|
||||
"text/html; charset=utf-8"
|
||||
);
|
||||
let result = String::from_utf8(
|
||||
resp.into_body()
|
||||
.collect()
|
||||
.await
|
||||
.unwrap()
|
||||
.to_bytes()
|
||||
.to_vec(),
|
||||
)
|
||||
.unwrap();
|
||||
assert!(result.contains("not found"));
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -72,6 +72,16 @@ impl K2vClient {
 			.enable_http2()
 			.build();
+		let client = HttpClient::builder(TokioExecutor::new()).build(connector);
+		Self::new_with_client(config, client)
+	}
+
+	/// Create a new K2V client with an external client.
+	/// Useful for example if you plan on creating many clients but you want to mutualize the
+	/// underlying thread pools & co.
+	pub fn new_with_client(
+		config: K2vClientConfig,
+		client: HttpClient<HttpsConnector<HttpConnector>, Body>,
+	) -> Result<Self, Error> {
 		let user_agent: std::borrow::Cow<str> = match &config.user_agent {
 			Some(ua) => ua.into(),
 			None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(),

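The new constructor makes the sharing pattern described in the doc comment straightforward. A minimal usage sketch (a hypothetical helper, assuming the same types and imports as the file above, and relying on the hyper client being cheaply cloneable):

// Hypothetical sketch: reuse one hyper client (and thus one set of
// thread pools and connection pools) across several K2V clients.
fn make_clients(
	configs: Vec<K2vClientConfig>,
	client: HttpClient<HttpsConnector<HttpConnector>, Body>,
) -> Result<Vec<K2vClient>, Error> {
	configs
		.into_iter()
		.map(|cfg| K2vClient::new_with_client(cfg, client.clone()))
		.collect()
}
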
@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "1.0.1"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -24,7 +24,7 @@ garage_net.workspace = true
 async-trait.workspace = true
 blake2.workspace = true
 chrono.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 http.workspace = true
 base64.workspace = true

@@ -44,3 +44,4 @@ default = [ "lmdb", "sqlite" ]
 k2v = [ "garage_util/k2v" ]
 lmdb = [ "garage_db/lmdb" ]
 sqlite = [ "garage_db/sqlite" ]
+fjall = [ "garage_db/fjall" ]

@@ -22,14 +22,10 @@ mod v08 {
 pub use v08::*;
 
 impl BucketAlias {
-	pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
-		if !is_valid_bucket_name(&name) {
-			None
-		} else {
-			Some(BucketAlias {
-				name,
-				state: crdt::Lww::raw(ts, bucket_id),
-			})
-		}
+	pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Self {
+		BucketAlias {
+			name,
+			state: crdt::Lww::raw(ts, bucket_id),
+		}
 	}
 

@@ -80,7 +76,7 @@ impl TableSchema for BucketAliasTable {
 /// In the case of Garage, bucket names must not be hex-encoded
 /// 32 byte string, which is excluded thanks to the
 /// maximum length of 63 bytes given in the spec.
-pub fn is_valid_bucket_name(n: &str) -> bool {
+pub fn is_valid_bucket_name(n: &str, puny: bool) -> bool {
 	// Bucket names must be between 3 and 63 characters
 	n.len() >= 3 && n.len() <= 63
 	// Bucket names must be composed of lowercase letters, numbers,

@@ -92,7 +88,9 @@ pub fn is_valid_bucket_name(n: &str) -> bool {
 	// Bucket names must not be formatted as an IP address
 	&& n.parse::<std::net::IpAddr>().is_err()
 	// Bucket names must not start with "xn--"
-	&& !n.starts_with("xn--")
+	&& (!n.starts_with("xn--") || puny)
+	// We are a bit stricter, to properly restrict punycode in all labels
+	&& (!n.contains(".xn--") || puny)
 	// Bucket names must not end with "-s3alias"
 	&& !n.ends_with("-s3alias")
 }

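To make the new `puny` flag concrete, a few illustrative assertions against the patched function (a hypothetical test sketch, not part of the diff):

#[test]
fn bucket_name_punycode_rules() {
	// Accepted either way: an ordinary bucket name.
	assert!(is_valid_bucket_name("my-bucket", false));
	// Punycode ("xn--") labels are rejected by default, in any position...
	assert!(!is_valid_bucket_name("xn--pda.eu", false));
	assert!(!is_valid_bucket_name("web.xn--pda.eu", false));
	// ...but accepted once punycode is explicitly allowed.
	assert!(is_valid_bucket_name("xn--pda.eu", true));
	// Pre-existing rules are unaffected by the flag:
	assert!(!is_valid_bucket_name("192.168.1.1", true)); // parses as an IP address
	assert!(!is_valid_bucket_name("my-bucket-s3alias", true)); // reserved suffix
}
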
@@ -116,21 +116,17 @@ impl Garage {
 		info!("Opening database...");
 		let db_engine = db::Engine::from_str(&config.db_engine)
 			.ok_or_message("Invalid `db_engine` value in configuration file")?;
-		let mut db_path = config.metadata_dir.clone();
-		match db_engine {
-			db::Engine::Sqlite => {
-				db_path.push("db.sqlite");
-			}
-			db::Engine::Lmdb => {
-				db_path.push("db.lmdb");
-			}
-		}
+		let db_path = db_engine.db_path(&config.metadata_dir);
 		let db_opt = db::OpenOpt {
 			fsync: config.metadata_fsync,
 			lmdb_map_size: match config.lmdb_map_size {
 				v if v == usize::default() => None,
 				v => Some(v),
 			},
+			fjall_block_cache_size: match config.fjall_block_cache_size {
+				v if v == usize::default() => None,
+				v => Some(v),
+			},
 		};
 		let db = db::open_db(&db_path, db_engine, &db_opt)
 			.ok_or_message("Unable to open metadata db")?;

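The removed `match` has presumably moved into a new `Engine::db_path` helper in garage_db, whose definition is not part of this diff. A hypothetical reconstruction based on the removed code (the fjall file name is a guess; the diff only shows that a fjall engine and block cache option exist):

// Hypothetical reconstruction of Engine::db_path; not shown in this diff.
impl Engine {
	pub fn db_path(&self, metadata_dir: &std::path::Path) -> std::path::PathBuf {
		let filename = match self {
			Engine::Sqlite => "db.sqlite",
			Engine::Lmdb => "db.lmdb",
			Engine::Fjall => "db.fjall", // assumed file name
		};
		metadata_dir.join(filename)
	}
}
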
@@ -319,15 +315,15 @@ impl Garage {
 		Ok(())
 	}
 
-	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
+	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> {
 		helper::bucket::BucketHelper(self)
 	}
 
-	pub fn key_helper(&self) -> helper::key::KeyHelper {
+	pub fn key_helper(&self) -> helper::key::KeyHelper<'_> {
 		helper::key::KeyHelper(self)
 	}
 
-	pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
+	pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> {
 		let lock = self.bucket_lock.lock().await;
 		helper::locked::LockedHelper(self, Some(lock))
 	}

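These three `<'_>` changes do not alter behaviour: each helper already borrowed from `&self`, and the anonymous lifetime just makes that borrow explicit (and satisfies the `elided_lifetimes_in_paths` lint). A standalone illustration with hypothetical types:

// Standalone illustration of the same pattern (hypothetical types).
#![warn(elided_lifetimes_in_paths)]

struct Garage {
	name: String,
}

struct BucketHelper<'a>(&'a Garage);

impl Garage {
	// Returning plain `BucketHelper` compiles but trips the lint;
	// `BucketHelper<'_>` spells out the borrow from `self`.
	fn bucket_helper(&self) -> BucketHelper<'_> {
		BucketHelper(self)
	}
}

fn main() {
	let g = Garage { name: "demo".into() };
	println!("{}", g.bucket_helper().0.name);
}
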
@@ -1,24 +1,24 @@
-use err_derive::Error;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;
 
 use garage_util::error::Error as GarageError;
 
 #[derive(Debug, Error, Serialize, Deserialize)]
 pub enum Error {
-	#[error(display = "Internal error: {}", _0)]
-	Internal(#[error(source)] GarageError),
+	#[error("Internal error: {0}")]
+	Internal(#[from] GarageError),
 
-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),
 
 	/// Bucket name is not valid according to AWS S3 specs
-	#[error(display = "Invalid bucket name: {}", _0)]
+	#[error("Invalid bucket name: {0}")]
 	InvalidBucketName(String),
 
-	#[error(display = "Access key not found: {}", _0)]
+	#[error("Access key not found: {0}")]
 	NoSuchAccessKey(String),
 
-	#[error(display = "Bucket not found: {}", _0)]
+	#[error("Bucket not found: {0}")]
 	NoSuchBucket(String),
 }

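The err-derive to thiserror migration is mostly mechanical (`display = "... {}"` with `_0` becomes a `"... {0}"` format string), but swapping `#[error(source)]` for `#[from]` also derives a `From<GarageError>` impl, so `?` now converts automatically where `#[error(source)]` alone did not. A self-contained sketch with a stand-in for garage_util's error type:

use thiserror::Error;

// Stand-in for garage_util::error::Error.
#[derive(Debug, Error)]
#[error("garage error: {0}")]
struct GarageError(String);

#[derive(Debug, Error)]
enum Error {
	// #[from] derives From<GarageError>, enabling `?` conversion below.
	#[error("Internal error: {0}")]
	Internal(#[from] GarageError),
}

fn read_from_table() -> Result<u32, GarageError> {
	Err(GarageError("table unavailable".into()))
}

fn load_bucket() -> Result<u32, Error> {
	// GarageError is converted into Error::Internal by the derived From.
	Ok(read_from_table()?)
}

fn main() {
	println!("{:?}", load_bucket());
}
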
@@ -1,3 +1,7 @@
+use std::collections::{HashMap, HashSet};
+
+use garage_db as db;
+
 use garage_util::crdt::*;
 use garage_util::data::*;
 use garage_util::error::{Error as GarageError, OkOrMessage};

@@ -47,6 +51,10 @@ impl<'a> LockedHelper<'a> {
 		KeyHelper(self.0)
 	}
 
+	// ================================================
+	//      global bucket aliases
+	// ================================================
+
 	/// Sets a new alias for a bucket in global namespace.
 	/// This function fails if:
 	/// - alias name is not valid according to S3 spec

@@ -57,7 +65,7 @@ impl<'a> LockedHelper<'a> {
 		bucket_id: Uuid,
 		alias_name: &String,
 	) -> Result<(), Error> {
-		if !is_valid_bucket_name(alias_name) {
+		if !is_valid_bucket_name(alias_name, self.0.config.allow_punycode) {
 			return Err(Error::InvalidBucketName(alias_name.to_string()));
 		}
 

@@ -88,8 +96,7 @@ impl<'a> LockedHelper<'a> {
 		// writes are now done and all writes use timestamp alias_ts
 
 		let alias = match alias {
-			None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id))
-				.ok_or_else(|| Error::InvalidBucketName(alias_name.clone()))?,
+			None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id)),
 			Some(mut a) => {
 				a.state = Lww::raw(alias_ts, Some(bucket_id));
 				a

@@ -180,13 +187,14 @@ impl<'a> LockedHelper<'a> {
 			.ok_or_else(|| Error::NoSuchBucket(alias_name.to_string()))?;
 
 		// Checks ok, remove alias
-		let alias_ts = match bucket.state.as_option() {
-			Some(bucket_state) => increment_logical_clock_2(
-				alias.state.timestamp(),
-				bucket_state.aliases.get_timestamp(alias_name),
-			),
-			None => increment_logical_clock(alias.state.timestamp()),
-		};
+		let alias_ts = increment_logical_clock_2(
+			alias.state.timestamp(),
+			bucket
+				.state
+				.as_option()
+				.map(|p| p.aliases.get_timestamp(alias_name))
+				.unwrap_or(0),
+		);
 
 		// ---- timestamp-ensured causality barrier ----
 		// writes are now done and all writes use timestamp alias_ts

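This refactor folds the `None` branch into the general case by treating a missing bucket state as timestamp 0, which is sound if the clock helpers have the usual Lamport-style semantics. A hypothetical sketch of those helpers (the real implementations live in garage_util and are not shown in this diff):

// Hypothetical sketch of the logical clock helpers (assumed semantics:
// return a value strictly greater than all inputs, and at least "now").
fn now_msec() -> u64 {
	use std::time::{SystemTime, UNIX_EPOCH};
	SystemTime::now()
		.duration_since(UNIX_EPOCH)
		.expect("clock went backwards")
		.as_millis() as u64
}

fn increment_logical_clock(prev: u64) -> u64 {
	std::cmp::max(now_msec(), prev + 1)
}

fn increment_logical_clock_2(prev: u64, prev2: u64) -> u64 {
	std::cmp::max(now_msec(), std::cmp::max(prev, prev2) + 1)
}

fn main() {
	// With prev2 = 0 (no bucket state), the two-argument form reduces to
	// the one-argument form, which is why the match could be removed.
	let now = now_msec();
	assert_eq!(
		std::cmp::max(now, std::cmp::max(42, 0) + 1),
		std::cmp::max(now, 42 + 1)
	);
}
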
@@ -204,6 +212,10 @@ impl<'a> LockedHelper<'a> {
 		Ok(())
 	}
 
+	// ================================================
+	//      local bucket aliases
+	// ================================================
+
 	/// Sets a new alias for a bucket in the local namespace of a key.
 	/// This function fails if:
 	/// - alias name is not valid according to S3 spec

@@ -216,14 +228,12 @@ impl<'a> LockedHelper<'a> {
 		key_id: &String,
 		alias_name: &String,
 	) -> Result<(), Error> {
-		let key_helper = KeyHelper(self.0);
-
-		if !is_valid_bucket_name(alias_name) {
+		if !is_valid_bucket_name(alias_name, self.0.config.allow_punycode) {
 			return Err(Error::InvalidBucketName(alias_name.to_string()));
 		}
 
 		let mut bucket = self.bucket().get_existing_bucket(bucket_id).await?;
-		let mut key = key_helper.get_existing_key(key_id).await?;
+		let mut key = self.key().get_existing_key(key_id).await?;
 
 		let key_param = key.state.as_option_mut().unwrap();
 

@@ -272,23 +282,13 @@ impl<'a> LockedHelper<'a> {
 		key_id: &String,
 		alias_name: &String,
 	) -> Result<(), Error> {
-		let key_helper = KeyHelper(self.0);
-
 		let mut bucket = self.bucket().get_existing_bucket(bucket_id).await?;
-		let mut key = key_helper.get_existing_key(key_id).await?;
+		let mut key = self.key().get_existing_key(key_id).await?;
 
+		let key_p = key.state.as_option().unwrap();
 		let bucket_p = bucket.state.as_option_mut().unwrap();
 
-		if key
-			.state
-			.as_option()
-			.unwrap()
-			.local_aliases
-			.get(alias_name)
-			.cloned()
-			.flatten()
-			!= Some(bucket_id)
-		{
+		if key_p.local_aliases.get(alias_name).cloned().flatten() != Some(bucket_id) {
 			return Err(GarageError::Message(format!(
 				"Bucket {:?} does not have alias {} in namespace of key {}",
 				bucket_id, alias_name, key_id

@@ -305,17 +305,17 @@ impl<'a> LockedHelper<'a> {
 			.local_aliases
 			.items()
 			.iter()
-			.any(|((k, n), _, active)| *k == key.key_id && n == alias_name && *active);
+			.any(|((k, n), _, active)| (*k != key.key_id || n != alias_name) && *active);
 
 		if !has_other_global_aliases && !has_other_local_aliases {
 			return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", alias_name)));
 		}
 
 		// Checks ok, remove alias
-		let key_param = key.state.as_option_mut().unwrap();
 		let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());
 
 		let alias_ts = increment_logical_clock_2(
-			key_param.local_aliases.get_timestamp(alias_name),
+			key_p.local_aliases.get_timestamp(alias_name),
 			bucket_p
 				.local_aliases
 				.get_timestamp(&bucket_p_local_alias_key),

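Note that the `.any` change in this hunk is a behaviour fix, not just a rename: the old closure matched the alias currently being removed, so `has_other_local_aliases` could be true even when no *other* alias existed. A minimal self-contained demonstration (hypothetical data, shaped like the `((key_id, name), _, active)` triples above):

fn main() {
	// Hypothetical items: the only entry is the very alias being removed.
	let (key_id, alias_name) = ("GK123", "my-alias");
	let items = [(("GK123", "my-alias"), 0u64, true)];

	// Old predicate: "the alias being removed is active" -- true here,
	// wrongly counting the alias itself as an *other* alias.
	let old = items
		.iter()
		.any(|((k, n), _, active)| *k == key_id && *n == alias_name && *active);

	// New predicate: "some alias other than this one is active" -- false here.
	let new = items
		.iter()
		.any(|((k, n), _, active)| (*k != key_id || *n != alias_name) && *active);

	assert!(old && !new);
}
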
@@ -324,7 +324,8 @@ impl<'a> LockedHelper<'a> {
 		// ---- timestamp-ensured causality barrier ----
 		// writes are now done and all writes use timestamp alias_ts
 
-		key_param.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, None);
+		key.state.as_option_mut().unwrap().local_aliases =
+			LwwMap::raw_item(alias_name.clone(), alias_ts, None);
 		self.0.key_table.insert(&key).await?;
 
 		bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false);

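The split assignment replacing `key_param` is borrow-checker driven rather than cosmetic: `key_p` in the previous hunk holds a shared borrow of `key`, so the write must take a fresh mutable borrow once the reads are done. A minimal illustration with hypothetical types:

// Minimal illustration of the borrow pattern (hypothetical types).
struct State {
	local_aliases: Vec<String>,
}

struct Key {
	state: Option<State>,
}

fn update(key: &mut Key) {
	// Shared borrow used for reads...
	let key_p = key.state.as_ref().unwrap();
	let ts = key_p.local_aliases.len();

	// ...must end before we take a fresh mutable borrow for the write.
	key.state.as_mut().unwrap().local_aliases.push(format!("alias-{ts}"));
}

fn main() {
	let mut key = Key { state: Some(State { local_aliases: vec![] }) };
	update(&mut key);
	assert_eq!(key.state.unwrap().local_aliases, ["alias-0"]);
}
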
@@ -333,21 +334,68 @@ impl<'a> LockedHelper<'a> {
 		Ok(())
 	}
 
+	/// Ensures a bucket does not have a certain local alias.
+	/// Contrarily to unset_local_bucket_alias, this does not
+	/// fail on any condition other than:
+	/// - bucket cannot be found (its fine if it is in deleted state)
+	/// - key cannot be found (its fine if alias in key points to nothing
+	///   or to another bucket)
+	pub async fn purge_local_bucket_alias(
+		&self,
+		bucket_id: Uuid,
+		key_id: &String,
+		alias_name: &String,
+	) -> Result<(), Error> {
+		let mut bucket = self.bucket().get_internal_bucket(bucket_id).await?;
+		let mut key = self.key().get_internal_key(key_id).await?;
+
+		let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());
+
+		let alias_ts = increment_logical_clock_2(
+			key.state
+				.as_option()
+				.map(|p| p.local_aliases.get_timestamp(alias_name))
+				.unwrap_or(0),
+			bucket
+				.state
+				.as_option()
+				.map(|p| p.local_aliases.get_timestamp(&bucket_p_local_alias_key))
+				.unwrap_or(0),
+		);
+
+		// ---- timestamp-ensured causality barrier ----
+		// writes are now done and all writes use timestamp alias_ts
+
+		if let Some(kp) = key.state.as_option_mut() {
+			kp.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, None);
+			self.0.key_table.insert(&key).await?;
+		}
+
+		if let Some(bp) = bucket.state.as_option_mut() {
+			bp.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false);
+			self.0.bucket_table.insert(&bucket).await?;
+		}
+
+		Ok(())
+	}
+
+	// ================================================
+	//      permissions
+	// ================================================
+
 	/// Sets permissions for a key on a bucket.
 	/// This function fails if:
 	/// - bucket or key cannot be found at all (its ok if they are in deleted state)
-	/// - bucket or key is in deleted state and we are trying to set permissions other than "deny
-	///   all"
+	/// - bucket or key is in deleted state and we are trying to set
+	///   permissions other than "deny all"
 	pub async fn set_bucket_key_permissions(
 		&self,
 		bucket_id: Uuid,
 		key_id: &String,
 		mut perm: BucketKeyPerm,
 	) -> Result<(), Error> {
-		let key_helper = KeyHelper(self.0);
-
 		let mut bucket = self.bucket().get_internal_bucket(bucket_id).await?;
-		let mut key = key_helper.get_internal_key(key_id).await?;
+		let mut key = self.key().get_internal_key(key_id).await?;
 
 		if let Some(bstate) = bucket.state.as_option() {
 			if let Some(kp) = bstate.authorized_keys.get(key_id) {

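A hypothetical call-site sketch contrasting the two removal paths (using `unset_local_bucket_alias` and the new `purge_local_bucket_alias` as defined above):

// Hypothetical helper illustrating the difference between the two paths.
async fn remove_alias_examples(
	helper: &LockedHelper<'_>,
	bucket_id: Uuid,
	key_id: String,
	alias: String,
) -> Result<(), Error> {
	// unset_local_bucket_alias refuses to drop a bucket's *last* alias:
	if let Err(e) = helper
		.unset_local_bucket_alias(bucket_id, &key_id, &alias)
		.await
	{
		// e.g. BadRequest("Bucket ... doesn't have other aliases, ...")
		eprintln!("unset refused: {e}");
	}

	// purge_local_bucket_alias has no such guard; delete_key (below) uses
	// it so that deleting a key cannot abort halfway through its aliases.
	helper.purge_local_bucket_alias(bucket_id, &key_id, &alias).await
}
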
@@ -384,21 +432,20 @@ impl<'a> LockedHelper<'a> {
 		Ok(())
 	}
 
-	// ----
+	// ================================================
+	//      keys
+	// ================================================
 
 	/// Deletes an API access key
 	pub async fn delete_key(&self, key: &mut Key) -> Result<(), Error> {
 		let state = key.state.as_option_mut().unwrap();
 
 		// --- done checking, now commit ---
-		// (the step at unset_local_bucket_alias will fail if a bucket
-		// does not have another alias, the deletion will be
-		// interrupted in the middle if that happens)
 
 		// 1. Delete local aliases
 		for (alias, _, to) in state.local_aliases.items().iter() {
 			if let Some(bucket_id) = to {
-				self.unset_local_bucket_alias(*bucket_id, &key.key_id, alias)
+				self.purge_local_bucket_alias(*bucket_id, &key.key_id, alias)
 					.await?;
 			}
 		}

@@ -415,4 +462,193 @@ impl<'a> LockedHelper<'a> {
 
 		Ok(())
 	}
+
+	// ================================================
+	//      repair procedure
+	// ================================================
+
+	pub async fn repair_aliases(&self) -> Result<(), GarageError> {
+		self.0.db.transaction(|tx| {
+			info!("--- begin repair_aliases transaction ----");
+
+			// 1. List all non-deleted buckets, so that we can fix bad aliases
+			let mut all_buckets: HashSet<Uuid> = HashSet::new();
+
+			for item in tx.range::<&[u8], _>(&self.0.bucket_table.data.store, ..)? {
+				let bucket = self
+					.0
+					.bucket_table
+					.data
+					.decode_entry(&(item?.1))
+					.map_err(db::TxError::Abort)?;
+				if !bucket.is_deleted() {
+					all_buckets.insert(bucket.id);
+				}
+			}
+
+			info!("number of buckets: {}", all_buckets.len());
+
+			// 2. List all aliases declared in bucket_alias_table and key_table
+			// Take note of aliases that point to non-existing buckets
+			let mut global_aliases: HashMap<String, Uuid> = HashMap::new();
+
+			{
+				let mut delete_global = vec![];
+				for item in tx.range::<&[u8], _>(&self.0.bucket_alias_table.data.store, ..)? {
+					let mut alias = self
+						.0
+						.bucket_alias_table
+						.data
+						.decode_entry(&(item?.1))
+						.map_err(db::TxError::Abort)?;
+					if let Some(id) = alias.state.get() {
+						if all_buckets.contains(id) {
+							// keep aliases
+							global_aliases.insert(alias.name().to_string(), *id);
+						} else {
+							// delete alias
+							warn!(
+								"global alias: remove {} -> {:?} (bucket is deleted)",
+								alias.name(),
+								id
+							);
+							alias.state.update(None);
+							delete_global.push(alias);
+						}
+					}
+				}
+
+				info!("number of global aliases: {}", global_aliases.len());
+
+				info!("global alias table: {} entries fixed", delete_global.len());
+				for ga in delete_global {
+					debug!("Enqueue update to global alias table: {:?}", ga);
+					self.0.bucket_alias_table.queue_insert(tx, &ga)?;
+				}
+			}
+
+			let mut local_aliases: HashMap<(String, String), Uuid> = HashMap::new();
+
+			{
+				let mut delete_local = vec![];
+
+				for item in tx.range::<&[u8], _>(&self.0.key_table.data.store, ..)? {
+					let mut key = self
+						.0
+						.key_table
+						.data
+						.decode_entry(&(item?.1))
+						.map_err(db::TxError::Abort)?;
+					let Some(p) = key.state.as_option_mut() else {
+						continue;
+					};
+					let mut has_changes = false;
+					for (name, _, to) in p.local_aliases.items().to_vec() {
+						if let Some(id) = to {
+							if all_buckets.contains(&id) {
+								local_aliases.insert((key.key_id.clone(), name), id);
+							} else {
+								warn!(
+									"local alias: remove ({}, {}) -> {:?} (bucket is deleted)",
+									key.key_id, name, id
+								);
+								p.local_aliases.update_in_place(name, None);
+								has_changes = true;
+							}
+						}
+					}
+					if has_changes {
+						delete_local.push(key);
+					}
+				}
+
+				info!("number of local aliases: {}", local_aliases.len());
+
+				info!("key table: {} entries fixed", delete_local.len());
+				for la in delete_local {
+					debug!("Enqueue update to key table: {:?}", la);
+					self.0.key_table.queue_insert(tx, &la)?;
+				}
+			}
+
+			// 4. Reverse the alias maps to determine the aliases per-bucket
+			let mut bucket_global: HashMap<Uuid, Vec<String>> = HashMap::new();
+			let mut bucket_local: HashMap<Uuid, Vec<(String, String)>> = HashMap::new();
+
+			for (name, bucket) in global_aliases {
+				bucket_global.entry(bucket).or_default().push(name);
+			}
+			for ((key, name), bucket) in local_aliases {
+				bucket_local.entry(bucket).or_default().push((key, name));
+			}
+
+			// 5. Fix the bucket table to ensure consistency
+			let mut bucket_updates = vec![];
+
+			for item in tx.range::<&[u8], _>(&self.0.bucket_table.data.store, ..)? {
+				let bucket = self
+					.0
+					.bucket_table
+					.data
+					.decode_entry(&(item?.1))
+					.map_err(db::TxError::Abort)?;
+				let mut bucket2 = bucket.clone();
+				let Some(param) = bucket2.state.as_option_mut() else {
+					continue;
+				};
+
+				// fix global aliases
+				{
+					let ga = bucket_global.remove(&bucket.id).unwrap_or_default();
+					for (name, _, active) in param.aliases.items().to_vec() {
+						if active && !ga.contains(&name) {
+							warn!("bucket {:?}: remove global alias {}", bucket.id, name);
+							param.aliases.update_in_place(name, false);
+						}
+					}
+					for name in ga {
+						if param.aliases.get(&name).copied() != Some(true) {
+							warn!("bucket {:?}: add global alias {}", bucket.id, name);
+							param.aliases.update_in_place(name, true);
+						}
+					}
+				}
+
+				// fix local aliases
+				{
+					let la = bucket_local.remove(&bucket.id).unwrap_or_default();
+					for (pair, _, active) in param.local_aliases.items().to_vec() {
+						if active && !la.contains(&pair) {
+							warn!("bucket {:?}: remove local alias {:?}", bucket.id, pair);
+							param.local_aliases.update_in_place(pair, false);
+						}
+					}
+					for pair in la {
+						if param.local_aliases.get(&pair).copied() != Some(true) {
+							warn!("bucket {:?}: add local alias {:?}", bucket.id, pair);
+							param.local_aliases.update_in_place(pair, true);
+						}
+					}
+				}
+
+				if bucket2 != bucket {
+					bucket_updates.push(bucket2);
+				}
+			}
+
+			info!("bucket table: {} entries fixed", bucket_updates.len());
+			for b in bucket_updates {
+				debug!("Enqueue update to bucket table: {:?}", b);
+				self.0.bucket_table.queue_insert(tx, &b)?;
+			}
+
+			info!("--- end repair_aliases transaction ----");
+
+			Ok(())
+		})?;
+
+		info!("repair_aliases is done");
+
+		Ok(())
+	}
 }

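Finally, a hypothetical invocation sketch for the new repair procedure (the diff shows that `Garage::locked_helper` returns a `LockedHelper<'_>` and that `repair_aliases` is an `&self` method on it):

// Hypothetical call site, e.g. from an offline repair command:
async fn run_alias_repair(garage: &Garage) -> Result<(), GarageError> {
	// Take the bucket lock, then fix aliases in a single db transaction.
	garage.locked_helper().await.repair_aliases().await
}
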
Some files were not shown because too many files have changed in this diff.