Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2026-05-14 21:26:53 -04:00)

Compare commits: dump-table...main-v1 (55 commits)
Commits included in this comparison (SHA1):

b6b18427a5, 9987166b2b, b72b090a09, 8551aefed4, 47bf5d9fb0, 5df37dae5e,
44af0bdab3, a7d6620e18, 8eb12755e4, c685a2cbaf, 969f42a970, 424d4f8d4d,
bf5290036f, 4efc8bac07, f3dcc39903, 43e02920c2, dcc2fe4ac5, e3a5ec6ef6,
4d124e1c76, d769a7be5d, 511cf0c6ec, 95693d45b2, ca296477f3, ca3b4a050d,
a057ab23ea, 58bc65b9a8, ac851d6dee, eac2aa6fe4, 1e0201ada2, 82297371bf,
174f4f01a8, 1aac7b4875, b43c58cbe5, 9481ac428e, 1c29d04cc5, b48a8eaa1f,
42fd8583bd, 236af3a958, 4b1fdbef55, 0f1b488be0, 0bbf63ee0e, 879d941d7b,
d726cf0299, 0c7aeab6f8, 5687fc0375, 97f1e9ab52, 60b1d78b56, 4c895a7186,
c3b5cbf212, 57a467b5c0, 6cf6db5c61, d5a57e3e13, 5cf354acb4, 2b007ddea3,
c8599a8636
65 changed files with 1497 additions and 1145 deletions
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - push
@@ -9,32 +12,32 @@ when:
 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
-      - nix-shell --attr devShell --run "cargo fmt -- --check"
+      - nix-build -j4 --attr flakePackages.fmt

   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.dev

   - name: unit + func tests (lmdb)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-lmdb

   - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-sqlite

   - name: unit + func tests (fjall)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-fjall

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build -j4 --attr flakePackages.dev
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -8,7 +11,7 @@ depends_on:
 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       AWS_ACCESS_KEY_ID:
         from_secret: garagehq_aws_access_key_id
@@ -19,7 +22,7 @@ steps:
       - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       DOCKER_AUTH:
         from_secret: docker_auth
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment
@@ -16,17 +19,17 @@ matrix:
 steps:
   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
     when:
@@ -36,7 +39,7 @@ steps:
       ARCH: i386

   - name: upgrade tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     commands:
       - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
     when:
@@ -44,7 +47,7 @@ steps:
      ARCH: amd64

   - name: push static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       TARGET: "${TARGET}"
       AWS_ACCESS_KEY_ID:
@@ -55,7 +58,7 @@ steps:
       - nix-shell --attr ci --run "to_s3"

   - name: docker build and publish
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
Cargo.lock (generated) — 1744 changed lines; file diff suppressed because it is too large.
Cargo.toml — 43 changed lines

@@ -24,18 +24,18 @@ default-members = ["src/garage"]
 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.2.0", path = "src/api/common" }
+garage_api_common = { version = "1.3.1", path = "src/api/common" }
-garage_api_admin = { version = "1.2.0", path = "src/api/admin" }
+garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.2.0", path = "src/api/s3" }
+garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.2.0", path = "src/api/k2v" }
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
-garage_block = { version = "1.2.0", path = "src/block" }
+garage_block = { version = "1.3.1", path = "src/block" }
-garage_db = { version = "1.2.0", path = "src/db", default-features = false }
+garage_db = { version = "1.3.1", path = "src/db", default-features = false }
-garage_model = { version = "1.2.0", path = "src/model", default-features = false }
+garage_model = { version = "1.3.1", path = "src/model", default-features = false }
-garage_net = { version = "1.2.0", path = "src/net" }
+garage_net = { version = "1.3.1", path = "src/net" }
-garage_rpc = { version = "1.2.0", path = "src/rpc" }
+garage_rpc = { version = "1.3.1", path = "src/rpc" }
-garage_table = { version = "1.2.0", path = "src/table" }
+garage_table = { version = "1.3.1", path = "src/table" }
-garage_util = { version = "1.2.0", path = "src/util" }
+garage_util = { version = "1.3.1", path = "src/util" }
-garage_web = { version = "1.2.0", path = "src/web" }
+garage_web = { version = "1.3.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -52,7 +52,6 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
-err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"
@@ -88,9 +87,9 @@ tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
+rusqlite = "0.37"
 r2d2 = "0.8"
-r2d2_sqlite = "0.24"
+r2d2_sqlite = "0.31"
 fjall = "2.4"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
@@ -137,7 +136,7 @@ prometheus = "0.13"
 aws-sigv4 = { version = "1.1", default-features = false }
 hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
 log = "0.4"
-thiserror = "1.0"
+thiserror = "2.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"
@@ -147,12 +146,8 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
 aws-sdk-config = { version = "1.62", default-features = false }
 aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }

-[profile.dev]
-#lto = "thin" # disabled for now, adds 2-4 min to each CI build
-lto = "off"
-
 [profile.release]
-lto = true
+lto = "thin"
-codegen-units = 1
+codegen-units = 16
-opt-level = "s"
+opt-level = 3
-strip = true
+strip = "debuginfo"
@@ -161,3 +161,49 @@ kopia repository validate-provider
 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.

+## Plakar
+
+Create your key and bucket on the Garage server:
+
+```bash
+garage key create my-plakar-key
+garage bucket create plakar-backups
+garage bucket allow plakar-backups --read --write --key my-plakar-key
+…
+```
+
+On the Plakar server, add your Garage as a storage location:
+```bash
+plakar store add garageS3 s3://my-garage.tld/plakar-backups \
+    region=garage # Or as you've specified in garage.toml \
+    access_key=<Key ID from "garage key info my-plakar-key"> \
+    secret_access_key=<Secret key from "garage key info my-plakar-key">
+```
+
+Then create the repository.
+```bash
+plakar at @garageS3 create -plaintext # Unencrypted
+# or
+plakar at @garageS3 create # Encrypted
+```
+
+If you encrypt your backups (the Plakar default), you will need to define a strong passphrase. Do not forget to save your password safely: it will be needed to decrypt your backups.
+
+After the repository has been created, check that everything works as expected (this might give an empty result as no file has been added yet, but there should be no error message):
+```bash
+plakar at @garageS3 check
+```
+
+Now that everything is configured, you can use Garage as your backup storage. For instance, sync it with a local backup storage:
+```bash
+$ plakar at ~/backups sync to @garageS3
+```
+
+Or list the S3 storage content:
+```bash
+$ plakar at @garageS3 ls
+```
+
+More information in the Plakar documentation: https://www.plakar.io/docs/main/quickstart/
@@ -15,9 +15,10 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage.toml`. You can run
-Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
-will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage/garage.toml`. You can run
+Garage using: `rc-service garage start`.
+
+If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@@ -26,7 +27,7 @@ it's stable).
 ## Arch Linux

-Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).
+Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).

 ## FreeBSD
@@ -11,7 +11,7 @@ Firstly clone the repository:
 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/scripts/helm
+cd garage/script/helm
 ```

 Deploy with default options:
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.2.0`) and not the `latest` tag.
+We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.2.0` but it's up to you
+For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.2.0
+sudo docker pull dxflrs/garage:v1.3.0
 ```

 ## Deploying and configuring Garage
@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.2.0
+  dxflrs/garage:v1.3.0
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.2.0
+    image: dxflrs/garage:v1.3.0
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -132,7 +132,7 @@ docker run \
   -v /path/to/garage.toml:/etc/garage.toml \
   -v /path/to/garage/meta:/var/lib/garage/meta \
   -v /path/to/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.2.0
+  dxflrs/garage:v1.3.0
 ```

 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
@@ -24,7 +24,8 @@ db_engine = "lmdb"
 block_size = "1M"
 block_ram_buffer_max = "256MiB"
+block_max_concurrent_reads = 16
+block_max_concurrent_writes_per_request = 10
 lmdb_map_size = "1T"

 compression_level = 1
@@ -96,7 +97,9 @@ The following gives details about each available configuration option.
 Top-level configuration options, in alphabetical order:
 [`allow_punycode`](#allow_punycode),
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
+[`block_max_concurrent_reads`](#block_max_concurrent_reads),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
+[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
@@ -522,6 +525,37 @@ node.
 The default value is 256MiB.

+#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
+
+The maximum number of blocks (individual files in the data directory) open
+simultaneously for reading.
+
+Reducing this number does not limit the number of data blocks that can be
+transferred through the network simultaneously. This mechanism was just added
+as a backpressure mechanism for HDD read speed: it helps avoid a situation
+where too many requests are coming in and Garage is reading too many block
+files simultaneously, thus not making timely progress on any of the reads.
+
+When a request to read a data block comes in through the network, the request
+waits for one of the `block_max_concurrent_reads` slots to become available
+(internally implemented using a Semaphore object). Once it has acquired a read
+slot, it reads the entire block file to RAM and frees the slot as soon as the
+block file is finished reading. Only after the slot is released will the
+block's data start being transferred over the network. If the request fails to
+acquire a reading slot within 15 seconds, it fails with a timeout error.
+Timeout events can be monitored through the `block_read_semaphore_timeouts`
+metric in Prometheus: a non-zero number of such events indicates an I/O
+bottleneck on HDD read speed.
+
+#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
+
+This parameter sets the maximum number of parallel block writes per PUT
+request. It is designed to adapt to the concurrent write performance of
+different storage media: higher values improve throughput but increase memory
+usage.
+
+Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.
+
 #### `lmdb_map_size` {#lmdb_map_size}

 This parameter can be used to set the map size used by LMDB,
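The acquire/read/release behaviour described above for `block_max_concurrent_reads` can be made concrete with a small sketch. This is an illustration only, not Garage's actual code: the function name, the error handling and the way the semaphore is sized are assumptions based solely on the documentation text above (a semaphore holding the configured number of slots, a 15-second acquisition timeout, and a slot released once the block file has been read to RAM).

```rust
use std::time::Duration;
use tokio::sync::Semaphore;
use tokio::time::timeout;

// Illustrative read-slot limiter: create the semaphore once with
// Semaphore::new(block_max_concurrent_reads) and share it between requests.
async fn read_block_limited(sem: &Semaphore, path: &std::path::Path) -> std::io::Result<Vec<u8>> {
    // Wait up to 15 seconds for a read slot; report a timeout otherwise
    // (this is the event a counter such as block_read_semaphore_timeouts would record).
    let permit = timeout(Duration::from_secs(15), sem.acquire())
        .await
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "no read slot within 15s"))?
        .expect("semaphore closed");

    // Read the whole block file to RAM while holding the slot...
    let data = tokio::fs::read(path).await?;

    // ...and free the slot before the data is sent over the network.
    drop(permit);
    Ok(data)
}
```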
@@ -27,7 +27,7 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
 | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
 |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
-| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
+| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
 | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
 | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓ | ✅ |
 | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌ | ✅ | ✅ | ✅ |
@@ -70,7 +70,7 @@ Example response body:
 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.2.0",
+  "garageVersion": "v1.3.0",
   "garageFeatures": [
     "k2v",
     "lmdb",
flake.lock (generated) — 16 changed lines

@@ -50,17 +50,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1736692550,
+        "lastModified": 1763977559,
-        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
+        "narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       }
     },
@@ -80,17 +80,17 @@
       ]
     },
     "locked": {
-      "lastModified": 1738549608,
+      "lastModified": 1763952169,
-      "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
+      "narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     },
     "original": {
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     }
   },
flake.nix — 16 changed lines

@@ -2,13 +2,13 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 24.11 as of 2025-01-12
+  # Nixpkgs 25.05 as of 2025-11-24
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
+    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";

-  # Rust overlay as of 2025-02-03
+  # Rust overlay as of 2025-11-24
   inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
+    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
   inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

   inputs.crane.url = "github:ipetkov/crane";
@@ -30,6 +30,10 @@
         inherit system nixpkgs crane rust-overlay extraTestEnv;
         release = false;
       }).garage-test;
+      lints = (compile {
+        inherit system nixpkgs crane rust-overlay;
+        release = false;
+      });
     in
     {
       packages = {
@@ -56,6 +60,10 @@
         tests-fjall = testWith {
           GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
         };
+
+        # lints (fmt, clippy)
+        fmt = lints.garage-cargo-fmt;
+        clippy = lints.garage-cargo-clippy;
       };

       # ---- developpment shell, for making native builds only ----
@@ -48,7 +48,7 @@ let
   inherit (pkgs) lib stdenv;

-  toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
+  toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
     targets = lib.optionals (target != null) [ rustTarget ];
     extensions = [
       "rust-src"
@@ -190,4 +190,15 @@ in rec {
       pkgs.cacert
     ];
   } // extraTestEnv);
+
+  # ---- source code linting ----
+
+  garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
+    cargoExtraArgs = "";
+  });
+
+  garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
+    cargoArtifacts = garage-deps;
+    cargoClippyExtraArgs = "--all-targets -- -D warnings";
+  });
 }
@@ -1,6 +1,7 @@
 export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
 export AWS_DEFAULT_REGION='garage'
+export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
 # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
 function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
@@ -2,8 +2,8 @@ apiVersion: v2
 name: garage
 description: S3-compatible object store for small self-hosted geo-distributed deployments
 type: application
-version: 0.7.1
+version: 0.7.3
-appVersion: "v1.2.0"
+appVersion: "v1.3.1"
 home: https://garagehq.deuxfleurs.fr/
 icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
@@ -1,6 +1,6 @@
 # garage

 [chart version / appVersion badge line updated; badge images were not captured in this mirror]

 S3-compatible object store for small self-hosted geo-distributed deployments
@@ -4,6 +4,10 @@ metadata:
   name: {{ include "garage.fullname" . }}
   labels:
     {{- include "garage.labels" . | nindent 4 }}
+  {{- with .Values.service.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 spec:
   type: {{ .Values.service.type }}
   ports:
@@ -37,4 +41,4 @@ spec:
       name: metrics
   selector:
     {{- include "garage.selectorLabels" . | nindent 4 }}
 {{- end }}
@@ -124,6 +124,8 @@ service:
   # - NodePort (+ Ingress)
   # - LoadBalancer
   type: ClusterIP
+  # -- Annotations to add to the service
+  annotations: {}
   s3:
     api:
       port: 3900
@@ -34,6 +34,8 @@ in
     jq
   ];
   shellHook = ''
+    export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
+
     function to_s3 {
       aws \
         --endpoint-url https://garage.deuxfleurs.fr \
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_admin"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -22,7 +22,7 @@ garage_api_common.workspace = true
 argon2.workspace = true
 async-trait.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 tracing.workspace = true
@@ -1,8 +1,8 @@
 use std::convert::TryFrom;

-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 pub use garage_model::helper::error::Error as HelperError;
@@ -16,20 +16,17 @@ use garage_api_common::helpers::*;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(#[from] CommonError),

     // Category: cannot process
     /// The API access key does not exist
-    #[error(display = "Access key not found: {}", _0)]
+    #[error("Access key not found: {0}")]
     NoSuchAccessKey(String),

     /// In Import key, the key already exists
-    #[error(
-        display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
-        _0
-    )]
+    #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
     KeyAlreadyExists(String),
 }
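The hunk above is the first of several identical migrations from `err-derive` to `thiserror` in this comparison. For readers unfamiliar with the two crates, the mapping is mechanical; the sketch below is illustrative only (the enum and variant names are made up, not taken from the Garage sources):

```rust
use thiserror::Error;

// A stand-in for a wrapped error type, used only in this sketch.
#[derive(Debug, Error)]
#[error("some inner error")]
struct InnerError;

// err-derive 0.3 wrote:  #[error(display = "Not found: {}", _0)]
// thiserror writes:      #[error("Not found: {0}")]
// and #[from] replaces #[error(source)]: it marks the field as the error
// source *and* generates a `From<InnerError> for MyError` implementation.
#[derive(Debug, Error)]
enum MyError {
    #[error("Not found: {0}")]
    NotFound(String),
    #[error("{0}")]
    Inner(#[from] InnerError),
}
```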
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_common"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -24,7 +24,7 @@ chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
 crypto-common.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 hmac.workspace = true
 md-5.workspace = true
@@ -1,7 +1,7 @@
 use std::convert::TryFrom;

-use err_derive::Error;
 use hyper::StatusCode;
+use thiserror::Error;

 use garage_util::error::Error as GarageError;
@@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
 pub enum CommonError {
     // ---- INTERNAL ERRORS ----
     /// Error related to deeper parts of Garage
-    #[error(display = "Internal error: {}", _0)]
+    #[error("Internal error: {0}")]
-    InternalError(#[error(source)] GarageError),
+    InternalError(#[from] GarageError),

     /// Error related to Hyper
-    #[error(display = "Internal error (Hyper error): {}", _0)]
+    #[error("Internal error (Hyper error): {0}")]
-    Hyper(#[error(source)] hyper::Error),
+    Hyper(#[from] hyper::Error),

     /// Error related to HTTP
-    #[error(display = "Internal error (HTTP error): {}", _0)]
+    #[error("Internal error (HTTP error): {0}")]
-    Http(#[error(source)] http::Error),
+    Http(#[from] http::Error),

     // ---- GENERIC CLIENT ERRORS ----
     /// Proper authentication was not provided
-    #[error(display = "Forbidden: {}", _0)]
+    #[error("Forbidden: {0}")]
     Forbidden(String),

     /// Generic bad request response with custom message
-    #[error(display = "Bad request: {}", _0)]
+    #[error("Bad request: {0}")]
     BadRequest(String),

     /// The client sent a header with invalid value
-    #[error(display = "Invalid header value: {}", _0)]
+    #[error("Invalid header value: {0}")]
-    InvalidHeader(#[error(source)] hyper::header::ToStrError),
+    InvalidHeader(#[from] hyper::header::ToStrError),

     // ---- SPECIFIC ERROR CONDITIONS ----
     // These have to be error codes referenced in the S3 spec here:
     // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
     /// The bucket requested don't exists
-    #[error(display = "Bucket not found: {}", _0)]
+    #[error("Bucket not found: {0}")]
     NoSuchBucket(String),

     /// Tried to create a bucket that already exist
-    #[error(display = "Bucket already exists")]
+    #[error("Bucket already exists")]
     BucketAlreadyExists,

     /// Tried to delete a non-empty bucket
-    #[error(display = "Tried to delete a non-empty bucket")]
+    #[error("Tried to delete a non-empty bucket")]
     BucketNotEmpty,

     // Category: bad request
     /// Bucket name is not valid according to AWS S3 specs
-    #[error(display = "Invalid bucket name: {}", _0)]
+    #[error("Invalid bucket name: {0}")]
     InvalidBucketName(String),
 }
@@ -33,7 +33,6 @@ use garage_util::metrics::{gen_trace_id, RecordDuration};
 use garage_util::socket_address::UnixOrTCPSocketAddress;

 use crate::helpers::{BoxBody, ErrorBody};
-use crate::signature::payload::Authorization;

 pub trait ApiEndpoint: Send + Sync + 'static {
     fn name(&self) -> &'static str;
@@ -62,7 +61,7 @@ pub trait ApiHandler: Send + Sync + 'static {
     /// Returns the key id used to authenticate this request. The ID returned must be safe to
     /// log.
-    fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
+    fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
         None
     }
 }
@@ -1,4 +1,4 @@
-use err_derive::Error;
+use thiserror::Error;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
@@ -6,21 +6,21 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInterna
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
     Common(CommonError),

     /// Authorization Header Malformed
-    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+    #[error("Authorization header malformed, unexpected scope: {0}")]
     AuthorizationHeaderMalformed(String),

     // Category: bad request
     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error(display = "Invalid UTF-8: {}", _0)]
+    #[error("Invalid UTF-8: {0}")]
-    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+    InvalidUtf8Str(#[from] std::str::Utf8Error),

     /// The provided digest (checksum) value was invalid
-    #[error(display = "Invalid digest: {}", _0)]
+    #[error("Invalid digest: {0}")]
     InvalidDigest(String),
 }
@@ -104,7 +104,7 @@ async fn check_standard_signature(
 	// Verify that all necessary request headers are included in signed_headers
 	// The following must be included for all signatures:
 	// - the Host header (mandatory)
-	// - all x-amz-* headers used in the request
+	// - all x-amz-* headers used in the request (except x-amz-content-sha256)
 	// AWS also indicates that the Content-Type header should be signed if
 	// it is used, but Minio client doesn't sign it so we don't check it for compatibility.
 	let signed_headers = split_signed_headers(&authorization)?;
@@ -151,7 +151,7 @@ async fn check_presigned_signature(
 	// Verify that all necessary request headers are included in signed_headers
 	// For AWSv4 pre-signed URLs, the following must be included:
 	// - the Host header (mandatory)
-	// - all x-amz-* headers used in the request
+	// - all x-amz-* headers used in the request (except x-amz-content-sha256)
 	let signed_headers = split_signed_headers(&authorization)?;
 	verify_signed_headers(request.headers(), &signed_headers)?;
@@ -268,7 +268,9 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
 		return Err(Error::bad_request("Header `Host` should be signed"));
 	}
 	for (name, _) in headers.iter() {
-		if name.as_str().starts_with("x-amz-") {
+		// Enforce signature of all x-amz-* headers, except x-amz-content-sha256
+		// because it is included in the canonical request in all cases
+		if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
 			if !signed_headers.contains(name) {
 				return Err(Error::bad_request(format!(
 					"Header `{}` should be signed",
@@ -468,8 +470,7 @@ impl Authorization {
 		let date = headers
 			.get(X_AMZ_DATE)
-			.ok_or_bad_request("Missing X-Amz-Date field")
-			.map_err(Error::from)?
+			.ok_or_bad_request("Missing X-Amz-Date field")?
 			.to_str()?;
 		let date = parse_date(date)?;
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_k2v"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
 garage_api_common.workspace = true

 base64.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true

 futures.workspace = true
@@ -1,6 +1,6 @@
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 use garage_api_common::common_error::{commonErrorDerivative, CommonError};
 pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
@@ -14,38 +14,38 @@ use garage_api_common::signature::error::Error as SignatureError;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(#[from] CommonError),

     // Category: cannot process
     /// Authorization Header Malformed
-    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+    #[error("Authorization header malformed, unexpected scope: {0}")]
     AuthorizationHeaderMalformed(String),

     /// The provided digest (checksum) value was invalid
-    #[error(display = "Invalid digest: {}", _0)]
+    #[error("Invalid digest: {0}")]
     InvalidDigest(String),

     /// The object requested don't exists
-    #[error(display = "Key not found")]
+    #[error("Key not found")]
     NoSuchKey,

     /// Some base64 encoded data was badly encoded
-    #[error(display = "Invalid base64: {}", _0)]
+    #[error("Invalid base64: {0}")]
-    InvalidBase64(#[error(source)] base64::DecodeError),
+    InvalidBase64(#[from] base64::DecodeError),

     /// Invalid causality token
-    #[error(display = "Invalid causality token")]
+    #[error("Invalid causality token")]
     InvalidCausalityToken,

     /// The client asked for an invalid return format (invalid Accept header)
-    #[error(display = "Not acceptable: {}", _0)]
+    #[error("Not acceptable: {0}")]
     NotAcceptable(String),

     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error(display = "Invalid UTF-8: {}", _0)]
+    #[error("Invalid UTF-8: {0}")]
-    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+    InvalidUtf8Str(#[from] std::str::Utf8Error),
 }

 commonErrorDerivative!(Error);
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_s3"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -29,7 +29,7 @@ bytes.workspace = true
 chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 tracing.workspace = true
 md-5.workspace = true
@@ -88,7 +88,9 @@ pub async fn handle_put_cors(
 pub struct CorsConfiguration {
     #[serde(serialize_with = "xmlns_tag", skip_deserializing)]
     pub xmlns: (),
-    #[serde(rename = "CORSRule")]
+    // "default" is required to be able to parse an empty list of rules,
+    // cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
+    #[serde(rename = "CORSRule", default)]
     pub cors_rules: Vec<CorsRule>,
 }
@@ -270,4 +272,26 @@ mod tests {
 		Ok(())
 	}

+	#[test]
+	fn test_deserialize_norules() -> Result<(), Error> {
+		let message = r#"<?xml version="1.0" encoding="UTF-8"?>
+<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
+		let conf: CorsConfiguration = from_str(message).unwrap();
+		let ref_value = CorsConfiguration {
+			xmlns: (),
+			cors_rules: vec![],
+		};
+		assert_eq! {
+			ref_value,
+			conf
+		};
+
+		let message2 = to_xml_with_header(&ref_value)?;
+
+		let cleanup = |c: &str| c.replace(char::is_whitespace, "");
+		assert_eq!(cleanup(message), cleanup(&message2));
+
+		Ok(())
+	}
 }
@ -1,8 +1,8 @@
|
||||||
use std::convert::TryInto;
|
use std::convert::TryInto;
|
||||||
|
|
||||||
use err_derive::Error;
|
|
||||||
use hyper::header::HeaderValue;
|
use hyper::header::HeaderValue;
|
||||||
use hyper::{HeaderMap, StatusCode};
|
use hyper::{HeaderMap, StatusCode};
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
use garage_model::helper::error::Error as HelperError;
|
use garage_model::helper::error::Error as HelperError;
|
||||||
|
|
||||||
|
|
@@ -25,67 +25,67 @@ use crate::xml as s3_xml;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	/// Error from common error
-	Common(#[error(source)] CommonError),
+	Common(#[from] CommonError),

 	// Category: cannot process
 	/// Authorization Header Malformed
-	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+	#[error("Authorization header malformed, unexpected scope: {0}")]
 	AuthorizationHeaderMalformed(String),

 	/// The object requested don't exists
-	#[error(display = "Key not found")]
+	#[error("Key not found")]
 	NoSuchKey,

 	/// The multipart upload requested don't exists
-	#[error(display = "Upload not found")]
+	#[error("Upload not found")]
 	NoSuchUpload,

 	/// Precondition failed (e.g. x-amz-copy-source-if-match)
-	#[error(display = "At least one of the preconditions you specified did not hold")]
+	#[error("At least one of the preconditions you specified did not hold")]
 	PreconditionFailed,

 	/// Parts specified in CMU request do not match parts actually uploaded
-	#[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
+	#[error("Parts given to CompleteMultipartUpload do not match uploaded parts")]
 	InvalidPart,

 	/// Parts given to CompleteMultipartUpload were not in ascending order
-	#[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
+	#[error("Parts given to CompleteMultipartUpload were not in ascending order")]
 	InvalidPartOrder,

 	/// In CompleteMultipartUpload: not enough data
 	/// (here we are more lenient than AWS S3)
-	#[error(display = "Proposed upload is smaller than the minimum allowed object size")]
+	#[error("Proposed upload is smaller than the minimum allowed object size")]
 	EntityTooSmall,

 	// Category: bad request
 	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
-	#[error(display = "Invalid UTF-8: {}", _0)]
-	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+	#[error("Invalid UTF-8: {0}")]
+	InvalidUtf8Str(#[from] std::str::Utf8Error),

 	/// The request used an invalid path
-	#[error(display = "Invalid UTF-8: {}", _0)]
-	InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
+	#[error("Invalid UTF-8: {0}")]
+	InvalidUtf8String(#[from] std::string::FromUtf8Error),

 	/// The client sent invalid XML data
-	#[error(display = "Invalid XML: {}", _0)]
+	#[error("Invalid XML: {0}")]
 	InvalidXml(String),

 	/// The client sent a range header with invalid value
-	#[error(display = "Invalid HTTP range: {:?}", _0)]
-	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
+	#[error("Invalid HTTP range: {0:?}")]
+	InvalidRange((http_range::HttpRangeParseError, u64)),

 	/// The client sent a range header with invalid value
-	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
+	#[error("Invalid encryption algorithm: {0:?}, should be AES256")]
 	InvalidEncryptionAlgorithm(String),

 	/// The provided digest (checksum) value was invalid
-	#[error(display = "Invalid digest: {}", _0)]
+	#[error("Invalid digest: {0}")]
 	InvalidDigest(String),

 	/// The client sent a request for an action not supported by garage
-	#[error(display = "Unimplemented action: {}", _0)]
+	#[error("Unimplemented action: {0}")]
 	NotImplemented(String),
 }
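The err-derive to thiserror migration above is mechanical and repeats in all the crates touched further down: positional fields move from `_0`-style references to `{0}` placeholders, and `#[error(source)]` / `#[error(from)]` become thiserror's `#[from]`. A minimal self-contained sketch of the pattern (hypothetical variant, not taken from this changeset):

    // err-derive (old):
    //     #[error(display = "Invalid marker: {}", _0)]
    //     InvalidMarker(#[error(source)] std::num::ParseIntError),
    //
    // thiserror (new): same variant, new attribute syntax.
    use thiserror::Error;

    #[derive(Debug, Error)]
    pub enum ExampleError {
        #[error("Invalid marker: {0}")]
        InvalidMarker(#[from] std::num::ParseIntError),
    }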
@@ -99,6 +99,12 @@ impl From<HelperError> for Error {
 	}
 }

+impl From<(http_range::HttpRangeParseError, u64)> for Error {
+	fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
+		Error::InvalidRange(err)
+	}
+}
+
 impl From<roxmltree::Error> for Error {
 	fn from(err: roxmltree::Error) -> Self {
 		Self::InvalidXml(format!("{}", err))
@@ -845,7 +845,9 @@ impl PreconditionHeaders {
 	}

 	fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
-		let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
+		// we store date with ms precision, but headers are precise to the second: truncate
+		// the timestamp to handle the same-second edge case
+		let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);

 		// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
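A small worked example of why the truncation matters (the values below are made up): HTTP date headers only carry whole seconds, so a version timestamp stored with millisecond precision has to be rounded down before the comparison, otherwise a header naming the very same second would spuriously fail the precondition.

    // A version written at 1_700_000_000_456 ms compared against a header date
    // of 1_700_000_000 s. With from_millis the two differ by 456 ms; with the
    // truncation they compare equal, as intended.
    use std::time::{Duration, UNIX_EPOCH};

    fn main() {
        let v_timestamp_ms: u64 = 1_700_000_000_456;
        let v_date = UNIX_EPOCH + Duration::from_secs(v_timestamp_ms / 1000);
        let header_date = UNIX_EPOCH + Duration::from_secs(1_700_000_000);
        assert_eq!(v_date, header_date);
    }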
@@ -141,10 +141,26 @@ pub async fn handle_post_object(

 	let mut conditions = decoded_policy.into_conditions()?;

+	// If there are conditions on the bucket name, check these against the actual bucket_name rather
+	// than the one in params, which is allowed to be absent.
+	if let Some(conds) = conditions.params.remove("bucket") {
+		for cond in conds {
+			let ok = match cond {
+				Operation::Equal(s) => s.as_str() == bucket_name,
+				Operation::StartsWith(s) => bucket_name.starts_with(&s),
+			};
+			if !ok {
+				return Err(Error::bad_request(
+					"Key 'bucket' has value not allowed in policy",
+				));
+			}
+		}
+	}
+
 	for (param_key, value) in params.iter() {
 		let param_key = param_key.as_str();
 		match param_key {
-			"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
+			"policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields
 			"content-type" => {
 				let conds = conditions.params.remove("content-type").ok_or_else(|| {
 					Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
@@ -39,8 +39,6 @@ use crate::encryption::EncryptionParams;
 use crate::error::*;
 use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;

-const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
-
 pub(crate) struct SaveStreamResult {
 	pub(crate) version_uuid: Uuid,
 	pub(crate) version_timestamp: u64,
@@ -493,7 +491,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
 		};
 		let recv_next = async {
 			// If more than a maximum number of writes are in progress, don't add more for now
-			if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
+			if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request {
 				futures::future::pending().await
 			} else {
 				block_rx3.recv().await
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -50,6 +50,8 @@ pub const INLINE_THRESHOLD: usize = 3072;
 // to delete the block locally.
 pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);

+const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);
+
 /// RPC messages used to share blocks of data between nodes
 #[derive(Debug, Serialize, Deserialize)]
 pub enum BlockRpc {
@@ -87,6 +89,7 @@ pub struct BlockManager {
 	disable_scrub: bool,

 	mutation_lock: Vec<Mutex<BlockManagerLocked>>,
+	read_semaphore: Semaphore,

 	pub rc: BlockRc,
 	pub resync: BlockResyncManager,
@@ -176,6 +179,8 @@ impl BlockManager {
 				.iter()
 				.map(|_| Mutex::new(BlockManagerLocked()))
 				.collect::<Vec<_>>(),
+
+			read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
 			rc,
 			resync,
 			system,
@@ -557,9 +562,6 @@ impl BlockManager {
 		match self.find_block(hash).await {
 			Some(p) => self.read_block_from(hash, &p).await,
 			None => {
-				// Not found but maybe we should have had it ??
-				self.resync
-					.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
 				return Err(Error::Message(format!(
 					"block {:?} not found on node",
 					hash
@@ -581,6 +583,15 @@ impl BlockManager {
 	) -> Result<DataBlock, Error> {
 		let (header, path) = block_path.as_parts_ref();

+		let permit = tokio::select! {
+			sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
+			_ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
+				self.metrics.block_read_semaphore_timeouts.add(1);
+				debug!("read block {:?}: read_semaphore acquire timeout", hash);
+				return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
+			}
+		};
+
 		let mut f = fs::File::open(&path).await?;
 		let mut data = vec![];
 		f.read_to_end(&mut data).await?;
@@ -605,6 +616,8 @@ impl BlockManager {
 			return Err(Error::CorruptData(*hash));
 		}

+		drop(permit);
+
 		Ok(data)
 	}
@@ -770,6 +783,7 @@ impl BlockManagerLocked {

 		let mut f = fs::File::create(&path_tmp).await?;
 		f.write_all(data).await?;
+		f.flush().await?;
 		mgr.metrics.bytes_written.add(data.len() as u64);

 		if mgr.data_fsync {
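Taken together, the block-manager hunks above bound the number of concurrent block reads with a semaphore and refuse to queue a read forever. The core pattern is easy to isolate; the following is a hedged, standalone sketch (names and the 15 second timeout are illustrative, mirroring the constants above), not the actual BlockManager code:

    // Try to take a permit, but give up after a timeout instead of piling up
    // pending reads; the permit is released when it goes out of scope.
    use std::time::Duration;
    use tokio::sync::Semaphore;

    async fn read_with_limit(sem: &Semaphore) -> Result<Vec<u8>, String> {
        let _permit = tokio::select! {
            p = sem.acquire() => p.map_err(|_| "semaphore closed".to_string())?,
            _ = tokio::time::sleep(Duration::from_secs(15)) => {
                return Err("read semaphore acquire timeout".to_string());
            }
        };
        // ...perform the bounded disk read while holding the permit...
        Ok(Vec::new())
    }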
@@ -22,6 +22,7 @@ pub struct BlockManagerMetrics {

 	pub(crate) bytes_read: BoundCounter<u64>,
 	pub(crate) block_read_duration: BoundValueRecorder<f64>,
+	pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
 	pub(crate) bytes_written: BoundCounter<u64>,
 	pub(crate) block_write_duration: BoundValueRecorder<f64>,
 	pub(crate) delete_counter: BoundCounter<u64>,
@@ -119,6 +120,11 @@ impl BlockManagerMetrics {
 				.with_description("Duration of block read operations")
 				.init()
 				.bind(&[]),
+			block_read_semaphore_timeouts: meter
+				.u64_counter("block.read_semaphore_timeouts")
+				.with_description("Number of block reads that failed due to semaphore acquire timeout")
+				.init()
+				.bind(&[]),
 			bytes_written: meter
 				.u64_counter("block.bytes_written")
 				.with_description("Number of bytes written to disk")
@@ -133,6 +133,14 @@ impl BlockResyncManager {
 		)))
 	}

+	/// Clear the entire resync queue and list of errored blocks
+	/// Corresponds to `garage repair clear-resync-queue`
+	pub fn clear_resync_queue(&self) -> Result<(), Error> {
+		self.queue.clear()?;
+		self.errors.clear()?;
+		Ok(())
+	}
+
 	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
 		let notify = self.notify.clone();
 		vars.register_rw(
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -12,7 +12,7 @@ readme = "../../README.md"
 path = "lib.rs"

 [dependencies]
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true

 heed = { workspace = true, optional = true }
@@ -20,7 +20,7 @@ use std::cell::Cell;
 use std::path::PathBuf;
 use std::sync::Arc;

-use err_derive::Error;
+use thiserror::Error;

 pub use open::*;
@@ -44,7 +44,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
 // ----

 #[derive(Debug, Error)]
-#[error(display = "{}", _0)]
+#[error("{0}")]
 pub struct Error(pub Cow<'static, str>);

 impl From<std::io::Error> for Error {
@@ -56,7 +56,7 @@ impl From<std::io::Error> for Error {
 pub type Result<T> = std::result::Result<T, Error>;

 #[derive(Debug, Error)]
-#[error(display = "{}", _0)]
+#[error("{0}")]
 pub struct TxOpError(pub(crate) Error);
 pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
@@ -106,32 +106,44 @@ impl Db {
 			result: Cell::new(None),
 		};
 		let tx_res = self.0.transaction(&f);
-		let ret = f
-			.result
-			.into_inner()
-			.expect("Transaction did not store result");
+		let fn_res = f.result.into_inner();

-		match tx_res {
-			Ok(on_commit) => match ret {
-				Ok(value) => {
-					on_commit.into_iter().for_each(|f| f());
-					Ok(value)
-				}
-				_ => unreachable!(),
-			},
-			Err(TxError::Abort(())) => match ret {
-				Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
-				_ => unreachable!(),
-			},
-			Err(TxError::Db(e2)) => match ret {
-				// Ok was stored -> the error occurred when finalizing
-				// transaction
-				Ok(_) => Err(TxError::Db(e2)),
-				// An error was already stored: that's the one we want to
-				// return
-				Err(TxError::Db(e)) => Err(TxError::Db(e)),
-				_ => unreachable!(),
-			},
+		match (tx_res, fn_res) {
+			(Ok(on_commit), Some(Ok(value))) => {
+				// Transaction succeeded
+				// TxFn stored the value to return to the user in fn_res
+				// tx_res contains the on_commit list of callbacks, run them now
+				on_commit.into_iter().for_each(|f| f());
+				Ok(value)
+			}
+			(Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => {
+				// Transaction was aborted by user code
+				// The abort error value is stored in fn_res
+				Err(TxError::Abort(e))
+			}
+			(Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => {
+				// Transaction encountered a DB error in user code
+				// The error value encountered is the one in fn_res,
+				// tx_res contains only a dummy error message
+				Err(TxError::Db(fn_e))
+			}
+			(Err(TxError::Db(tx_e)), None) => {
+				// Transaction encounterred a DB error when initializing the transaction,
+				// before user code was called
+				Err(TxError::Db(tx_e))
+			}
+			(Err(TxError::Db(tx_e)), Some(Ok(_))) => {
+				// Transaction encounterred a DB error when commiting the transaction,
+				// after user code was called
+				Err(TxError::Db(tx_e))
+			}
+			(tx_res, fn_res) => {
+				panic!(
+					"unexpected error case: tx_res={:?}, fn_res={:?}",
+					tx_res.map(|_| "..."),
+					fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
+				);
+			}
 		}
 	}
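The five-way match above reads as a small decision table: the closure's own stored result wins whenever it exists, and the backend's error is only reported when the closure never ran or succeeded before the commit failed. A self-contained sketch with plain std types (not the garage_db types) that mirrors which side's result wins in each case:

    fn combine(
        tx_res: Result<(), &'static str>,
        fn_res: Option<Result<u32, &'static str>>,
    ) -> Result<u32, &'static str> {
        match (tx_res, fn_res) {
            // backend and user closure agree: commit and return the value
            (Ok(()), Some(Ok(v))) => Ok(v),
            // the closure failed or aborted: its error is the one reported
            (Err(_), Some(Err(e))) => Err(e),
            // backend error before user code ran, or while committing: report it
            (Err(e), None) | (Err(e), Some(Ok(_))) => Err(e),
            // any other combination is a bug (the real code panics here)
            (Ok(()), _) => Err("transaction committed but no result was stored"),
        }
    }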
@@ -151,30 +151,16 @@ impl IDb for SqliteDb {
 	}

 	fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
-		fn progress(p: rusqlite::backup::Progress) {
-			use std::sync::atomic::{AtomicU64, Ordering};
-			use std::time::{SystemTime, UNIX_EPOCH};
-
-			static LAST_LOG_TIME: AtomicU64 = AtomicU64::new(0);
-
-			let now = SystemTime::now()
-				.duration_since(UNIX_EPOCH)
-				.expect("Fix your clock :o")
-				.as_millis() as u64;
-			if now >= LAST_LOG_TIME.load(Ordering::Relaxed) + 10 * 1000 {
-				let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
-				info!("Sqlite snapshot progress: {}%", percent);
-				LAST_LOG_TIME.fetch_max(now, Ordering::Relaxed);
-			}
-		}
-
 		std::fs::create_dir_all(base_path)?;
-		let path = Engine::Sqlite.db_path(&base_path);
-		self.db
-			.get()?
-			.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
+		let path = Engine::Sqlite
+			.db_path(&base_path)
+			.into_os_string()
+			.into_string()
+			.map_err(|_| Error("invalid sqlite path string".into()))?;
+
+		info!("Start sqlite VACUUM INTO `{}`", path);
+		self.db.get()?.execute("VACUUM INTO ?1", params![path])?;
+		info!("Finished sqlite VACUUM INTO `{}`", path);
+
 		Ok(())
 	}
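The switch from the rusqlite backup API to `VACUUM INTO` produces a consistent snapshot that is also compacted. As a hedged, standalone sketch of the same idea (assuming only the rusqlite crate; the path handling of the real code is omitted):

    // VACUUM INTO writes a defragmented copy of the whole database to `target`
    // while the source connection stays usable; SQLite expects `target` not to
    // be an existing, non-empty file.
    use rusqlite::{params, Connection};

    fn snapshot_to(conn: &Connection, target: &str) -> rusqlite::Result<()> {
        conn.execute("VACUUM INTO ?1", params![target])?;
        Ok(())
    }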
@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -466,6 +466,10 @@ pub enum RepairWhat {
 	/// Repair (resync/rebalance) the set of stored blocks in the cluster
 	#[structopt(name = "blocks", version = garage_version())]
 	Blocks,
+	/// Clear the block resync queue. The list of blocks in errored state
+	/// is cleared as well. You MUST run `garage repair blocks` after invoking this.
+	#[structopt(name = "clear-resync-queue", version = garage_version())]
+	ClearResyncQueue,
 	/// Repropagate object deletions to the version table
 	#[structopt(name = "versions", version = garage_version())]
 	Versions,
@@ -92,6 +92,11 @@ pub async fn launch_online_repair(
 			info!("Repairing bucket aliases (foreground)");
 			garage.locked_helper().await.repair_aliases().await?;
 		}
+		RepairWhat::ClearResyncQueue => {
+			let garage = garage.clone();
+			tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
+				.await??
+		}
 	}
 	Ok(())
 }
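A note on the `.await??` in that new arm, with a self-contained sketch (the error types below are chosen for the example only): `spawn_blocking` hands back a `JoinHandle`, so awaiting it yields a `Result` wrapping the closure's own `Result`. The first `?` surfaces a panicked or cancelled task, the second surfaces the error returned by the blocking closure.

    use tokio::task;

    async fn clear_queue_blocking() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        task::spawn_blocking(|| -> std::io::Result<()> {
            // blocking work, e.g. clearing an on-disk queue, goes here
            Ok(())
        })
        .await??; // outer ?: JoinError, inner ?: the closure's io::Error
        Ok(())
    }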
@@ -198,6 +198,7 @@ async fn test_precondition() {
 		);
 	}
 	let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
+	let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
 	let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
 	{
 		let err = ctx
@@ -212,6 +213,18 @@ async fn test_precondition() {
 			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
 		);

+		let err = ctx
+			.client
+			.get_object()
+			.bucket(&bucket)
+			.key(STD_KEY)
+			.if_modified_since(same_date)
+			.send()
+			.await;
+		assert!(
+			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
+		);
+
 		let o = ctx
 			.client
 			.get_object()
@@ -236,6 +249,17 @@ async fn test_precondition() {
 			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
 		);

+		let o = ctx
+			.client
+			.get_object()
+			.bucket(&bucket)
+			.key(STD_KEY)
+			.if_unmodified_since(same_date)
+			.send()
+			.await
+			.unwrap();
+		assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
+
 		let o = ctx
 			.client
 			.get_object()
@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -24,7 +24,7 @@ garage_net.workspace = true
 async-trait.workspace = true
 blake2.workspace = true
 chrono.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 http.workspace = true
 base64.workspace = true
@@ -44,4 +44,4 @@ default = [ "lmdb", "sqlite" ]
 k2v = [ "garage_util/k2v" ]
 lmdb = [ "garage_db/lmdb" ]
 sqlite = [ "garage_db/sqlite" ]
 fjall = [ "garage_db/fjall" ]
@@ -315,15 +315,15 @@ impl Garage {
 		Ok(())
 	}

-	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
+	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> {
 		helper::bucket::BucketHelper(self)
 	}

-	pub fn key_helper(&self) -> helper::key::KeyHelper {
+	pub fn key_helper(&self) -> helper::key::KeyHelper<'_> {
 		helper::key::KeyHelper(self)
 	}

-	pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
+	pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> {
 		let lock = self.bucket_lock.lock().await;
 		helper::locked::LockedHelper(self, Some(lock))
 	}
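These signature changes only make an existing borrow explicit: the helper types already borrow the `Garage` instance, and the `<'_>` anonymous lifetime spells that out in the return type, presumably to satisfy the elided-lifetimes-in-paths idiom lint. A minimal self-contained illustration with hypothetical types:

    // `Helper<'_>` in the signature documents that the returned value borrows
    // from `self`; behaviour is identical to the fully elided form.
    struct Owner;
    struct Helper<'a>(&'a Owner);

    impl Owner {
        fn helper(&self) -> Helper<'_> {
            Helper(self)
        }
    }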
@@ -1,24 +1,24 @@
-use err_derive::Error;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;

 use garage_util::error::Error as GarageError;

 #[derive(Debug, Error, Serialize, Deserialize)]
 pub enum Error {
-	#[error(display = "Internal error: {}", _0)]
-	Internal(#[error(source)] GarageError),
+	#[error("Internal error: {0}")]
+	Internal(#[from] GarageError),

-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),

 	/// Bucket name is not valid according to AWS S3 specs
-	#[error(display = "Invalid bucket name: {}", _0)]
+	#[error("Invalid bucket name: {0}")]
 	InvalidBucketName(String),

-	#[error(display = "Access key not found: {}", _0)]
+	#[error("Access key not found: {0}")]
 	NoSuchAccessKey(String),

-	#[error(display = "Bucket not found: {}", _0)]
+	#[error("Bucket not found: {0}")]
 	NoSuchBucket(String),
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_net"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -30,7 +30,7 @@ rand.workspace = true

 log.workspace = true
 arc-swap.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 bytes.workspace = true
 cfg-if.workspace = true
@@ -159,7 +159,7 @@ where
 pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;

 pub(crate) trait GenericEndpoint {
-	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>>;
+	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>>;
 	fn drop_handler(&self);
 	fn clone_endpoint(&self) -> DynEndpoint;
 }
@@ -175,7 +175,7 @@ where
 	M: Message,
 	H: StreamingEndpointHandler<M> + 'static,
 {
-	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>> {
+	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>> {
 		async move {
 			match self.0.handler.load_full() {
 				None => Err(Error::NoHandler),
@@ -1,49 +1,49 @@
 use std::io;

-use err_derive::Error;
 use log::error;
+use thiserror::Error;

 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] io::Error),
+	#[error("IO error: {0}")]
+	Io(#[from] io::Error),

-	#[error(display = "Messagepack encode error: {}", _0)]
-	RMPEncode(#[error(source)] rmp_serde::encode::Error),
-	#[error(display = "Messagepack decode error: {}", _0)]
-	RMPDecode(#[error(source)] rmp_serde::decode::Error),
+	#[error("Messagepack encode error: {0}")]
+	RMPEncode(#[from] rmp_serde::encode::Error),
+	#[error("Messagepack decode error: {0}")]
+	RMPDecode(#[from] rmp_serde::decode::Error),

-	#[error(display = "Tokio join error: {}", _0)]
-	TokioJoin(#[error(source)] tokio::task::JoinError),
+	#[error("Tokio join error: {0}")]
+	TokioJoin(#[from] tokio::task::JoinError),

-	#[error(display = "oneshot receive error: {}", _0)]
-	OneshotRecv(#[error(source)] tokio::sync::oneshot::error::RecvError),
+	#[error("oneshot receive error: {0}")]
+	OneshotRecv(#[from] tokio::sync::oneshot::error::RecvError),

-	#[error(display = "Handshake error: {}", _0)]
-	Handshake(#[error(source)] kuska_handshake::async_std::Error),
+	#[error("Handshake error: {0}")]
+	Handshake(#[from] kuska_handshake::async_std::Error),

-	#[error(display = "UTF8 error: {}", _0)]
-	UTF8(#[error(source)] std::string::FromUtf8Error),
+	#[error("UTF8 error: {0}")]
+	UTF8(#[from] std::string::FromUtf8Error),

-	#[error(display = "Framing protocol error")]
+	#[error("Framing protocol error")]
 	Framing,

-	#[error(display = "Remote error ({:?}): {}", _0, _1)]
+	#[error("Remote error ({0:?}): {1}")]
 	Remote(io::ErrorKind, String),

-	#[error(display = "Request ID collision")]
+	#[error("Request ID collision")]
 	IdCollision,

-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	Message(String),

-	#[error(display = "No handler / shutting down")]
+	#[error("No handler / shutting down")]
 	NoHandler,

-	#[error(display = "Connection closed")]
+	#[error("Connection closed")]
 	ConnectionClosed,

-	#[error(display = "Version mismatch: {}", _0)]
+	#[error("Version mismatch: {0}")]
 	VersionMismatch(String),
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_rpc"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -33,7 +33,7 @@ async-trait.workspace = true
 serde.workspace = true
 serde_bytes.workspace = true
 serde_json.workspace = true
-err-derive = { workspace = true, optional = true }
+thiserror = { workspace = true, optional = true }

 # newer version requires rust edition 2021
 kube = { workspace = true, optional = true }
@@ -49,5 +49,5 @@ opentelemetry.workspace = true

 [features]
 kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ]
-consul-discovery = [ "reqwest", "err-derive" ]
+consul-discovery = [ "reqwest", "thiserror" ]
 system-libs = [ "sodiumoxide/use-pkg-config" ]
@@ -3,8 +3,8 @@ use std::fs::File;
 use std::io::Read;
 use std::net::{IpAddr, SocketAddr};

-use err_derive::Error;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;

 use garage_net::NodeID;
@@ -219,12 +219,12 @@ impl ConsulDiscovery {
 /// Regroup all Consul discovery errors
 #[derive(Debug, Error)]
 pub enum ConsulError {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] std::io::Error),
-	#[error(display = "HTTP error: {}", _0)]
-	Reqwest(#[error(source)] reqwest::Error),
-	#[error(display = "Invalid Consul TLS configuration")]
+	#[error("IO error: {0}")]
+	Io(#[from] std::io::Error),
+	#[error("HTTP error: {0}")]
+	Reqwest(#[from] reqwest::Error),
+	#[error("Invalid Consul TLS configuration")]
 	InvalidTLSConfig,
-	#[error(display = "Token error: {}", _0)]
-	Token(#[error(source)] reqwest::header::InvalidHeaderValue),
+	#[error("Token error: {0}")]
+	Token(#[from] reqwest::header::InvalidHeaderValue),
 }
@@ -229,13 +229,11 @@ impl LayoutManager {
 	}

 	/// Save cluster layout data to disk
-	async fn save_cluster_layout(&self) -> Result<(), Error> {
+	async fn save_cluster_layout(&self) {
 		let layout = self.layout.read().unwrap().inner().clone();
-		self.persist_cluster_layout
-			.save_async(&layout)
-			.await
-			.expect("Cannot save current cluster layout");
-		Ok(())
+		if let Err(e) = self.persist_cluster_layout.save_async(&layout).await {
+			error!("Failed to save cluster_layout: {}", e);
+		}
 	}

 	fn broadcast_update(self: &Arc<Self>, rpc: SystemRpc) {
@@ -313,7 +311,7 @@ impl LayoutManager {

 			self.change_notify.notify_waiters();
 			self.broadcast_update(SystemRpc::AdvertiseClusterLayout(new_layout));
-			self.save_cluster_layout().await?;
+			self.save_cluster_layout().await;
 		}

 		Ok(SystemRpc::Ok)
@@ -328,7 +326,7 @@ impl LayoutManager {
 		if let Some(new_trackers) = self.merge_layout_trackers(trackers) {
 			self.change_notify.notify_waiters();
 			self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(new_trackers));
-			self.save_cluster_layout().await?;
+			self.save_cluster_layout().await;
 		}

 		Ok(SystemRpc::Ok)
@@ -507,7 +507,7 @@ impl LayoutVersion {
 		g.compute_maximal_flow()?;
 		if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 {
 			return Err(Error::Message(
-				"The storage capacity of he cluster is to small. It is \
+				"The storage capacity of the cluster is too small. It is \
 				impossible to store partitions of size 1."
 					.into(),
 			));
@@ -1,6 +1,6 @@
 [package]
 name = "garage_table"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -1,6 +1,6 @@
 [package]
 name = "garage_util"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -21,7 +21,7 @@ arc-swap.workspace = true
 async-trait.workspace = true
 blake2.workspace = true
 bytesize.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hexdump.workspace = true
 xxhash-rust.workspace = true
 hex.workspace = true
@@ -115,32 +115,39 @@ impl WorkerProcessor {
 			trace!("{} (TID {}): {:?}", worker.worker.name(), worker.task_id, worker.state);

 			// Save worker info
-			let mut wi = self.worker_info.lock().unwrap();
-			match wi.get_mut(&worker.task_id) {
-				Some(i) => {
-					i.state = worker.state;
-					i.status = worker.worker.status();
-					i.errors = worker.errors;
-					i.consecutive_errors = worker.consecutive_errors;
-					if worker.last_error.is_some() {
-						i.last_error = worker.last_error.take();
-					}
-				}
-				None => {
-					wi.insert(worker.task_id, WorkerInfo {
-						name: worker.worker.name(),
-						state: worker.state,
-						status: worker.worker.status(),
-						errors: worker.errors,
-						consecutive_errors: worker.consecutive_errors,
-						last_error: worker.last_error.take(),
-					});
-				}
-			}
+			{
+				let mut wi = self.worker_info.lock().unwrap();
+				match wi.get_mut(&worker.task_id) {
+					Some(i) => {
+						i.state = worker.state;
+						i.status = worker.worker.status();
+						i.errors = worker.errors;
+						i.consecutive_errors = worker.consecutive_errors;
+						if worker.last_error.is_some() {
+							i.last_error = worker.last_error.take();
+						}
+					}
+					None => {
+						wi.insert(worker.task_id, WorkerInfo {
+							name: worker.worker.name(),
+							state: worker.state,
+							status: worker.worker.status(),
+							errors: worker.errors,
+							consecutive_errors: worker.consecutive_errors,
+							last_error: worker.last_error.take(),
+						});
+					}
+				}
+			}

 			if worker.state == WorkerState::Done {
 				info!("Worker {} (TID {}) exited", worker.worker.name(), worker.task_id);
 			} else {
+				// Yield to the Tokio scheduler between consecutive Busy steps so
+				// that a worker which never suspends on its own cannot starve other tasks.
+				if worker.state == WorkerState::Busy {
+					tokio::task::yield_now().await;
+				}
 				workers.push(async move {
 					worker.step().await;
 					worker
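The second half of that hunk addresses a starvation risk: a worker whose step never awaits anything can monopolise its Tokio worker thread. A minimal self-contained illustration of the fix, where the loop body stands in for a Busy worker step:

    // Explicitly yielding between busy iterations gives other tasks scheduled
    // on the same runtime a chance to run.
    async fn busy_loop(mut steps: u32) {
        while steps > 0 {
            // ...one bounded unit of CPU-heavy work...
            steps -= 1;
            tokio::task::yield_now().await;
        }
    }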
@@ -45,6 +45,11 @@ pub struct Config {
 	)]
 	pub block_size: usize,

+	/// Maximum number of parallel block writes per PUT request
+	/// Higher values improve throughput but increase memory usage
+	/// Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
+	#[serde(default = "default_block_max_concurrent_writes_per_request")]
+	pub block_max_concurrent_writes_per_request: usize,
 	/// Number of replicas. Can be any positive integer, but uneven numbers are more favorable.
 	/// - 1 for single-node clusters, or to disable replication
 	/// - 3 is the recommended and supported setting.
@@ -75,6 +80,10 @@ pub struct Config {
 	)]
 	pub block_ram_buffer_max: usize,

+	/// Maximum number of concurrent reads of block files on disk
+	#[serde(default = "default_block_max_concurrent_reads")]
+	pub block_max_concurrent_reads: usize,
+
 	/// Skip the permission check of secret files. Useful when
 	/// POSIX ACLs (or more complex chmods) are used.
 	#[serde(default)]
@@ -263,6 +272,9 @@ pub struct KubernetesDiscoveryConfig {
 	pub skip_crd: bool,
 }

+pub fn default_block_max_concurrent_writes_per_request() -> usize {
+	3
+}
 /// Read and parse configuration
 pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
 	let config = std::fs::read_to_string(config_file)?;
@@ -280,6 +292,9 @@ fn default_block_size() {
 fn default_block_ram_buffer_max() -> usize {
 	256 * 1024 * 1024
 }
+fn default_block_max_concurrent_reads() -> usize {
+	16
+}

 fn default_consistency_mode() -> String {
 	"consistent".into()
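For reference, the two new knobs are ordinary top-level configuration fields with serde defaults of 3 and 16 respectively. A hedged sketch of how they could be set and read back (key names taken from the struct above; the values and the use of the toml crate here are only for illustration):

    // Parse a fragment of garage.toml and read back one of the new fields.
    fn main() {
        let snippet = r#"
            block_max_concurrent_writes_per_request = 10
            block_max_concurrent_reads = 16
        "#;
        let value: toml::Value = toml::from_str(snippet).unwrap();
        assert_eq!(
            value["block_max_concurrent_reads"].as_integer(),
            Some(16)
        );
    }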
@@ -2,7 +2,7 @@
 use std::fmt;
 use std::io;

-use err_derive::Error;
+use thiserror::Error;

 use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
@@ -12,68 +12,61 @@ use crate::encode::debug_serialize;
 /// Regroup all Garage errors
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] io::Error),
+	#[error("IO error: {0}")]
+	Io(#[from] io::Error),

-	#[error(display = "Hyper error: {}", _0)]
-	Hyper(#[error(source)] hyper::Error),
+	#[error("Hyper error: {0}")]
+	Hyper(#[from] hyper::Error),

-	#[error(display = "HTTP error: {}", _0)]
-	Http(#[error(source)] http::Error),
+	#[error("HTTP error: {0}")]
+	Http(#[from] http::Error),

-	#[error(display = "Invalid HTTP header value: {}", _0)]
-	HttpHeader(#[error(source)] http::header::ToStrError),
+	#[error("Invalid HTTP header value: {0}")]
+	HttpHeader(#[from] http::header::ToStrError),

-	#[error(display = "Network error: {}", _0)]
-	Net(#[error(source)] garage_net::error::Error),
+	#[error("Network error: {0}")]
+	Net(#[from] garage_net::error::Error),

-	#[error(display = "DB error: {}", _0)]
-	Db(#[error(source)] garage_db::Error),
+	#[error("DB error: {0}")]
+	Db(#[from] garage_db::Error),

-	#[error(display = "Messagepack encode error: {}", _0)]
-	RmpEncode(#[error(source)] rmp_serde::encode::Error),
-	#[error(display = "Messagepack decode error: {}", _0)]
-	RmpDecode(#[error(source)] rmp_serde::decode::Error),
-	#[error(display = "JSON error: {}", _0)]
-	Json(#[error(source)] serde_json::error::Error),
-	#[error(display = "TOML decode error: {}", _0)]
-	TomlDecode(#[error(source)] toml::de::Error),
+	#[error("Messagepack encode error: {0}")]
+	RmpEncode(#[from] rmp_serde::encode::Error),
+	#[error("Messagepack decode error: {0}")]
+	RmpDecode(#[from] rmp_serde::decode::Error),
+	#[error("JSON error: {0}")]
+	Json(#[from] serde_json::error::Error),
+	#[error("TOML decode error: {0}")]
+	TomlDecode(#[from] toml::de::Error),

-	#[error(display = "Tokio join error: {}", _0)]
-	TokioJoin(#[error(source)] tokio::task::JoinError),
+	#[error("Tokio join error: {0}")]
+	TokioJoin(#[from] tokio::task::JoinError),

-	#[error(display = "Tokio semaphore acquire error: {}", _0)]
-	TokioSemAcquire(#[error(source)] tokio::sync::AcquireError),
+	#[error("Tokio semaphore acquire error: {0}")]
+	TokioSemAcquire(#[from] tokio::sync::AcquireError),

-	#[error(display = "Tokio broadcast receive error: {}", _0)]
-	TokioBcastRecv(#[error(source)] tokio::sync::broadcast::error::RecvError),
+	#[error("Tokio broadcast receive error: {0}")]
+	TokioBcastRecv(#[from] tokio::sync::broadcast::error::RecvError),

-	#[error(display = "Remote error: {}", _0)]
+	#[error("Remote error: {0}")]
 	RemoteError(String),

-	#[error(display = "Timeout")]
+	#[error("Timeout")]
 	Timeout,

-	#[error(
-		display = "Could not reach quorum of {} (sets={:?}). {} of {} request succeeded, others returned errors: {:?}",
-		_0,
-		_1,
-		_2,
-		_3,
-		_4
-	)]
+	#[error("Could not reach quorum of {0} (sets={1:?}). {2} of {3} request succeeded, others returned errors: {4:?}")]
 	Quorum(usize, Option<usize>, usize, usize, Vec<String>),

-	#[error(display = "Unexpected RPC message: {}", _0)]
+	#[error("Unexpected RPC message: {0}")]
 	UnexpectedRpcMessage(String),

-	#[error(display = "Corrupt data: does not match hash {:?}", _0)]
+	#[error("Corrupt data: does not match hash {0:?}")]
 	CorruptData(Hash),

-	#[error(display = "Missing block {:?}: no node returned a valid block", _0)]
+	#[error("Missing block {0:?}: no node returned a valid block")]
 	MissingBlock(Hash),

-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	Message(String),
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "1.2.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -20,7 +20,7 @@ garage_model.workspace = true
 garage_util.workspace = true
 garage_table.workspace = true

-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true
 percent-encoding.workspace = true
@@ -1,6 +1,6 @@
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 use garage_api_common::generic_server::ApiError;
@@ -8,15 +8,15 @@ use garage_api_common::generic_server::ApiError;
 #[derive(Debug, Error)]
 pub enum Error {
 	/// An error received from the API crate
-	#[error(display = "API error: {}", _0)]
+	#[error("API error: {0}")]
 	ApiError(garage_api_s3::error::Error),

 	/// The file does not exist
-	#[error(display = "Not found")]
+	#[error("Not found")]
 	NotFound,

 	/// The client sent a request without host, or with unsupported method
-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),
 }