Compare commits


1 commit

Author: Alex Auvolat
SHA1: 8bccddb504
Message: add garage node dump-table command for debugging purpose
Date: 2025-08-29 16:35:47 +02:00
67 changed files with 1264 additions and 1499 deletions


@ -1,6 +1,3 @@
labels:
nix: "enabled"
when: when:
event: event:
- push - push
@ -12,32 +9,32 @@ when:
steps: steps:
- name: check formatting - name: check formatting
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.fmt - nix-shell --attr devShell --run "cargo fmt -- --check"
- name: build - name: build
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.dev - nix-build -j4 --attr flakePackages.dev
- name: unit + func tests (lmdb) - name: unit + func tests (lmdb)
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.tests-lmdb - nix-build -j4 --attr flakePackages.tests-lmdb
- name: unit + func tests (sqlite) - name: unit + func tests (sqlite)
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.tests-sqlite - nix-build -j4 --attr flakePackages.tests-sqlite
- name: unit + func tests (fjall) - name: unit + func tests (fjall)
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.tests-fjall - nix-build -j4 --attr flakePackages.tests-fjall
- name: integration tests - name: integration tests
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build -j4 --attr flakePackages.dev - nix-build -j4 --attr flakePackages.dev
- nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false) - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)


@ -1,6 +1,3 @@
labels:
nix: "enabled"
when: when:
event: event:
- deployment - deployment
@ -11,7 +8,7 @@ depends_on:
steps: steps:
- name: refresh-index - name: refresh-index
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
environment: environment:
AWS_ACCESS_KEY_ID: AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id from_secret: garagehq_aws_access_key_id
@ -22,7 +19,7 @@ steps:
- nix-shell --attr ci --run "refresh_index" - nix-shell --attr ci --run "refresh_index"
- name: multiarch-docker - name: multiarch-docker
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
environment: environment:
DOCKER_AUTH: DOCKER_AUTH:
from_secret: docker_auth from_secret: docker_auth


@ -1,6 +1,3 @@
labels:
nix: "enabled"
when: when:
event: event:
- deployment - deployment
@ -19,17 +16,17 @@ matrix:
steps: steps:
- name: build - name: build
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA} - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- name: check is static binary - name: check is static binary
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage" - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
- name: integration tests - name: integration tests
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false) - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
when: when:
@ -39,7 +36,7 @@ steps:
ARCH: i386 ARCH: i386
- name: upgrade tests - name: upgrade tests
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
commands: commands:
- nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false) - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
when: when:
@ -47,7 +44,7 @@ steps:
ARCH: amd64 ARCH: amd64
- name: push static binary - name: push static binary
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
environment: environment:
TARGET: "${TARGET}" TARGET: "${TARGET}"
AWS_ACCESS_KEY_ID: AWS_ACCESS_KEY_ID:
@ -58,7 +55,7 @@ steps:
- nix-shell --attr ci --run "to_s3" - nix-shell --attr ci --run "to_s3"
- name: docker build and publish - name: docker build and publish
image: nixpkgs/nix:nixos-24.05 image: nixpkgs/nix:nixos-22.05
environment: environment:
DOCKER_PLATFORM: "linux/${ARCH}" DOCKER_PLATFORM: "linux/${ARCH}"
CONTAINER_NAME: "dxflrs/${ARCH}_garage" CONTAINER_NAME: "dxflrs/${ARCH}_garage"

Cargo.lock (generated): 1747 changed lines

File diff suppressed because it is too large.


@ -24,18 +24,18 @@ default-members = ["src/garage"]
# Internal Garage crates # Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" } format_table = { version = "0.1.1", path = "src/format-table" }
garage_api_common = { version = "1.3.1", path = "src/api/common" } garage_api_common = { version = "1.2.0", path = "src/api/common" }
garage_api_admin = { version = "1.3.1", path = "src/api/admin" } garage_api_admin = { version = "1.2.0", path = "src/api/admin" }
garage_api_s3 = { version = "1.3.1", path = "src/api/s3" } garage_api_s3 = { version = "1.2.0", path = "src/api/s3" }
garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" } garage_api_k2v = { version = "1.2.0", path = "src/api/k2v" }
garage_block = { version = "1.3.1", path = "src/block" } garage_block = { version = "1.2.0", path = "src/block" }
garage_db = { version = "1.3.1", path = "src/db", default-features = false } garage_db = { version = "1.2.0", path = "src/db", default-features = false }
garage_model = { version = "1.3.1", path = "src/model", default-features = false } garage_model = { version = "1.2.0", path = "src/model", default-features = false }
garage_net = { version = "1.3.1", path = "src/net" } garage_net = { version = "1.2.0", path = "src/net" }
garage_rpc = { version = "1.3.1", path = "src/rpc" } garage_rpc = { version = "1.2.0", path = "src/rpc" }
garage_table = { version = "1.3.1", path = "src/table" } garage_table = { version = "1.2.0", path = "src/table" }
garage_util = { version = "1.3.1", path = "src/util" } garage_util = { version = "1.2.0", path = "src/util" }
garage_web = { version = "1.3.1", path = "src/web" } garage_web = { version = "1.2.0", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" } k2v-client = { version = "0.0.4", path = "src/k2v-client" }
# External crates from crates.io # External crates from crates.io
@ -52,6 +52,7 @@ chrono = "0.4"
crc32fast = "1.4" crc32fast = "1.4"
crc32c = "0.6" crc32c = "0.6"
crypto-common = "0.1" crypto-common = "0.1"
err-derive = "0.3"
gethostname = "0.4" gethostname = "0.4"
git-version = "0.3.4" git-version = "0.3.4"
hex = "0.4" hex = "0.4"
@ -87,9 +88,9 @@ tracing-journald = "0.3.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
heed = { version = "0.11", default-features = false, features = ["lmdb"] } heed = { version = "0.11", default-features = false, features = ["lmdb"] }
rusqlite = "0.37" rusqlite = "0.31.0"
r2d2 = "0.8" r2d2 = "0.8"
r2d2_sqlite = "0.31" r2d2_sqlite = "0.24"
fjall = "2.4" fjall = "2.4"
async-compression = { version = "0.4", features = ["tokio", "zstd"] } async-compression = { version = "0.4", features = ["tokio", "zstd"] }
@ -136,7 +137,7 @@ prometheus = "0.13"
aws-sigv4 = { version = "1.1", default-features = false } aws-sigv4 = { version = "1.1", default-features = false }
hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] } hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
log = "0.4" log = "0.4"
thiserror = "2.0" thiserror = "1.0"
# ---- used only as build / dev dependencies ---- # ---- used only as build / dev dependencies ----
assert-json-diff = "2.0" assert-json-diff = "2.0"
@ -146,8 +147,12 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
aws-sdk-config = { version = "1.62", default-features = false } aws-sdk-config = { version = "1.62", default-features = false }
aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] } aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }
[profile.dev]
#lto = "thin" # disabled for now, adds 2-4 min to each CI build
lto = "off"
[profile.release] [profile.release]
lto = "thin" lto = true
codegen-units = 16 codegen-units = 1
opt-level = 3 opt-level = "s"
strip = "debuginfo" strip = true


@ -161,49 +161,3 @@ kopia repository validate-provider
You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`... You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
Everything should work out-of-the-box. Everything should work out-of-the-box.
## Plakar
Create your key and bucket on the Garage server:
```bash
garage key create my-plakar-key
garage bucket create plakar-backups
garage bucket allow plakar-backups --read --write --key my-plakar-key
```
On the Plakar server, add your Garage cluster as a storage location:
```bash
# region: "garage", or whatever region you have configured in garage.toml
plakar store add garageS3 s3://my-garage.tld/plakar-backups \
    region=garage \
    access_key=<Key ID from "garage key info my-plakar-key"> \
    secret_access_key=<Secret key from "garage key info my-plakar-key">
```
Then create the repository.
```bash
plakar at @garageS3 create -plaintext  # unencrypted
# or
plakar at @garageS3 create             # encrypted (default)
```
If you encrypt your backups (the Plakar default), you will need to define a strong passphrase. Do not forget to store it safely: it will be needed to decrypt your backups.
After the repository has been created, check that everything works as expected (the command may return an empty result since no file has been added yet, but it should not print any error):
```bash
plakar at @garageS3 check
```
Now that everything is configured, you can use Garage as your backup storage. For instance, sync a local backup repository to it:
```bash
$ plakar at ~/backups sync to @garageS3
```
Or list the S3 storage content:
```bash
$ plakar at @garageS3 ls
```
More information is available in the Plakar documentation: https://www.plakar.io/docs/main/quickstart/


@ -15,10 +15,9 @@ Alpine Linux repositories (available since v3.17):
apk add garage apk add garage
``` ```
The default configuration file is installed to `/etc/garage/garage.toml`. You can run The default configuration file is installed to `/etc/garage.toml`. You can run
Garage using: `rc-service garage start`. Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
will be automatically replaced with a random string on the first start.
If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.
Please note that this package is built without Consul discovery, Kubernetes Please note that this package is built without Consul discovery, Kubernetes
discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@ -27,7 +26,7 @@ it's stable).
## Arch Linux ## Arch Linux
Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage). Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).
## FreeBSD ## FreeBSD


@ -11,7 +11,7 @@ Firstly clone the repository:
```bash ```bash
git clone https://git.deuxfleurs.fr/Deuxfleurs/garage git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
cd garage/script/helm cd garage/scripts/helm
``` ```
Deploy with default options: Deploy with default options:


@ -96,14 +96,14 @@ to store 2 TB of data in total.
## Get a Docker image ## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag. We encourage you to use a fixed tag (eg. `v1.2.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you For this example, we will use the latest published version at the time of the writing which is `v1.2.0` but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example: For example:
``` ```
sudo docker pull dxflrs/garage:v1.3.0 sudo docker pull dxflrs/garage:v1.2.0
``` ```
## Deploying and configuring Garage ## Deploying and configuring Garage
@ -171,7 +171,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \ -v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \ -v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \ -v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v1.3.0 dxflrs/garage:v1.2.0
``` ```
With this command line, Garage should be started automatically at each boot. With this command line, Garage should be started automatically at each boot.
@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3" version: "3"
services: services:
garage: garage:
image: dxflrs/garage:v1.3.0 image: dxflrs/garage:v1.2.0
network_mode: "host" network_mode: "host"
restart: unless-stopped restart: unless-stopped
volumes: volumes:


@ -132,7 +132,7 @@ docker run \
-v /path/to/garage.toml:/etc/garage.toml \ -v /path/to/garage.toml:/etc/garage.toml \
-v /path/to/garage/meta:/var/lib/garage/meta \ -v /path/to/garage/meta:/var/lib/garage/meta \
-v /path/to/garage/data:/var/lib/garage/data \ -v /path/to/garage/data:/var/lib/garage/data \
dxflrs/garage:v1.3.0 dxflrs/garage:v1.2.0
``` ```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903` Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`


@ -24,8 +24,7 @@ db_engine = "lmdb"
block_size = "1M" block_size = "1M"
block_ram_buffer_max = "256MiB" block_ram_buffer_max = "256MiB"
block_max_concurrent_reads = 16
block_max_concurrent_writes_per_request = 10
lmdb_map_size = "1T" lmdb_map_size = "1T"
compression_level = 1 compression_level = 1
@ -97,9 +96,7 @@ The following gives details about each available configuration option.
Top-level configuration options, in alphabetical order: Top-level configuration options, in alphabetical order:
[`allow_punycode`](#allow_punycode), [`allow_punycode`](#allow_punycode),
[`allow_world_readable_secrets`](#allow_world_readable_secrets), [`allow_world_readable_secrets`](#allow_world_readable_secrets),
[`block_max_concurrent_reads`](#block_max_concurrent_reads),
[`block_ram_buffer_max`](#block_ram_buffer_max), [`block_ram_buffer_max`](#block_ram_buffer_max),
[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
[`block_size`](#block_size), [`block_size`](#block_size),
[`bootstrap_peers`](#bootstrap_peers), [`bootstrap_peers`](#bootstrap_peers),
[`compression_level`](#compression_level), [`compression_level`](#compression_level),
@ -525,37 +522,6 @@ node.
The default value is 256MiB. The default value is 256MiB.
#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
The maximum number of blocks (individual files in the data directory) open
simultaneously for reading.
Reducing this number does not limit the number of data blocks that can be
transferred through the network simultaneously. This setting acts as a
backpressure mechanism on HDD read speed: it helps avoid a situation where too
many requests are coming in and Garage is reading too many block files
simultaneously, thus not making timely progress on any of the reads.

When a request to read a data block comes in through the network, the request
waits for one of the `block_max_concurrent_reads` slots to become available
(internally implemented using a semaphore). Once it has acquired a read slot,
it reads the entire block file to RAM and frees the slot as soon as the read
is finished. Only after the slot is released will the block's data start being
transferred over the network. If the request fails to acquire a read slot
within 15 seconds, it fails with a timeout error.
Timeout events can be monitored through the `block_read_semaphore_timeouts`
metric in Prometheus: a non-zero number of such events indicates an I/O
bottleneck on HDD read speed.
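For illustration, here is a minimal sketch of this acquire-with-timeout pattern using a tokio `Semaphore`. The helper name `read_block_with_backpressure` and its error handling are invented for the example; only the 15-second timeout and the slot-release behaviour follow the description above.

```rust
use std::path::Path;
use std::time::Duration;

use tokio::sync::Semaphore;

const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);

// Hypothetical helper: read one block file while holding a read slot.
async fn read_block_with_backpressure(
    read_semaphore: &Semaphore,
    path: &Path,
) -> std::io::Result<Vec<u8>> {
    // Wait for a read slot, but give up after the timeout so that a saturated
    // disk turns into an explicit error instead of an ever-growing queue.
    let _permit = tokio::select! {
        permit = read_semaphore.acquire() => permit.expect("semaphore closed"),
        _ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
            // This is where the block_read_semaphore_timeouts metric would
            // be incremented.
            return Err(std::io::Error::new(
                std::io::ErrorKind::TimedOut,
                "read block: read_semaphore acquire timeout",
            ));
        }
    };

    // Read the whole block file into RAM while holding the slot; the permit
    // is released when it goes out of scope, before the caller starts sending
    // the data over the network.
    tokio::fs::read(path).await
}
```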
#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
The maximum number of parallel block writes per PUT request. This parameter is
designed to adapt to the concurrent write performance of different storage
media: higher values improve throughput but increase memory usage.
Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.
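As a rough, self-contained sketch of what this per-request gating means (not Garage's actual code), one might cap the number of in-flight block writes for a single PUT like this, with the hypothetical `write_block` standing in for the real block-store RPC:

```rust
use futures::stream::{FuturesUnordered, StreamExt};

// Placeholder for the real RPC that stores one block on the cluster.
async fn write_block(_data: Vec<u8>) -> std::io::Result<()> {
    Ok(())
}

/// Write all `blocks`, keeping at most `max_parallel` writes in flight.
async fn put_blocks_gated(blocks: Vec<Vec<u8>>, max_parallel: usize) -> std::io::Result<()> {
    let max_parallel = max_parallel.max(1); // always allow at least one write
    let mut in_flight = FuturesUnordered::new();
    let mut blocks = blocks.into_iter();

    loop {
        // Start new writes only while we are under the configured limit.
        while in_flight.len() < max_parallel {
            match blocks.next() {
                Some(data) => in_flight.push(write_block(data)),
                None => break,
            }
        }
        // Wait for one write to finish, or stop once everything is done.
        match in_flight.next().await {
            Some(res) => res?,
            None => return Ok(()),
        }
    }
}
```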
#### `lmdb_map_size` {#lmdb_map_size} #### `lmdb_map_size` {#lmdb_map_size}
This parameters can be used to set the map size used by LMDB, This parameters can be used to set the map size used by LMDB,


@ -27,7 +27,7 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
| Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----| |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ | | [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
| [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ | | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ | | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ | | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |


@ -70,7 +70,7 @@ Example response body:
```json ```json
{ {
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df", "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"garageVersion": "v1.3.0", "garageVersion": "v1.2.0",
"garageFeatures": [ "garageFeatures": [
"k2v", "k2v",
"lmdb", "lmdb",

flake.lock (generated): 16 changed lines

@ -50,17 +50,17 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1763977559, "lastModified": 1736692550,
"narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=", "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632", "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632", "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github" "type": "github"
} }
}, },
@ -80,17 +80,17 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1763952169, "lastModified": 1738549608,
"narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=", "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "ab726555a9a72e6dc80649809147823a813fa95b", "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "ab726555a9a72e6dc80649809147823a813fa95b", "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
"type": "github" "type": "github"
} }
}, },


@ -2,13 +2,13 @@
description = description =
"Garage, an S3-compatible distributed object store for self-hosted deployments"; "Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 25.05 as of 2025-11-24 # Nixpkgs 24.11 as of 2025-01-12
inputs.nixpkgs.url = inputs.nixpkgs.url =
"github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632"; "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
# Rust overlay as of 2025-11-24 # Rust overlay as of 2025-02-03
inputs.rust-overlay.url = inputs.rust-overlay.url =
"github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b"; "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
inputs.crane.url = "github:ipetkov/crane"; inputs.crane.url = "github:ipetkov/crane";
@ -30,10 +30,6 @@
inherit system nixpkgs crane rust-overlay extraTestEnv; inherit system nixpkgs crane rust-overlay extraTestEnv;
release = false; release = false;
}).garage-test; }).garage-test;
lints = (compile {
inherit system nixpkgs crane rust-overlay;
release = false;
});
in in
{ {
packages = { packages = {
@ -60,10 +56,6 @@
tests-fjall = testWith { tests-fjall = testWith {
GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall"; GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
}; };
# lints (fmt, clippy)
fmt = lints.garage-cargo-fmt;
clippy = lints.garage-cargo-clippy;
}; };
# ---- developpment shell, for making native builds only ---- # ---- developpment shell, for making native builds only ----


@ -48,7 +48,7 @@ let
inherit (pkgs) lib stdenv; inherit (pkgs) lib stdenv;
toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override { toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
targets = lib.optionals (target != null) [ rustTarget ]; targets = lib.optionals (target != null) [ rustTarget ];
extensions = [ extensions = [
"rust-src" "rust-src"
@ -190,15 +190,4 @@ in rec {
pkgs.cacert pkgs.cacert
]; ];
} // extraTestEnv); } // extraTestEnv);
# ---- source code linting ----
garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
cargoExtraArgs = "";
});
garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
cargoArtifacts = garage-deps;
cargoClippyExtraArgs = "--all-targets -- -D warnings";
});
} }


@ -1,7 +1,6 @@
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1` export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2` export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
export AWS_DEFAULT_REGION='garage' export AWS_DEFAULT_REGION='garage'
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
# FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0. # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; } function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }


@ -2,8 +2,8 @@ apiVersion: v2
name: garage name: garage
description: S3-compatible object store for small self-hosted geo-distributed deployments description: S3-compatible object store for small self-hosted geo-distributed deployments
type: application type: application
version: 0.7.3 version: 0.7.1
appVersion: "v1.3.1" appVersion: "v1.2.0"
home: https://garagehq.deuxfleurs.fr/ home: https://garagehq.deuxfleurs.fr/
icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg


@ -1,6 +1,6 @@
# garage # garage
![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.3.1](https://img.shields.io/badge/AppVersion-v1.3.1-informational?style=flat-square) ![Version: 0.7.1](https://img.shields.io/badge/Version-0.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.2.0](https://img.shields.io/badge/AppVersion-v1.2.0-informational?style=flat-square)
S3-compatible object store for small self-hosted geo-distributed deployments S3-compatible object store for small self-hosted geo-distributed deployments


@ -4,10 +4,6 @@ metadata:
name: {{ include "garage.fullname" . }} name: {{ include "garage.fullname" . }}
labels: labels:
{{- include "garage.labels" . | nindent 4 }} {{- include "garage.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec: spec:
type: {{ .Values.service.type }} type: {{ .Values.service.type }}
ports: ports:


@ -124,8 +124,6 @@ service:
# - NodePort (+ Ingress) # - NodePort (+ Ingress)
# - LoadBalancer # - LoadBalancer
type: ClusterIP type: ClusterIP
# -- Annotations to add to the service
annotations: {}
s3: s3:
api: api:
port: 3900 port: 3900


@ -34,8 +34,6 @@ in
jq jq
]; ];
shellHook = '' shellHook = ''
export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
function to_s3 { function to_s3 {
aws \ aws \
--endpoint-url https://garage.deuxfleurs.fr \ --endpoint-url https://garage.deuxfleurs.fr \


@ -1,6 +1,6 @@
[package] [package]
name = "garage_api_admin" name = "garage_api_admin"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -22,7 +22,7 @@ garage_api_common.workspace = true
argon2.workspace = true argon2.workspace = true
async-trait.workspace = true async-trait.workspace = true
thiserror.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
tracing.workspace = true tracing.workspace = true


@ -1,8 +1,8 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use thiserror::Error;
pub use garage_model::helper::error::Error as HelperError; pub use garage_model::helper::error::Error as HelperError;
@ -16,17 +16,20 @@ use garage_api_common::helpers::*;
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("{0}")] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(#[from] CommonError), Common(#[error(source)] CommonError),
// Category: cannot process // Category: cannot process
/// The API access key does not exist /// The API access key does not exist
#[error("Access key not found: {0}")] #[error(display = "Access key not found: {}", _0)]
NoSuchAccessKey(String), NoSuchAccessKey(String),
/// In Import key, the key already exists /// In Import key, the key already exists
#[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")] #[error(
display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
_0
)]
KeyAlreadyExists(String), KeyAlreadyExists(String),
} }


@ -1,6 +1,6 @@
[package] [package]
name = "garage_api_common" name = "garage_api_common"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -24,7 +24,7 @@ chrono.workspace = true
crc32fast.workspace = true crc32fast.workspace = true
crc32c.workspace = true crc32c.workspace = true
crypto-common.workspace = true crypto-common.workspace = true
thiserror.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
hmac.workspace = true hmac.workspace = true
md-5.workspace = true md-5.workspace = true


@ -1,7 +1,7 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use err_derive::Error;
use hyper::StatusCode; use hyper::StatusCode;
use thiserror::Error;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
pub enum CommonError { pub enum CommonError {
// ---- INTERNAL ERRORS ---- // ---- INTERNAL ERRORS ----
/// Error related to deeper parts of Garage /// Error related to deeper parts of Garage
#[error("Internal error: {0}")] #[error(display = "Internal error: {}", _0)]
InternalError(#[from] GarageError), InternalError(#[error(source)] GarageError),
/// Error related to Hyper /// Error related to Hyper
#[error("Internal error (Hyper error): {0}")] #[error(display = "Internal error (Hyper error): {}", _0)]
Hyper(#[from] hyper::Error), Hyper(#[error(source)] hyper::Error),
/// Error related to HTTP /// Error related to HTTP
#[error("Internal error (HTTP error): {0}")] #[error(display = "Internal error (HTTP error): {}", _0)]
Http(#[from] http::Error), Http(#[error(source)] http::Error),
// ---- GENERIC CLIENT ERRORS ---- // ---- GENERIC CLIENT ERRORS ----
/// Proper authentication was not provided /// Proper authentication was not provided
#[error("Forbidden: {0}")] #[error(display = "Forbidden: {}", _0)]
Forbidden(String), Forbidden(String),
/// Generic bad request response with custom message /// Generic bad request response with custom message
#[error("Bad request: {0}")] #[error(display = "Bad request: {}", _0)]
BadRequest(String), BadRequest(String),
/// The client sent a header with invalid value /// The client sent a header with invalid value
#[error("Invalid header value: {0}")] #[error(display = "Invalid header value: {}", _0)]
InvalidHeader(#[from] hyper::header::ToStrError), InvalidHeader(#[error(source)] hyper::header::ToStrError),
// ---- SPECIFIC ERROR CONDITIONS ---- // ---- SPECIFIC ERROR CONDITIONS ----
// These have to be error codes referenced in the S3 spec here: // These have to be error codes referenced in the S3 spec here:
// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
/// The bucket requested don't exists /// The bucket requested don't exists
#[error("Bucket not found: {0}")] #[error(display = "Bucket not found: {}", _0)]
NoSuchBucket(String), NoSuchBucket(String),
/// Tried to create a bucket that already exist /// Tried to create a bucket that already exist
#[error("Bucket already exists")] #[error(display = "Bucket already exists")]
BucketAlreadyExists, BucketAlreadyExists,
/// Tried to delete a non-empty bucket /// Tried to delete a non-empty bucket
#[error("Tried to delete a non-empty bucket")] #[error(display = "Tried to delete a non-empty bucket")]
BucketNotEmpty, BucketNotEmpty,
// Category: bad request // Category: bad request
/// Bucket name is not valid according to AWS S3 specs /// Bucket name is not valid according to AWS S3 specs
#[error("Invalid bucket name: {0}")] #[error(display = "Invalid bucket name: {}", _0)]
InvalidBucketName(String), InvalidBucketName(String),
} }


@ -33,6 +33,7 @@ use garage_util::metrics::{gen_trace_id, RecordDuration};
use garage_util::socket_address::UnixOrTCPSocketAddress; use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::helpers::{BoxBody, ErrorBody}; use crate::helpers::{BoxBody, ErrorBody};
use crate::signature::payload::Authorization;
pub trait ApiEndpoint: Send + Sync + 'static { pub trait ApiEndpoint: Send + Sync + 'static {
fn name(&self) -> &'static str; fn name(&self) -> &'static str;
@ -61,7 +62,7 @@ pub trait ApiHandler: Send + Sync + 'static {
/// Returns the key id used to authenticate this request. The ID returned must be safe to /// Returns the key id used to authenticate this request. The ID returned must be safe to
/// log. /// log.
fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> { fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
None None
} }
} }


@ -1,4 +1,4 @@
use thiserror::Error; use err_derive::Error;
use crate::common_error::CommonError; use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError}; pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
@ -6,21 +6,21 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInterna
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("{0}")] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(CommonError), Common(CommonError),
/// Authorization Header Malformed /// Authorization Header Malformed
#[error("Authorization header malformed, unexpected scope: {0}")] #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
AuthorizationHeaderMalformed(String), AuthorizationHeaderMalformed(String),
// Category: bad request // Category: bad request
/// The request contained an invalid UTF-8 sequence in its path or in other parameters /// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[from] std::str::Utf8Error), InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
/// The provided digest (checksum) value was invalid /// The provided digest (checksum) value was invalid
#[error("Invalid digest: {0}")] #[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String), InvalidDigest(String),
} }


@ -104,7 +104,7 @@ async fn check_standard_signature(
// Verify that all necessary request headers are included in signed_headers // Verify that all necessary request headers are included in signed_headers
// The following must be included for all signatures: // The following must be included for all signatures:
// - the Host header (mandatory) // - the Host header (mandatory)
// - all x-amz-* headers used in the request (except x-amz-content-sha256) // - all x-amz-* headers used in the request
// AWS also indicates that the Content-Type header should be signed if // AWS also indicates that the Content-Type header should be signed if
// it is used, but Minio client doesn't sign it so we don't check it for compatibility. // it is used, but Minio client doesn't sign it so we don't check it for compatibility.
let signed_headers = split_signed_headers(&authorization)?; let signed_headers = split_signed_headers(&authorization)?;
@ -151,7 +151,7 @@ async fn check_presigned_signature(
// Verify that all necessary request headers are included in signed_headers // Verify that all necessary request headers are included in signed_headers
// For AWSv4 pre-signed URLs, the following must be included: // For AWSv4 pre-signed URLs, the following must be included:
// - the Host header (mandatory) // - the Host header (mandatory)
// - all x-amz-* headers used in the request (except x-amz-content-sha256) // - all x-amz-* headers used in the request
let signed_headers = split_signed_headers(&authorization)?; let signed_headers = split_signed_headers(&authorization)?;
verify_signed_headers(request.headers(), &signed_headers)?; verify_signed_headers(request.headers(), &signed_headers)?;
@ -268,9 +268,7 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
return Err(Error::bad_request("Header `Host` should be signed")); return Err(Error::bad_request("Header `Host` should be signed"));
} }
for (name, _) in headers.iter() { for (name, _) in headers.iter() {
// Enforce signature of all x-amz-* headers, except x-amz-content-sh256 if name.as_str().starts_with("x-amz-") {
// because it is included in the canonical request in all cases
if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
if !signed_headers.contains(name) { if !signed_headers.contains(name) {
return Err(Error::bad_request(format!( return Err(Error::bad_request(format!(
"Header `{}` should be signed", "Header `{}` should be signed",
@ -470,7 +468,8 @@ impl Authorization {
let date = headers let date = headers
.get(X_AMZ_DATE) .get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")? .ok_or_bad_request("Missing X-Amz-Date field")
.map_err(Error::from)?
.to_str()?; .to_str()?;
let date = parse_date(date)?; let date = parse_date(date)?;


@ -1,6 +1,6 @@
[package] [package]
name = "garage_api_k2v" name = "garage_api_k2v"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true garage_api_common.workspace = true
base64.workspace = true base64.workspace = true
thiserror.workspace = true err-derive.workspace = true
tracing.workspace = true tracing.workspace = true
futures.workspace = true futures.workspace = true


@ -1,6 +1,6 @@
use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use thiserror::Error;
use garage_api_common::common_error::{commonErrorDerivative, CommonError}; use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error}; pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
@ -14,38 +14,38 @@ use garage_api_common::signature::error::Error as SignatureError;
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("{0}")] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(#[from] CommonError), Common(#[error(source)] CommonError),
// Category: cannot process // Category: cannot process
/// Authorization Header Malformed /// Authorization Header Malformed
#[error("Authorization header malformed, unexpected scope: {0}")] #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
AuthorizationHeaderMalformed(String), AuthorizationHeaderMalformed(String),
/// The provided digest (checksum) value was invalid /// The provided digest (checksum) value was invalid
#[error("Invalid digest: {0}")] #[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String), InvalidDigest(String),
/// The object requested don't exists /// The object requested don't exists
#[error("Key not found")] #[error(display = "Key not found")]
NoSuchKey, NoSuchKey,
/// Some base64 encoded data was badly encoded /// Some base64 encoded data was badly encoded
#[error("Invalid base64: {0}")] #[error(display = "Invalid base64: {}", _0)]
InvalidBase64(#[from] base64::DecodeError), InvalidBase64(#[error(source)] base64::DecodeError),
/// Invalid causality token /// Invalid causality token
#[error("Invalid causality token")] #[error(display = "Invalid causality token")]
InvalidCausalityToken, InvalidCausalityToken,
/// The client asked for an invalid return format (invalid Accept header) /// The client asked for an invalid return format (invalid Accept header)
#[error("Not acceptable: {0}")] #[error(display = "Not acceptable: {}", _0)]
NotAcceptable(String), NotAcceptable(String),
/// The request contained an invalid UTF-8 sequence in its path or in other parameters /// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[from] std::str::Utf8Error), InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
} }
commonErrorDerivative!(Error); commonErrorDerivative!(Error);


@ -1,6 +1,6 @@
[package] [package]
name = "garage_api_s3" name = "garage_api_s3"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -29,7 +29,7 @@ bytes.workspace = true
chrono.workspace = true chrono.workspace = true
crc32fast.workspace = true crc32fast.workspace = true
crc32c.workspace = true crc32c.workspace = true
thiserror.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
tracing.workspace = true tracing.workspace = true
md-5.workspace = true md-5.workspace = true


@ -88,9 +88,7 @@ pub async fn handle_put_cors(
pub struct CorsConfiguration { pub struct CorsConfiguration {
#[serde(serialize_with = "xmlns_tag", skip_deserializing)] #[serde(serialize_with = "xmlns_tag", skip_deserializing)]
pub xmlns: (), pub xmlns: (),
// "default" is required to be able to parse an empty list of rules, #[serde(rename = "CORSRule")]
// cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
#[serde(rename = "CORSRule", default)]
pub cors_rules: Vec<CorsRule>, pub cors_rules: Vec<CorsRule>,
} }
@ -272,26 +270,4 @@ mod tests {
Ok(()) Ok(())
} }
#[test]
fn test_deserialize_norules() -> Result<(), Error> {
let message = r#"<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
let conf: CorsConfiguration = from_str(message).unwrap();
let ref_value = CorsConfiguration {
xmlns: (),
cors_rules: vec![],
};
assert_eq! {
ref_value,
conf
};
let message2 = to_xml_with_header(&ref_value)?;
let cleanup = |c: &str| c.replace(char::is_whitespace, "");
assert_eq!(cleanup(message), cleanup(&message2));
Ok(())
}
} }


@ -1,8 +1,8 @@
use std::convert::TryInto; use std::convert::TryInto;
use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use thiserror::Error;
use garage_model::helper::error::Error as HelperError; use garage_model::helper::error::Error as HelperError;
@ -25,67 +25,67 @@ use crate::xml as s3_xml;
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("{0}")] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(#[from] CommonError), Common(#[error(source)] CommonError),
// Category: cannot process // Category: cannot process
/// Authorization Header Malformed /// Authorization Header Malformed
#[error("Authorization header malformed, unexpected scope: {0}")] #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
AuthorizationHeaderMalformed(String), AuthorizationHeaderMalformed(String),
/// The object requested don't exists /// The object requested don't exists
#[error("Key not found")] #[error(display = "Key not found")]
NoSuchKey, NoSuchKey,
/// The multipart upload requested don't exists /// The multipart upload requested don't exists
#[error("Upload not found")] #[error(display = "Upload not found")]
NoSuchUpload, NoSuchUpload,
/// Precondition failed (e.g. x-amz-copy-source-if-match) /// Precondition failed (e.g. x-amz-copy-source-if-match)
#[error("At least one of the preconditions you specified did not hold")] #[error(display = "At least one of the preconditions you specified did not hold")]
PreconditionFailed, PreconditionFailed,
/// Parts specified in CMU request do not match parts actually uploaded /// Parts specified in CMU request do not match parts actually uploaded
#[error("Parts given to CompleteMultipartUpload do not match uploaded parts")] #[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
InvalidPart, InvalidPart,
/// Parts given to CompleteMultipartUpload were not in ascending order /// Parts given to CompleteMultipartUpload were not in ascending order
#[error("Parts given to CompleteMultipartUpload were not in ascending order")] #[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
InvalidPartOrder, InvalidPartOrder,
/// In CompleteMultipartUpload: not enough data /// In CompleteMultipartUpload: not enough data
/// (here we are more lenient than AWS S3) /// (here we are more lenient than AWS S3)
#[error("Proposed upload is smaller than the minimum allowed object size")] #[error(display = "Proposed upload is smaller than the minimum allowed object size")]
EntityTooSmall, EntityTooSmall,
// Category: bad request // Category: bad request
/// The request contained an invalid UTF-8 sequence in its path or in other parameters /// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[from] std::str::Utf8Error), InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
/// The request used an invalid path /// The request used an invalid path
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8String(#[from] std::string::FromUtf8Error), InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
/// The client sent invalid XML data /// The client sent invalid XML data
#[error("Invalid XML: {0}")] #[error(display = "Invalid XML: {}", _0)]
InvalidXml(String), InvalidXml(String),
/// The client sent a range header with invalid value /// The client sent a range header with invalid value
#[error("Invalid HTTP range: {0:?}")] #[error(display = "Invalid HTTP range: {:?}", _0)]
InvalidRange((http_range::HttpRangeParseError, u64)), InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
/// The client sent a range header with invalid value /// The client sent a range header with invalid value
#[error("Invalid encryption algorithm: {0:?}, should be AES256")] #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
InvalidEncryptionAlgorithm(String), InvalidEncryptionAlgorithm(String),
/// The provided digest (checksum) value was invalid /// The provided digest (checksum) value was invalid
#[error("Invalid digest: {0}")] #[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String), InvalidDigest(String),
/// The client sent a request for an action not supported by garage /// The client sent a request for an action not supported by garage
#[error("Unimplemented action: {0}")] #[error(display = "Unimplemented action: {}", _0)]
NotImplemented(String), NotImplemented(String),
} }
@ -99,12 +99,6 @@ impl From<HelperError> for Error {
} }
} }
impl From<(http_range::HttpRangeParseError, u64)> for Error {
fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
Error::InvalidRange(err)
}
}
impl From<roxmltree::Error> for Error { impl From<roxmltree::Error> for Error {
fn from(err: roxmltree::Error) -> Self { fn from(err: roxmltree::Error) -> Self {
Self::InvalidXml(format!("{}", err)) Self::InvalidXml(format!("{}", err))


@ -845,9 +845,7 @@ impl PreconditionHeaders {
} }
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> { fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
// we store date with ms precision, but headers are precise to the second: truncate let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
// the timestamp to handle the same-second edge case
let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6 // Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6


@ -141,26 +141,10 @@ pub async fn handle_post_object(
let mut conditions = decoded_policy.into_conditions()?; let mut conditions = decoded_policy.into_conditions()?;
// If there are conditions on the bucket name, check these against the actual bucket_name rather
// than the one in params, which is allowed to be absent.
if let Some(conds) = conditions.params.remove("bucket") {
for cond in conds {
let ok = match cond {
Operation::Equal(s) => s.as_str() == bucket_name,
Operation::StartsWith(s) => bucket_name.starts_with(&s),
};
if !ok {
return Err(Error::bad_request(
"Key 'bucket' has value not allowed in policy",
));
}
}
}
for (param_key, value) in params.iter() { for (param_key, value) in params.iter() {
let param_key = param_key.as_str(); let param_key = param_key.as_str();
match param_key { match param_key {
"policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
"content-type" => { "content-type" => {
let conds = conditions.params.remove("content-type").ok_or_else(|| { let conds = conditions.params.remove("content-type").ok_or_else(|| {
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key)) Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))


@ -39,6 +39,8 @@ use crate::encryption::EncryptionParams;
use crate::error::*; use crate::error::*;
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION; use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
pub(crate) struct SaveStreamResult { pub(crate) struct SaveStreamResult {
pub(crate) version_uuid: Uuid, pub(crate) version_uuid: Uuid,
pub(crate) version_timestamp: u64, pub(crate) version_timestamp: u64,
@ -491,7 +493,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
let recv_next = async { let recv_next = async {
// If more than a maximum number of writes are in progress, don't add more for now // If more than a maximum number of writes are in progress, don't add more for now
if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request { if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
futures::future::pending().await futures::future::pending().await
} else { } else {
block_rx3.recv().await block_rx3.recv().await


@ -1,6 +1,6 @@
[package] [package]
name = "garage_block" name = "garage_block"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"


@ -50,8 +50,6 @@ pub const INLINE_THRESHOLD: usize = 3072;
// to delete the block locally. // to delete the block locally.
pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600); pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);
const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);
/// RPC messages used to share blocks of data between nodes /// RPC messages used to share blocks of data between nodes
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub enum BlockRpc { pub enum BlockRpc {
@ -89,7 +87,6 @@ pub struct BlockManager {
disable_scrub: bool, disable_scrub: bool,
mutation_lock: Vec<Mutex<BlockManagerLocked>>, mutation_lock: Vec<Mutex<BlockManagerLocked>>,
read_semaphore: Semaphore,
pub rc: BlockRc, pub rc: BlockRc,
pub resync: BlockResyncManager, pub resync: BlockResyncManager,
@ -179,8 +176,6 @@ impl BlockManager {
.iter() .iter()
.map(|_| Mutex::new(BlockManagerLocked())) .map(|_| Mutex::new(BlockManagerLocked()))
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
rc, rc,
resync, resync,
system, system,
@ -562,6 +557,9 @@ impl BlockManager {
match self.find_block(hash).await { match self.find_block(hash).await {
Some(p) => self.read_block_from(hash, &p).await, Some(p) => self.read_block_from(hash, &p).await,
None => { None => {
// Not found but maybe we should have had it ??
self.resync
.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
return Err(Error::Message(format!( return Err(Error::Message(format!(
"block {:?} not found on node", "block {:?} not found on node",
hash hash
@ -583,15 +581,6 @@ impl BlockManager {
) -> Result<DataBlock, Error> { ) -> Result<DataBlock, Error> {
let (header, path) = block_path.as_parts_ref(); let (header, path) = block_path.as_parts_ref();
let permit = tokio::select! {
sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
_ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
self.metrics.block_read_semaphore_timeouts.add(1);
debug!("read block {:?}: read_semaphore acquire timeout", hash);
return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
}
};
let mut f = fs::File::open(&path).await?; let mut f = fs::File::open(&path).await?;
let mut data = vec![]; let mut data = vec![];
f.read_to_end(&mut data).await?; f.read_to_end(&mut data).await?;
@ -616,8 +605,6 @@ impl BlockManager {
return Err(Error::CorruptData(*hash)); return Err(Error::CorruptData(*hash));
} }
drop(permit);
Ok(data) Ok(data)
} }
@ -783,7 +770,6 @@ impl BlockManagerLocked {
let mut f = fs::File::create(&path_tmp).await?; let mut f = fs::File::create(&path_tmp).await?;
f.write_all(data).await?; f.write_all(data).await?;
f.flush().await?;
mgr.metrics.bytes_written.add(data.len() as u64); mgr.metrics.bytes_written.add(data.len() as u64);
if mgr.data_fsync { if mgr.data_fsync {


@ -22,7 +22,6 @@ pub struct BlockManagerMetrics {
pub(crate) bytes_read: BoundCounter<u64>, pub(crate) bytes_read: BoundCounter<u64>,
pub(crate) block_read_duration: BoundValueRecorder<f64>, pub(crate) block_read_duration: BoundValueRecorder<f64>,
pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
pub(crate) bytes_written: BoundCounter<u64>, pub(crate) bytes_written: BoundCounter<u64>,
pub(crate) block_write_duration: BoundValueRecorder<f64>, pub(crate) block_write_duration: BoundValueRecorder<f64>,
pub(crate) delete_counter: BoundCounter<u64>, pub(crate) delete_counter: BoundCounter<u64>,
@ -120,11 +119,6 @@ impl BlockManagerMetrics {
.with_description("Duration of block read operations") .with_description("Duration of block read operations")
.init() .init()
.bind(&[]), .bind(&[]),
block_read_semaphore_timeouts: meter
.u64_counter("block.read_semaphore_timeouts")
.with_description("Number of block reads that failed due to semaphore acquire timeout")
.init()
.bind(&[]),
bytes_written: meter bytes_written: meter
.u64_counter("block.bytes_written") .u64_counter("block.bytes_written")
.with_description("Number of bytes written to disk") .with_description("Number of bytes written to disk")


@ -133,14 +133,6 @@ impl BlockResyncManager {
))) )))
} }
/// Clear the entire resync queue and list of errored blocks
/// Corresponds to `garage repair clear-resync-queue`
pub fn clear_resync_queue(&self) -> Result<(), Error> {
self.queue.clear()?;
self.errors.clear()?;
Ok(())
}
pub fn register_bg_vars(&self, vars: &mut vars::BgVars) { pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
let notify = self.notify.clone(); let notify = self.notify.clone();
vars.register_rw( vars.register_rw(

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_db" name = "garage_db"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -12,7 +12,7 @@ readme = "../../README.md"
path = "lib.rs" path = "lib.rs"
[dependencies] [dependencies]
thiserror.workspace = true err-derive.workspace = true
tracing.workspace = true tracing.workspace = true
heed = { workspace = true, optional = true } heed = { workspace = true, optional = true }

View file

@ -20,7 +20,7 @@ use std::cell::Cell;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use thiserror::Error; use err_derive::Error;
pub use open::*; pub use open::*;
@ -44,7 +44,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
// ---- // ----
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("{0}")] #[error(display = "{}", _0)]
pub struct Error(pub Cow<'static, str>); pub struct Error(pub Cow<'static, str>);
impl From<std::io::Error> for Error { impl From<std::io::Error> for Error {
@ -56,7 +56,7 @@ impl From<std::io::Error> for Error {
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("{0}")] #[error(display = "{}", _0)]
pub struct TxOpError(pub(crate) Error); pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>; pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
@ -106,44 +106,32 @@ impl Db {
result: Cell::new(None), result: Cell::new(None),
}; };
let tx_res = self.0.transaction(&f); let tx_res = self.0.transaction(&f);
let fn_res = f.result.into_inner(); let ret = f
.result
.into_inner()
.expect("Transaction did not store result");
match (tx_res, fn_res) { match tx_res {
(Ok(on_commit), Some(Ok(value))) => { Ok(on_commit) => match ret {
// Transaction succeeded Ok(value) => {
// TxFn stored the value to return to the user in fn_res
// tx_res contains the on_commit list of callbacks, run them now
on_commit.into_iter().for_each(|f| f()); on_commit.into_iter().for_each(|f| f());
Ok(value) Ok(value)
} }
(Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => { _ => unreachable!(),
// Transaction was aborted by user code },
// The abort error value is stored in fn_res Err(TxError::Abort(())) => match ret {
Err(TxError::Abort(e)) Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
} _ => unreachable!(),
(Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => { },
// Transaction encountered a DB error in user code Err(TxError::Db(e2)) => match ret {
// The error value encountered is the one in fn_res, // Ok was stored -> the error occurred when finalizing
// tx_res contains only a dummy error message // transaction
Err(TxError::Db(fn_e)) Ok(_) => Err(TxError::Db(e2)),
} // An error was already stored: that's the one we want to
(Err(TxError::Db(tx_e)), None) => { // return
// Transaction encountered a DB error when initializing the transaction,
// before user code was called _ => unreachable!(),
Err(TxError::Db(tx_e)) },
}
(Err(TxError::Db(tx_e)), Some(Ok(_))) => {
// Transaction encountered a DB error when committing the transaction,
// after user code was called
Err(TxError::Db(tx_e))
}
(tx_res, fn_res) => {
panic!(
"unexpected error case: tx_res={:?}, fn_res={:?}",
tx_res.map(|_| "..."),
fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
);
}
} }
} }
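The rewritten Db::transaction above pairs the engine's outcome (tx_res) with the value the user closure stored in a Cell (fn_res), so that user-requested aborts and low-level DB errors reach the caller as distinct TxError variants. The following stripped-down sketch reproduces that pairing with hypothetical stand-in types instead of Garage's TxFn/TxError machinery.

// Hypothetical stand-ins (not Garage's real types) for the engine outcome
// and for the result stored by the user closure (None = never stored).
#[derive(Debug)]
enum TxOutcome { Commit, Aborted, DbError(String) }
type FnResult = Option<Result<u32, &'static str>>;

fn combine(tx_res: TxOutcome, fn_res: FnResult) -> Result<u32, String> {
    match (tx_res, fn_res) {
        // Engine committed and the closure stored a value: return it.
        (TxOutcome::Commit, Some(Ok(v))) => Ok(v),
        // Engine rolled back because the closure asked for an abort.
        (TxOutcome::Aborted, Some(Err(e))) => Err(format!("aborted: {}", e)),
        // Low-level DB error, whether before, inside or after the closure.
        (TxOutcome::DbError(e), _) => Err(format!("db error: {}", e)),
        // Anything else is a logic error, as in the panic!() arm above.
        (tx, f) => panic!("unexpected case: {:?} / {:?}", tx, f),
    }
}

fn main() {
    assert_eq!(combine(TxOutcome::Commit, Some(Ok(7))), Ok(7));
    assert!(combine(TxOutcome::DbError("disk full".into()), None).is_err());
}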

View file

@ -151,16 +151,30 @@ impl IDb for SqliteDb {
} }
fn snapshot(&self, base_path: &PathBuf) -> Result<()> { fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?; fn progress(p: rusqlite::backup::Progress) {
let path = Engine::Sqlite use std::sync::atomic::{AtomicU64, Ordering};
.db_path(&base_path) use std::time::{SystemTime, UNIX_EPOCH};
.into_os_string()
.into_string()
.map_err(|_| Error("invalid sqlite path string".into()))?;
info!("Start sqlite VACUUM INTO `{}`", path); static LAST_LOG_TIME: AtomicU64 = AtomicU64::new(0);
self.db.get()?.execute("VACUUM INTO ?1", params![path])?;
info!("Finished sqlite VACUUM INTO `{}`", path); let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Fix your clock :o")
.as_millis() as u64;
if now >= LAST_LOG_TIME.load(Ordering::Relaxed) + 10 * 1000 {
let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
info!("Sqlite snapshot progress: {}%", percent);
LAST_LOG_TIME.fetch_max(now, Ordering::Relaxed);
}
}
std::fs::create_dir_all(base_path)?;
let path = Engine::Sqlite.db_path(&base_path);
self.db
.get()?
.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
Ok(()) Ok(())
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage" name = "garage"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -49,6 +49,8 @@ structopt.workspace = true
git-version.workspace = true git-version.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true
serde_bytes.workspace = true
futures.workspace = true futures.workspace = true
tokio.workspace = true tokio.workspace = true

View file

@ -71,6 +71,10 @@ pub enum NodeOperation {
/// Connect to Garage node that is currently isolated from the system /// Connect to Garage node that is currently isolated from the system
#[structopt(name = "connect", version = garage_version())] #[structopt(name = "connect", version = garage_version())]
Connect(ConnectNodeOpt), Connect(ConnectNodeOpt),
/// Dump the content of a metadata table as JSON lines
#[structopt(name = "dump", version = garage_version())]
Dump(DumpNodeOpt),
} }
#[derive(StructOpt, Debug)] #[derive(StructOpt, Debug)]
@ -88,6 +92,12 @@ pub struct ConnectNodeOpt {
pub(crate) node: String, pub(crate) node: String,
} }
#[derive(StructOpt, Debug)]
pub struct DumpNodeOpt {
/// Name of the data table to dump
pub(crate) what: String,
}
#[derive(StructOpt, Debug)] #[derive(StructOpt, Debug)]
pub enum LayoutOperation { pub enum LayoutOperation {
/// Assign role to Garage node /// Assign role to Garage node
@ -466,10 +476,6 @@ pub enum RepairWhat {
/// Repair (resync/rebalance) the set of stored blocks in the cluster /// Repair (resync/rebalance) the set of stored blocks in the cluster
#[structopt(name = "blocks", version = garage_version())] #[structopt(name = "blocks", version = garage_version())]
Blocks, Blocks,
/// Clear the block resync queue. The list of blocks in errored state
/// is cleared as well. You MUST run `garage repair blocks` after invoking this.
#[structopt(name = "clear-resync-queue", version = garage_version())]
ClearResyncQueue,
/// Repropagate object deletions to the version table /// Repropagate object deletions to the version table
#[structopt(name = "versions", version = garage_version())] #[structopt(name = "versions", version = garage_version())]
Versions, Versions,

View file

@ -145,11 +145,14 @@ async fn main() {
let res = match opt.cmd { let res = match opt.cmd {
Command::Server => server::run_server(opt.config_file, opt.secrets).await, Command::Server => server::run_server(opt.config_file, opt.secrets).await,
Command::OfflineRepair(repair_opt) => { Command::OfflineRepair(repair_opt) => {
repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt)
} }
Command::ConvertDb(conv_opt) => { Command::ConvertDb(conv_opt) => {
cli::convert_db::do_conversion(conv_opt).map_err(From::from) cli::convert_db::do_conversion(conv_opt).map_err(From::from)
} }
Command::Node(NodeOperation::Dump(dump_opt)) => {
repair::offline::dump(opt.config_file, opt.secrets, dump_opt)
}
Command::Node(NodeOperation::NodeId(node_id_opt)) => { Command::Node(NodeOperation::NodeId(node_id_opt)) => {
node_id_command(opt.config_file, node_id_opt.quiet) node_id_command(opt.config_file, node_id_opt.quiet)
} }

View file

@ -1,14 +1,18 @@
use std::io::Write;
use std::path::PathBuf; use std::path::PathBuf;
use serde::Serialize;
use garage_util::config::*; use garage_util::config::*;
use garage_util::error::*; use garage_util::error::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_table::{replication::TableReplication, *};
use crate::cli::structs::*; use crate::cli::structs::*;
use crate::secrets::{fill_secrets, Secrets}; use crate::secrets::{fill_secrets, Secrets};
pub async fn offline_repair( pub fn offline_repair(
config_file: PathBuf, config_file: PathBuf,
secrets: Secrets, secrets: Secrets,
opt: OfflineRepairOpt, opt: OfflineRepairOpt,
@ -45,3 +49,100 @@ pub async fn offline_repair(
Ok(()) Ok(())
} }
pub fn dump(config_file: PathBuf, secrets: Secrets, opt: DumpNodeOpt) -> Result<(), Error> {
let what = opt.what.as_str();
info!("Loading configuration...");
let config = fill_secrets(read_config(config_file)?, secrets)?;
info!("Initializing Garage main data store...");
let garage = Garage::new(config)?;
match what {
"bucket" | "buckets" => dump_table_inner(&garage.bucket_table),
"bucket_alias" | "bucket_aliases" => dump_table_inner(&garage.bucket_alias_table),
"key" | "keys" => dump_table_inner(&garage.key_table),
"object" | "objects" => dump_table_inner(&garage.object_table),
"object_counter" | "object_counters" => Err(Error::Message(
"object_counters cannot be JSON-serialized".into(),
)),
"mpu" => dump_table_inner(&garage.mpu_table),
"mpu_counter" | "mpu_counters" => Err(Error::Message(
"mpu_counters cannot be JSON-serialized".into(),
)),
"version" | "versions" => dump_table_inner(&garage.version_table),
"block_ref" | "block_refs" => dump_table_inner(&garage.block_ref_table),
#[cfg(feature = "k2v")]
"k2v_item" | "k2v_items" => dump_table_inner(&garage.k2v.item_table),
//#[cfg(feature = "k2v")]
"k2v_counter" | "k2v_counters" => Err(Error::Message(
"k2v_counters cannot be JSON-serialized".into(),
)),
other => {
let mut stdout = std::io::stdout().lock();
match other {
"cluster_layout" => Err(Error::Message(
"cluster_layout cannot be JSON-serialized".into(),
)),
_ => Err(Error::Message(format!("invalid thing to dump: {}", what))),
}
}
}
}
#[derive(Serialize)]
struct DumpEntry<'a, T: Serialize> {
#[serde(with = "serde_bytes")]
partition_key: &'a [u8],
#[serde(with = "serde_bytes")]
sort_key: &'a [u8],
entry: &'a T,
}
fn dump_table_inner<F, R>(table: &Table<F, R>) -> Result<(), Error>
where
F: TableSchema,
R: TableReplication,
{
eprintln!("Dumping table {}...", F::TABLE_NAME);
let mut stdout = std::io::stdout().lock();
for line in table.data.store.iter()? {
let (_k, v) = line?;
let v_dec = table.data.decode_entry(&v)?;
let pkh = v_dec.partition_key().hash();
let dump_entry = DumpEntry {
partition_key: pkh.as_slice(),
sort_key: v_dec.sort_key().sort_key(),
entry: &v_dec,
};
dump_line(&mut stdout, dump_entry)?;
}
stdout.flush()?;
Ok(())
}
fn dump_line<T: Serialize>(
mut stdout: &mut std::io::StdoutLock<'static>,
dump_entry: T,
) -> Result<(), Error> {
let mut ser = serde_json::ser::Serializer::with_formatter(&mut stdout, DumpFormatter);
dump_entry.serialize(&mut ser)?;
stdout.write_all(b"\n")?;
Ok(())
}
struct DumpFormatter;
impl serde_json::ser::Formatter for DumpFormatter {
fn write_byte_array<W>(&mut self, writer: &mut W, value: &[u8]) -> std::io::Result<()>
where
W: ?Sized + std::io::Write,
{
writer.write_all(b"\"")?;
writer.write_all(hex::encode(&value).as_bytes())?;
writer.write_all(b"\"")
}
}
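dump_table_inner writes one JSON object per table entry to stdout, with partition_key and sort_key hex-encoded by the custom DumpFormatter and the decoded entry nested under entry; the command is invoked as something like garage node dump objects (see the aliases matched in dump() above). A small sketch of reading such a line back in Rust, using a hypothetical example line rather than real table data:

use serde::Deserialize;

// Mirror of the line format produced above; the keys are hex strings because
// DumpFormatter overrides write_byte_array. The entry is kept as raw JSON here.
#[derive(Deserialize, Debug)]
struct DumpLine {
    partition_key: String,
    sort_key: String,
    entry: serde_json::Value,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical example line, not real Garage data.
    let line = r#"{"partition_key":"a1b2c3","sort_key":"00ff","entry":{"state":"ok"}}"#;
    let parsed: DumpLine = serde_json::from_str(line)?;
    let pk_bytes = hex::decode(&parsed.partition_key)?;
    println!("{} bytes of partition key, entry = {}", pk_bytes.len(), parsed.entry);
    Ok(())
}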

View file

@ -92,11 +92,6 @@ pub async fn launch_online_repair(
info!("Repairing bucket aliases (foreground)"); info!("Repairing bucket aliases (foreground)");
garage.locked_helper().await.repair_aliases().await?; garage.locked_helper().await.repair_aliases().await?;
} }
RepairWhat::ClearResyncQueue => {
let garage = garage.clone();
tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
.await??
}
} }
Ok(()) Ok(())
} }

View file

@ -198,7 +198,6 @@ async fn test_precondition() {
); );
} }
let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0); let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0); let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
{ {
let err = ctx let err = ctx
@ -213,18 +212,6 @@ async fn test_precondition() {
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304) matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
); );
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_modified_since(same_date)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);
let o = ctx let o = ctx
.client .client
.get_object() .get_object()
@ -249,17 +236,6 @@ async fn test_precondition() {
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412) matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
); );
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_unmodified_since(same_date)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
let o = ctx let o = ctx
.client .client
.get_object() .get_object()

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_model" name = "garage_model"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -24,7 +24,7 @@ garage_net.workspace = true
async-trait.workspace = true async-trait.workspace = true
blake2.workspace = true blake2.workspace = true
chrono.workspace = true chrono.workspace = true
thiserror.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
http.workspace = true http.workspace = true
base64.workspace = true base64.workspace = true

View file

@ -315,15 +315,15 @@ impl Garage {
Ok(()) Ok(())
} }
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> { pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
helper::bucket::BucketHelper(self) helper::bucket::BucketHelper(self)
} }
pub fn key_helper(&self) -> helper::key::KeyHelper<'_> { pub fn key_helper(&self) -> helper::key::KeyHelper {
helper::key::KeyHelper(self) helper::key::KeyHelper(self)
} }
pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> { pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
let lock = self.bucket_lock.lock().await; let lock = self.bucket_lock.lock().await;
helper::locked::LockedHelper(self, Some(lock)) helper::locked::LockedHelper(self, Some(lock))
} }

View file

@ -1,24 +1,24 @@
use err_derive::Error;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use thiserror::Error;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
#[derive(Debug, Error, Serialize, Deserialize)] #[derive(Debug, Error, Serialize, Deserialize)]
pub enum Error { pub enum Error {
#[error("Internal error: {0}")] #[error(display = "Internal error: {}", _0)]
Internal(#[from] GarageError), Internal(#[error(source)] GarageError),
#[error("Bad request: {0}")] #[error(display = "Bad request: {}", _0)]
BadRequest(String), BadRequest(String),
/// Bucket name is not valid according to AWS S3 specs /// Bucket name is not valid according to AWS S3 specs
#[error("Invalid bucket name: {0}")] #[error(display = "Invalid bucket name: {}", _0)]
InvalidBucketName(String), InvalidBucketName(String),
#[error("Access key not found: {0}")] #[error(display = "Access key not found: {}", _0)]
NoSuchAccessKey(String), NoSuchAccessKey(String),
#[error("Bucket not found: {0}")] #[error(display = "Bucket not found: {}", _0)]
NoSuchBucket(String), NoSuchBucket(String),
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_net" name = "garage_net"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -30,7 +30,7 @@ rand.workspace = true
log.workspace = true log.workspace = true
arc-swap.workspace = true arc-swap.workspace = true
thiserror.workspace = true err-derive.workspace = true
bytes.workspace = true bytes.workspace = true
cfg-if.workspace = true cfg-if.workspace = true

View file

@ -159,7 +159,7 @@ where
pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>; pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;
pub(crate) trait GenericEndpoint { pub(crate) trait GenericEndpoint {
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>>; fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>>;
fn drop_handler(&self); fn drop_handler(&self);
fn clone_endpoint(&self) -> DynEndpoint; fn clone_endpoint(&self) -> DynEndpoint;
} }
@ -175,7 +175,7 @@ where
M: Message, M: Message,
H: StreamingEndpointHandler<M> + 'static, H: StreamingEndpointHandler<M> + 'static,
{ {
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>> { fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>> {
async move { async move {
match self.0.handler.load_full() { match self.0.handler.load_full() {
None => Err(Error::NoHandler), None => Err(Error::NoHandler),

View file

@ -1,49 +1,49 @@
use std::io; use std::io;
use err_derive::Error;
use log::error; use log::error;
use thiserror::Error;
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("IO error: {0}")] #[error(display = "IO error: {}", _0)]
Io(#[from] io::Error), Io(#[error(source)] io::Error),
#[error("Messagepack encode error: {0}")] #[error(display = "Messagepack encode error: {}", _0)]
RMPEncode(#[from] rmp_serde::encode::Error), RMPEncode(#[error(source)] rmp_serde::encode::Error),
#[error("Messagepack decode error: {0}")] #[error(display = "Messagepack decode error: {}", _0)]
RMPDecode(#[from] rmp_serde::decode::Error), RMPDecode(#[error(source)] rmp_serde::decode::Error),
#[error("Tokio join error: {0}")] #[error(display = "Tokio join error: {}", _0)]
TokioJoin(#[from] tokio::task::JoinError), TokioJoin(#[error(source)] tokio::task::JoinError),
#[error("oneshot receive error: {0}")] #[error(display = "oneshot receive error: {}", _0)]
OneshotRecv(#[from] tokio::sync::oneshot::error::RecvError), OneshotRecv(#[error(source)] tokio::sync::oneshot::error::RecvError),
#[error("Handshake error: {0}")] #[error(display = "Handshake error: {}", _0)]
Handshake(#[from] kuska_handshake::async_std::Error), Handshake(#[error(source)] kuska_handshake::async_std::Error),
#[error("UTF8 error: {0}")] #[error(display = "UTF8 error: {}", _0)]
UTF8(#[from] std::string::FromUtf8Error), UTF8(#[error(source)] std::string::FromUtf8Error),
#[error("Framing protocol error")] #[error(display = "Framing protocol error")]
Framing, Framing,
#[error("Remote error ({0:?}): {1}")] #[error(display = "Remote error ({:?}): {}", _0, _1)]
Remote(io::ErrorKind, String), Remote(io::ErrorKind, String),
#[error("Request ID collision")] #[error(display = "Request ID collision")]
IdCollision, IdCollision,
#[error("{0}")] #[error(display = "{}", _0)]
Message(String), Message(String),
#[error("No handler / shutting down")] #[error(display = "No handler / shutting down")]
NoHandler, NoHandler,
#[error("Connection closed")] #[error(display = "Connection closed")]
ConnectionClosed, ConnectionClosed,
#[error("Version mismatch: {0}")] #[error(display = "Version mismatch: {}", _0)]
VersionMismatch(String), VersionMismatch(String),
} }
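The error enums in this diff appear in two equivalent spellings: the thiserror attributes on one side (#[error("IO error: {0}")], #[from]) and the err-derive attributes on the other (#[error(display = "...", _0)], #[error(source)]). A short self-contained example of the thiserror form, using a hypothetical error type rather than one from Garage:

use thiserror::Error;

// Hypothetical error type showing the thiserror spelling of the attributes in
// the hunks above: the format string indexes fields directly and #[from]
// replaces #[error(source)], generating both From and Error::source().
#[derive(Debug, Error)]
enum DemoError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Remote error ({0:?}): {1}")]
    Remote(std::io::ErrorKind, String),
    #[error("{0}")]
    Message(String),
}

fn main() {
    let e: DemoError = std::io::Error::new(std::io::ErrorKind::Other, "boom").into();
    println!("{}", e); // prints "IO error: boom"
}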

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_rpc" name = "garage_rpc"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -33,7 +33,7 @@ async-trait.workspace = true
serde.workspace = true serde.workspace = true
serde_bytes.workspace = true serde_bytes.workspace = true
serde_json.workspace = true serde_json.workspace = true
thiserror = { workspace = true, optional = true } err-derive = { workspace = true, optional = true }
# newer version requires rust edition 2021 # newer version requires rust edition 2021
kube = { workspace = true, optional = true } kube = { workspace = true, optional = true }
@ -49,5 +49,5 @@ opentelemetry.workspace = true
[features] [features]
kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ] kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ]
consul-discovery = [ "reqwest", "thiserror" ] consul-discovery = [ "reqwest", "err-derive" ]
system-libs = [ "sodiumoxide/use-pkg-config" ] system-libs = [ "sodiumoxide/use-pkg-config" ]

View file

@ -3,8 +3,8 @@ use std::fs::File;
use std::io::Read; use std::io::Read;
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use err_derive::Error;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use thiserror::Error;
use garage_net::NodeID; use garage_net::NodeID;
@ -219,12 +219,12 @@ impl ConsulDiscovery {
/// Regroup all Consul discovery errors /// Regroup all Consul discovery errors
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum ConsulError { pub enum ConsulError {
#[error("IO error: {0}")] #[error(display = "IO error: {}", _0)]
Io(#[from] std::io::Error), Io(#[error(source)] std::io::Error),
#[error("HTTP error: {0}")] #[error(display = "HTTP error: {}", _0)]
Reqwest(#[from] reqwest::Error), Reqwest(#[error(source)] reqwest::Error),
#[error("Invalid Consul TLS configuration")] #[error(display = "Invalid Consul TLS configuration")]
InvalidTLSConfig, InvalidTLSConfig,
#[error("Token error: {0}")] #[error(display = "Token error: {}", _0)]
Token(#[from] reqwest::header::InvalidHeaderValue), Token(#[error(source)] reqwest::header::InvalidHeaderValue),
} }

View file

@ -229,11 +229,13 @@ impl LayoutManager {
} }
/// Save cluster layout data to disk /// Save cluster layout data to disk
async fn save_cluster_layout(&self) { async fn save_cluster_layout(&self) -> Result<(), Error> {
let layout = self.layout.read().unwrap().inner().clone(); let layout = self.layout.read().unwrap().inner().clone();
if let Err(e) = self.persist_cluster_layout.save_async(&layout).await { self.persist_cluster_layout
error!("Failed to save cluster_layout: {}", e); .save_async(&layout)
} .await
.expect("Cannot save current cluster layout");
Ok(())
} }
fn broadcast_update(self: &Arc<Self>, rpc: SystemRpc) { fn broadcast_update(self: &Arc<Self>, rpc: SystemRpc) {
@ -311,7 +313,7 @@ impl LayoutManager {
self.change_notify.notify_waiters(); self.change_notify.notify_waiters();
self.broadcast_update(SystemRpc::AdvertiseClusterLayout(new_layout)); self.broadcast_update(SystemRpc::AdvertiseClusterLayout(new_layout));
self.save_cluster_layout().await; self.save_cluster_layout().await?;
} }
Ok(SystemRpc::Ok) Ok(SystemRpc::Ok)
@ -326,7 +328,7 @@ impl LayoutManager {
if let Some(new_trackers) = self.merge_layout_trackers(trackers) { if let Some(new_trackers) = self.merge_layout_trackers(trackers) {
self.change_notify.notify_waiters(); self.change_notify.notify_waiters();
self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(new_trackers)); self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(new_trackers));
self.save_cluster_layout().await; self.save_cluster_layout().await?;
} }
Ok(SystemRpc::Ok) Ok(SystemRpc::Ok)

View file

@ -507,7 +507,7 @@ impl LayoutVersion {
g.compute_maximal_flow()?; g.compute_maximal_flow()?;
if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 { if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 {
return Err(Error::Message( return Err(Error::Message(
"The storage capacity of the cluster is too small. It is \ "The storage capacity of he cluster is to small. It is \
impossible to store partitions of size 1." impossible to store partitions of size 1."
.into(), .into(),
)); ));

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_table" name = "garage_table"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_util" name = "garage_util"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -21,7 +21,7 @@ arc-swap.workspace = true
async-trait.workspace = true async-trait.workspace = true
blake2.workspace = true blake2.workspace = true
bytesize.workspace = true bytesize.workspace = true
thiserror.workspace = true err-derive.workspace = true
hexdump.workspace = true hexdump.workspace = true
xxhash-rust.workspace = true xxhash-rust.workspace = true
hex.workspace = true hex.workspace = true

View file

@ -115,7 +115,6 @@ impl WorkerProcessor {
trace!("{} (TID {}): {:?}", worker.worker.name(), worker.task_id, worker.state); trace!("{} (TID {}): {:?}", worker.worker.name(), worker.task_id, worker.state);
// Save worker info // Save worker info
{
let mut wi = self.worker_info.lock().unwrap(); let mut wi = self.worker_info.lock().unwrap();
match wi.get_mut(&worker.task_id) { match wi.get_mut(&worker.task_id) {
Some(i) => { Some(i) => {
@ -138,16 +137,10 @@ impl WorkerProcessor {
}); });
} }
} }
}
if worker.state == WorkerState::Done { if worker.state == WorkerState::Done {
info!("Worker {} (TID {}) exited", worker.worker.name(), worker.task_id); info!("Worker {} (TID {}) exited", worker.worker.name(), worker.task_id);
} else { } else {
// Yield to the Tokio scheduler between consecutive Busy steps so
// that a worker which never suspends on its own cannot starve other tasks.
if worker.state == WorkerState::Busy {
tokio::task::yield_now().await;
}
workers.push(async move { workers.push(async move {
worker.step().await; worker.step().await;
worker worker

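The comment above explains why a worker that reports WorkerState::Busy yields back to the Tokio scheduler between steps: a step that never awaits on its own could otherwise starve other tasks on the same executor. A tiny illustrative sketch of the same idea, with a hypothetical work loop:

// Hypothetical CPU-bound loop that cooperates with the scheduler by yielding
// after each chunk of work, so concurrently spawned tasks get a chance to run.
async fn busy_worker(chunks: u64) -> u64 {
    let mut acc = 0u64;
    for i in 0..chunks {
        acc = acc.wrapping_add(i * i); // one chunk of purely synchronous work
        tokio::task::yield_now().await; // let other tasks on this executor run
    }
    acc
}

#[tokio::main]
async fn main() {
    let other = tokio::spawn(async { println!("other task ran"); });
    let total = busy_worker(1_000).await;
    other.await.unwrap();
    println!("worker finished, acc = {}", total);
}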
View file

@ -45,11 +45,6 @@ pub struct Config {
)] )]
pub block_size: usize, pub block_size: usize,
/// Maximum number of parallel block writes per PUT request
/// Higher values improve throughput but increase memory usage
/// Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
#[serde(default = "default_block_max_concurrent_writes_per_request")]
pub block_max_concurrent_writes_per_request: usize,
/// Number of replicas. Can be any positive integer, but uneven numbers are more favorable. /// Number of replicas. Can be any positive integer, but uneven numbers are more favorable.
/// - 1 for single-node clusters, or to disable replication /// - 1 for single-node clusters, or to disable replication
/// - 3 is the recommended and supported setting. /// - 3 is the recommended and supported setting.
@ -80,10 +75,6 @@ pub struct Config {
)] )]
pub block_ram_buffer_max: usize, pub block_ram_buffer_max: usize,
/// Maximum number of concurrent reads of block files on disk
#[serde(default = "default_block_max_concurrent_reads")]
pub block_max_concurrent_reads: usize,
/// Skip the permission check of secret files. Useful when /// Skip the permission check of secret files. Useful when
/// POSIX ACLs (or more complex chmods) are used. /// POSIX ACLs (or more complex chmods) are used.
#[serde(default)] #[serde(default)]
@ -272,9 +263,6 @@ pub struct KubernetesDiscoveryConfig {
pub skip_crd: bool, pub skip_crd: bool,
} }
pub fn default_block_max_concurrent_writes_per_request() -> usize {
3
}
/// Read and parse configuration /// Read and parse configuration
pub fn read_config(config_file: PathBuf) -> Result<Config, Error> { pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
let config = std::fs::read_to_string(config_file)?; let config = std::fs::read_to_string(config_file)?;
@ -292,9 +280,6 @@ fn default_block_size() -> usize {
fn default_block_ram_buffer_max() -> usize { fn default_block_ram_buffer_max() -> usize {
256 * 1024 * 1024 256 * 1024 * 1024
} }
fn default_block_max_concurrent_reads() -> usize {
16
}
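The default_* helpers above are attached to the config fields via #[serde(default = "...")], so a garage.toml that omits a key silently gets the hard-coded value (3 concurrent writes per request, 16 concurrent reads). A minimal sketch with a hypothetical two-field struct, since Garage's real Config has many required fields:

use serde::Deserialize;

// Hypothetical struct (not Garage's real Config) demonstrating how the serde
// default helpers above behave when the corresponding TOML keys are absent.
#[derive(Deserialize, Debug)]
struct BlockTuning {
    #[serde(default = "default_reads")]
    block_max_concurrent_reads: usize,
    #[serde(default = "default_writes_per_request")]
    block_max_concurrent_writes_per_request: usize,
}

fn default_reads() -> usize { 16 }
fn default_writes_per_request() -> usize { 3 }

fn main() {
    let t: BlockTuning = toml::from_str("block_max_concurrent_reads = 32").unwrap();
    assert_eq!(t.block_max_concurrent_reads, 32); // explicit value wins
    assert_eq!(t.block_max_concurrent_writes_per_request, 3); // falls back to default
}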
fn default_consistency_mode() -> String { fn default_consistency_mode() -> String {
"consistent".into() "consistent".into()

View file

@ -2,7 +2,7 @@
use std::fmt; use std::fmt;
use std::io; use std::io;
use thiserror::Error; use err_derive::Error;
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
@ -12,61 +12,68 @@ use crate::encode::debug_serialize;
/// Regroup all Garage errors /// Regroup all Garage errors
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("IO error: {0}")] #[error(display = "IO error: {}", _0)]
Io(#[from] io::Error), Io(#[error(source)] io::Error),
#[error("Hyper error: {0}")] #[error(display = "Hyper error: {}", _0)]
Hyper(#[from] hyper::Error), Hyper(#[error(source)] hyper::Error),
#[error("HTTP error: {0}")] #[error(display = "HTTP error: {}", _0)]
Http(#[from] http::Error), Http(#[error(source)] http::Error),
#[error("Invalid HTTP header value: {0}")] #[error(display = "Invalid HTTP header value: {}", _0)]
HttpHeader(#[from] http::header::ToStrError), HttpHeader(#[error(source)] http::header::ToStrError),
#[error("Network error: {0}")] #[error(display = "Network error: {}", _0)]
Net(#[from] garage_net::error::Error), Net(#[error(source)] garage_net::error::Error),
#[error("DB error: {0}")] #[error(display = "DB error: {}", _0)]
Db(#[from] garage_db::Error), Db(#[error(source)] garage_db::Error),
#[error("Messagepack encode error: {0}")] #[error(display = "Messagepack encode error: {}", _0)]
RmpEncode(#[from] rmp_serde::encode::Error), RmpEncode(#[error(source)] rmp_serde::encode::Error),
#[error("Messagepack decode error: {0}")] #[error(display = "Messagepack decode error: {}", _0)]
RmpDecode(#[from] rmp_serde::decode::Error), RmpDecode(#[error(source)] rmp_serde::decode::Error),
#[error("JSON error: {0}")] #[error(display = "JSON error: {}", _0)]
Json(#[from] serde_json::error::Error), Json(#[error(source)] serde_json::error::Error),
#[error("TOML decode error: {0}")] #[error(display = "TOML decode error: {}", _0)]
TomlDecode(#[from] toml::de::Error), TomlDecode(#[error(source)] toml::de::Error),
#[error("Tokio join error: {0}")] #[error(display = "Tokio join error: {}", _0)]
TokioJoin(#[from] tokio::task::JoinError), TokioJoin(#[error(source)] tokio::task::JoinError),
#[error("Tokio semaphore acquire error: {0}")] #[error(display = "Tokio semaphore acquire error: {}", _0)]
TokioSemAcquire(#[from] tokio::sync::AcquireError), TokioSemAcquire(#[error(source)] tokio::sync::AcquireError),
#[error("Tokio broadcast receive error: {0}")] #[error(display = "Tokio broadcast receive error: {}", _0)]
TokioBcastRecv(#[from] tokio::sync::broadcast::error::RecvError), TokioBcastRecv(#[error(source)] tokio::sync::broadcast::error::RecvError),
#[error("Remote error: {0}")] #[error(display = "Remote error: {}", _0)]
RemoteError(String), RemoteError(String),
#[error("Timeout")] #[error(display = "Timeout")]
Timeout, Timeout,
#[error("Could not reach quorum of {0} (sets={1:?}). {2} of {3} request succeeded, others returned errors: {4:?}")] #[error(
display = "Could not reach quorum of {} (sets={:?}). {} of {} request succeeded, others returned errors: {:?}",
_0,
_1,
_2,
_3,
_4
)]
Quorum(usize, Option<usize>, usize, usize, Vec<String>), Quorum(usize, Option<usize>, usize, usize, Vec<String>),
#[error("Unexpected RPC message: {0}")] #[error(display = "Unexpected RPC message: {}", _0)]
UnexpectedRpcMessage(String), UnexpectedRpcMessage(String),
#[error("Corrupt data: does not match hash {0:?}")] #[error(display = "Corrupt data: does not match hash {:?}", _0)]
CorruptData(Hash), CorruptData(Hash),
#[error("Missing block {0:?}: no node returned a valid block")] #[error(display = "Missing block {:?}: no node returned a valid block", _0)]
MissingBlock(Hash), MissingBlock(Hash),
#[error("{0}")] #[error(display = "{}", _0)]
Message(String), Message(String),
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_web" name = "garage_web"
version = "1.3.1" version = "1.2.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"] authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -20,7 +20,7 @@ garage_model.workspace = true
garage_util.workspace = true garage_util.workspace = true
garage_table.workspace = true garage_table.workspace = true
thiserror.workspace = true err-derive.workspace = true
tracing.workspace = true tracing.workspace = true
percent-encoding.workspace = true percent-encoding.workspace = true

View file

@ -1,6 +1,6 @@
use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use thiserror::Error;
use garage_api_common::generic_server::ApiError; use garage_api_common::generic_server::ApiError;
@ -8,15 +8,15 @@ use garage_api_common::generic_server::ApiError;
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
/// An error received from the API crate /// An error received from the API crate
#[error("API error: {0}")] #[error(display = "API error: {}", _0)]
ApiError(garage_api_s3::error::Error), ApiError(garage_api_s3::error::Error),
/// The file does not exist /// The file does not exist
#[error("Not found")] #[error(display = "Not found")]
NotFound, NotFound,
/// The client sent a request without host, or with unsupported method /// The client sent a request without host, or with unsupported method
#[error("Bad request: {0}")] #[error(display = "Bad request: {}", _0)]
BadRequest(String), BadRequest(String),
} }