Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2026-05-14 21:26:53 -04:00)

Compare commits: v1.99.3-in...main-v1 (102 commits)
Commits in this range (SHA1 only):
b6b18427a5, 9987166b2b, b72b090a09, 8551aefed4, 47bf5d9fb0, 5df37dae5e, 44af0bdab3, a7d6620e18,
8eb12755e4, c685a2cbaf, 969f42a970, 424d4f8d4d, bf5290036f, 4efc8bac07, f3dcc39903, 43e02920c2,
dcc2fe4ac5, e3a5ec6ef6, 4d124e1c76, d769a7be5d, 511cf0c6ec, 95693d45b2, ca296477f3, ca3b4a050d,
a057ab23ea, 58bc65b9a8, ac851d6dee, eac2aa6fe4, 1e0201ada2, 82297371bf, 174f4f01a8, 1aac7b4875,
b43c58cbe5, 9481ac428e, 1c29d04cc5, b48a8eaa1f, 42fd8583bd, 236af3a958, 4b1fdbef55, 0f1b488be0,
0bbf63ee0e, 879d941d7b, d726cf0299, 0c7aeab6f8, 5687fc0375, 97f1e9ab52, 60b1d78b56, 4c895a7186,
c3b5cbf212, 57a467b5c0, 6cf6db5c61, d5a57e3e13, 5cf354acb4, 2b007ddea3, c8599a8636, 0b901bf291,
c8c20d6f47, e5db610e4c, 65c6f8adea, 54b9bf02a3, 469153233f, 90bba5889a, a64b567d43, 6ea86db8cd,
aa69c06f2b, a6c6c44310, 96d7713915, d64498c3d3, b340599e68, 5448012b27, ce34d11a65, 8cb7623ebd,
5469c95877, f930c6f643, afcb22bf16, cc29a40d51, 0f3f180c3e, 70cf6004ae, c7571ff89b, 1b42919bf7,
3f4ab3a4a3, 3a4afc04a9, fbf03e9378, 9eb07d4c7b, 85ee4f5d8c, 328072d122, 26bc807905, a9f5f242b2,
ae98abca5c, adfa44ad70, 47143b88ad, 8843aa92fa, b601b3e46d, a19d2f16e2, fc8fc60f6d, 77079a1498,
2a4f729b57, 1b042e379e, ffbce0f689, 37e5621dde, 6529ff379a, 8b35a946d9
89 changed files with 2821 additions and 1317 deletions

@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - push

@@ -9,27 +12,32 @@ when:
 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
-      - nix-shell --attr devShell --run "cargo fmt -- --check"
+      - nix-build -j4 --attr flakePackages.fmt

   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-build -j4 --attr flakePackages.dev

   - name: unit + func tests (lmdb)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-build -j4 --attr flakePackages.tests-lmdb

   - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-build -j4 --attr flakePackages.tests-sqlite

+  - name: unit + func tests (fjall)
+    image: nixpkgs/nix:nixos-24.05
+    commands:
+      - nix-build -j4 --attr flakePackages.tests-fjall
+
   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-build -j4 --attr flakePackages.dev
      - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment

@@ -8,7 +11,7 @@ depends_on:
 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id

@@ -19,7 +22,7 @@ steps:
      - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
@@ -1,3 +1,6 @@
+labels:
+  nix: "enabled"
+
 when:
   event:
     - deployment

@@ -16,17 +19,17 @@ matrix:
 steps:
   - name: build
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"

   - name: integration tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
    when:

@@ -36,7 +39,7 @@ steps:
      ARCH: i386

   - name: upgrade tests
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    commands:
      - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
    when:

@@ -44,7 +47,7 @@ steps:
      ARCH: amd64

   - name: push static binary
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    environment:
      TARGET: "${TARGET}"
      AWS_ACCESS_KEY_ID:

@@ -55,7 +58,7 @@ steps:
      - nix-shell --attr ci --run "to_s3"

   - name: docker build and publish
-    image: nixpkgs/nix:nixos-22.05
+    image: nixpkgs/nix:nixos-24.05
    environment:
      DOCKER_PLATFORM: "linux/${ARCH}"
      CONTAINER_NAME: "dxflrs/${ARCH}_garage"
Cargo.lock (generated, 1928 lines changed): file diff suppressed because it is too large.
Cargo.toml (46 lines changed):

@@ -24,18 +24,18 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.1.0", path = "src/api/common" }
-garage_api_admin = { version = "1.1.0", path = "src/api/admin" }
-garage_api_s3 = { version = "1.1.0", path = "src/api/s3" }
-garage_api_k2v = { version = "1.1.0", path = "src/api/k2v" }
-garage_block = { version = "1.1.0", path = "src/block" }
-garage_db = { version = "1.1.0", path = "src/db", default-features = false }
-garage_model = { version = "1.1.0", path = "src/model", default-features = false }
-garage_net = { version = "1.1.0", path = "src/net" }
-garage_rpc = { version = "1.1.0", path = "src/rpc" }
-garage_table = { version = "1.1.0", path = "src/table" }
-garage_util = { version = "1.1.0", path = "src/util" }
-garage_web = { version = "1.1.0", path = "src/web" }
+garage_api_common = { version = "1.3.1", path = "src/api/common" }
+garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
+garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
+garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
+garage_block = { version = "1.3.1", path = "src/block" }
+garage_db = { version = "1.3.1", path = "src/db", default-features = false }
+garage_model = { version = "1.3.1", path = "src/model", default-features = false }
+garage_net = { version = "1.3.1", path = "src/net" }
+garage_rpc = { version = "1.3.1", path = "src/rpc" }
+garage_table = { version = "1.3.1", path = "src/table" }
+garage_util = { version = "1.3.1", path = "src/util" }
+garage_web = { version = "1.3.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io

@@ -52,7 +52,6 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
-err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"

@@ -65,6 +64,7 @@ md-5 = "0.10"
 mktemp = "0.5"
 nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
 parking_lot = "0.12"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"

@@ -83,12 +83,14 @@ pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
 syslog-tracing = "0.3"
 tracing = "0.1"
+tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
+rusqlite = "0.37"
 r2d2 = "0.8"
-r2d2_sqlite = "0.24"
+r2d2_sqlite = "0.31"
+fjall = "2.4"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }

@@ -134,7 +136,7 @@ prometheus = "0.13"
 aws-sigv4 = { version = "1.1", default-features = false }
 hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
 log = "0.4"
-thiserror = "1.0"
+thiserror = "2.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"

@@ -144,12 +146,8 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
 aws-sdk-config = { version = "1.62", default-features = false }
 aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }

-[profile.dev]
-#lto = "thin" # disabled for now, adds 2-4 min to each CI build
-lto = "off"
-
 [profile.release]
-lto = true
-codegen-units = 1
-opt-level = "s"
-strip = true
+lto = "thin"
+codegen-units = 16
+opt-level = 3
+strip = "debuginfo"
@@ -12,7 +12,7 @@ In this section, we cover the following web applications:
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
-| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
+| [Pixelfed](#pixelfed) | ✅ | Natively supported |
 | [Pleroma](#pleroma) | ❓ | Not yet tested |
 | [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
 | [Funkwhale](#funkwhale) | ❓ | Not yet tested |

@@ -191,10 +191,10 @@ garage key create peertube-key

 Keep the Key ID and the Secret key in a pad, they will be needed later.

-We need two buckets, one for normal videos (named peertube-video) and one for webtorrent videos (named peertube-playlist).
+We need two buckets, one for normal videos (named peertube-videos) and one for webtorrent videos (named peertube-playlists).
 ```bash
 garage bucket create peertube-videos
-garage bucket create peertube-playlist
+garage bucket create peertube-playlists
 ```

 Now we allow our key to read and write on these buckets:

@@ -253,7 +253,7 @@ object_storage:
     proxify_private_files: false

   streaming_playlists:
-    bucket_name: 'peertube-playlist'
+    bucket_name: 'peertube-playlists'

     # Keep it empty for our example
     prefix: ''
@@ -161,3 +161,49 @@ kopia repository validate-provider

 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.
+
+## Plakar
+
+Create your key and bucket on the Garage server:
+
+```bash
+garage key create my-plakar-key
+garage bucket create plakar-backups
+garage bucket allow plakar-backups --read --write --key my-plakar-key
+…
+```
+
+On the Plakar server, add your Garage as a storage location:
+```bash
+plakar store add garageS3 s3://my-garage.tld/plakar-backups \
+    region=garage # Or as you've specified in garage.toml \
+    access_key=<Key ID from "garage key info my-plakar-key"> \
+    secret_access_key=<Secret key from "garage key info my-plakar-key">
+```
+
+Then create the repository:
+```bash
+plakar at @garageS3 create -plaintext # Unencrypted
+# or
+plakar at @garageS3 create # encrypted
+```
+
+If you encrypt your backups (the Plakar default), you will need to define a strong passphrase. Do not forget to save this passphrase safely; it will be needed to decrypt your backups.
+
+After the repository has been created, check that everything works as expected (this might return an empty result since no file has been added yet, but there should be no error message):
+```bash
+plakar at @garageS3 check
+```
+
+Now that everything is configured, you can use Garage as your backup storage. For instance, sync it with a local backup store:
+```bash
+$ plakar at ~/backups sync to @garageS3
+```
+
+Or list the S3 storage content:
+```bash
+$ plakar at @garageS3 ls
+```
+
+More information is available in the Plakar documentation: https://www.plakar.io/docs/main/quickstart/
@@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.

 ## Comparison of Ansible roles

-| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
-|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
-| **Runtime** | Systemd | Docker |
-| **Target OS** | Any Linux | Any Linux |
-| **Architecture** | amd64, arm64, i686 | amd64, arm64 |
-| **Additional software** | None | Traefik |
-| **Automatic node connection** | ❌ | ✅ |
-| **Layout management** | ❌ | ✅ |
-| **Manage buckets & keys** | ❌ | ✅ (basic) |
-| **Allow custom Garage config** | ✅ | ❌ |
-| **Facilitate Garage upgrades** | ✅ | ❌ |
-| **Multiple instances on one host** | ✅ | ✅ |
+| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
+|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
+| **Runtime** | Systemd | Docker | Systemd |
+| **Target OS** | Any Linux | Any Linux | Any Linux |
+| **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
+| **Additional software** | None | Traefik | Nginx and Keepalived (optional) |
+| **Automatic node connection** | ❌ | ✅ | ✅ |
+| **Layout management** | ❌ | ✅ | ✅ |
+| **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ |
+| **Allow custom Garage config** | ✅ | ❌ | ❌ |
+| **Facilitate Garage upgrades** | ✅ | ❌ | ✅ |
+| **Multiple instances on one host** | ✅ | ✅ | ❌ |


 ## zorun/ansible-role-garage

@@ -49,3 +49,15 @@ structured DNS names, etc).

 As a result, this role makes it easier to start with Garage on Ansible,
 but is less flexible.
+
+## eddster2309/ansible-role-garage
+
+[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
+
+This role is an opinionated but customisable role using the official Garage
+static binaries and only requires Systemd. As such it should work on any
+Linux-based host. It includes all the necessary configuration to
+automatically set up a clustered Garage deployment. Most Garage
+configuration options are exposed through Ansible variables, so while you
+can't provide a custom config you can get very close. It can optionally
+install an HA Nginx deployment with Keepalived.
@@ -15,9 +15,10 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage.toml`. You can run
-Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
-will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage/garage.toml`. You can run
+Garage using: `rc-service garage start`.
+
+If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once

@@ -26,7 +27,7 @@ it's stable).

 ## Arch Linux

-Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).
+Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).

 ## FreeBSD
@@ -11,7 +11,7 @@ Firstly clone the repository:

 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/scripts/helm
+cd garage/script/helm
 ```

 Deploy with default options:
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.1.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.1.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.1.0
+sudo docker pull dxflrs/garage:v1.3.0
 ```

 ## Deploying and configuring Garage

@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.1.0
+  dxflrs/garage:v1.3.0
 ```

 With this command line, Garage should be started automatically at each boot.

@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.1.0
+    image: dxflrs/garage:v1.3.0
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -132,7 +132,7 @@ docker run \
   -v /path/to/garage.toml:/etc/garage.toml \
   -v /path/to/garage/meta:/var/lib/garage/meta \
   -v /path/to/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.1.0
+  dxflrs/garage:v1.3.0
 ```

 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
@@ -182,11 +182,12 @@ ID Hostname Address Tag Zone Capacit
 ## Creating a cluster layout

 Creating a cluster layout for a Garage deployment means informing Garage
-of the disk space available on each node of the cluster
-as well as the zone (e.g. datacenter) each machine is located in.
+of the disk space available on each node of the cluster, `-c`,
+as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.

-For our test deployment, we are using only one node. The way in which we configure
-it does not matter, you can simply write:
+For our test deployment, we have only one node, in a zone named `dc1`, with a
+capacity of `1G`; the capacity is ignored for a single-node deployment
+and can be changed later when adding new nodes.

 ```bash
 garage layout assign -z dc1 -c 1G <node_id>
@@ -24,7 +24,8 @@ db_engine = "lmdb"

 block_size = "1M"
 block_ram_buffer_max = "256MiB"

+block_max_concurrent_reads = 16
+block_max_concurrent_writes_per_request = 10
 lmdb_map_size = "1T"

 compression_level = 1
@@ -93,30 +94,32 @@ The following gives details about each available configuration option.

 [Environment variables](#env_variables).

-Top-level configuration options:
+Top-level configuration options, in alphabetical order:
+[`allow_punycode`](#allow_punycode),
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
+[`block_max_concurrent_reads`](#block_max_concurrent_reads),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
+[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
+[`consistency_mode`](#consistency_mode),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
-[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
 [`metadata_snapshots_dir`](#metadata_snapshots_dir),
 [`replication_factor`](#replication_factor),
-[`consistency_mode`](#consistency_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
 [`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
 [`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
-[`allow_punycode`](#allow_punycode).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
+[`use_local_tz`](#use_local_tz).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -153,13 +156,17 @@ The `[admin]` section:

 ### Environment variables {#env_variables}

-The following configuration parameter must be specified as an environment
-variable, it does not exist in the configuration file:
+The following configuration parameters must be specified as environment variables;
+they do not exist in the configuration file:

 - `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
   Garage daemon send its logs to `syslog` (using the libc `syslog` function)
   instead of printing to stderr.

+- `GARAGE_LOG_TO_JOURNALD` (since `v1.2.0`): set this to `1` or `true` to make the
+  Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
+  instead of printing to stderr.
+
 The following environment variables can be used to override the corresponding
 values in the configuration file:
@@ -171,7 +178,7 @@ values in the configuration file:

 ### Top-level configuration options

-#### `replication_factor` {#replication_factor}
+#### `replication_factor` (since `v1.0.0`) {#replication_factor}

 The replication factor can be any positive integer smaller or equal the node count in your cluster.
 The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
@@ -219,7 +226,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
 routine operation for Garage, although we cannot guarantee you that everything
 will go right in such an extreme scenario.

-#### `consistency_mode` {#consistency_mode}
+#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}

 The consistency mode setting determines the read and write behaviour of your cluster.
@@ -329,6 +336,7 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
 | --------- | ----------------- | ------------- |
 | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
 | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
+| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
 | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |

 Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
@@ -365,6 +373,14 @@ LMDB works very well, but is known to have the following limitations:
   so it is not the best choice for high-performance storage clusters,
   but it should work fine in many cases.

+- Fjall: a storage engine based on LSM trees, which theoretically allows for
+  higher write throughput than other storage engines that are based on B-trees.
+  Using Fjall could potentially improve Garage's performance significantly in
+  write-heavy workloads. **Support for Fjall is experimental at this point**;
+  we have added it to Garage for evaluation purposes only. **Do not use it for
+  production-critical workloads.**
+
 It is possible to convert Garage's metadata directory from one format to another
 using the `garage convert-db` command, which should be used as follows:
@@ -402,6 +418,7 @@ Here is how this option impacts the different database engines:
 |----------|------------------------------------|-------------------------------|
 | Sqlite   | `PRAGMA synchronous = OFF`         | `PRAGMA synchronous = NORMAL` |
 | LMDB     | `MDB_NOMETASYNC` + `MDB_NOSYNC`    | `MDB_NOMETASYNC`              |
+| Fjall    | default options                    | not supported                 |

 Note that the Sqlite database is always ran in `WAL` mode (`PRAGMA journal_mode = WAL`).
@@ -508,6 +525,37 @@ node.

 The default value is 256MiB.

+#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
+
+The maximum number of blocks (individual files in the data directory) open
+simultaneously for reading.
+
+Reducing this number does not limit the number of data blocks that can be
+transferred through the network simultaneously. This mechanism was added
+purely as a backpressure mechanism for HDD read speed: it helps avoid a situation
+where too many requests are coming in and Garage is reading too many block
+files simultaneously, thus not making timely progress on any of the reads.
+
+When a request to read a data block comes in through the network, the request
+waits for one of the `block_max_concurrent_reads` slots to be available
+(internally implemented using a Semaphore object). Once it has acquired a read
+slot, it reads the entire block file to RAM and frees the slot as soon as the
+block file is finished reading. Only after the slot is released will the
+block's data start being transferred over the network. If the request fails to
+acquire a reading slot within 15 seconds, it fails with a timeout error.
+Timeout events can be monitored through the `block_read_semaphore_timeouts`
+metric in Prometheus: a non-zero number of such events indicates an I/O
+bottleneck on HDD read speed.
+
+#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
+
+This parameter sets the maximum number of parallel block writes per PUT
+request; it is designed to adapt to the concurrent write performance of
+different storage media. Higher values improve throughput but increase
+memory usage.
+
+Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.
+
 #### `lmdb_map_size` {#lmdb_map_size}

 This parameters can be used to set the map size used by LMDB,
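As an aside on the `block_max_concurrent_reads` mechanism documented above, here is a minimal sketch of the acquire-with-timeout pattern it describes, using tokio's `Semaphore`. The function and variable names are hypothetical and the 15-second constant is taken from the description; the actual code in Garage's block manager may be organized differently.

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Semaphore;

// Hypothetical illustration of the read-slot pattern described above.
async fn read_block_with_backpressure(
    sem: Arc<Semaphore>, // sized to block_max_concurrent_reads
    path: std::path::PathBuf,
) -> std::io::Result<Vec<u8>> {
    // Wait up to 15 seconds for a read slot, as the documentation describes.
    let permit = tokio::time::timeout(Duration::from_secs(15), sem.acquire_owned())
        .await
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "read slot timeout"))?
        .expect("semaphore closed");
    // Read the whole block file into RAM while holding the slot...
    let data = tokio::fs::read(&path).await?;
    // ...then release the slot before the data is streamed out to the network.
    drop(permit);
    Ok(data)
}
```

With `block_max_concurrent_reads = 16`, the semaphore would be created once as `Arc::new(Semaphore::new(16))` and shared by all read requests.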
@@ -23,17 +23,17 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
 - 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.


 ## High-level features

 | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
 |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
-| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
+| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
 | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
 | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
 | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
 | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
 | [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
 | [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |

 *Note:* OpenIO does not says if it supports presigned URLs. Because it is part
 of signature v4 and they claim they support it without additional precisions,
@@ -70,7 +70,7 @@ Example response body:

 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.1.0",
+  "garageVersion": "v1.3.0",
   "garageFeatures": [
     "k2v",
     "lmdb",
flake.lock (generated, 16 lines changed):

@@ -50,17 +50,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1736692550,
-        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
+        "lastModified": 1763977559,
+        "narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
         "type": "github"
       }
     },

@@ -80,17 +80,17 @@
       ]
     },
     "locked": {
-      "lastModified": 1738549608,
-      "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
+      "lastModified": 1763952169,
+      "narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     },
     "original": {
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
       "type": "github"
     }
   },
flake.nix (19 lines changed):

@@ -2,13 +2,13 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 24.11 as of 2025-01-12
+  # Nixpkgs 25.05 as of 2025-11-24
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
+    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";

-  # Rust overlay as of 2025-02-03
+  # Rust overlay as of 2025-11-24
   inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
+    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
   inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

   inputs.crane.url = "github:ipetkov/crane";

@@ -30,6 +30,10 @@
       inherit system nixpkgs crane rust-overlay extraTestEnv;
       release = false;
     }).garage-test;
+    lints = (compile {
+      inherit system nixpkgs crane rust-overlay;
+      release = false;
+    });
   in
   {
     packages = {

@@ -53,6 +57,13 @@
       tests-sqlite = testWith {
         GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
       };
+      tests-fjall = testWith {
+        GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
+      };
+
+      # lints (fmt, clippy)
+      fmt = lints.garage-cargo-fmt;
+      clippy = lints.garage-cargo-clippy;
     };

     # ---- developpment shell, for making native builds only ----
@@ -48,7 +48,7 @@ let

   inherit (pkgs) lib stdenv;

-  toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
+  toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
     targets = lib.optionals (target != null) [ rustTarget ];
     extensions = [
       "rust-src"

@@ -68,12 +68,13 @@ let
   rootFeatures = if features != null then
     features
   else
-    ([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
+    ([ "bundled-libs" "lmdb" "sqlite" "fjall" "k2v" ] ++ (lib.optionals release [
       "consul-discovery"
      "kubernetes-discovery"
      "metrics"
      "telemetry-otlp"
      "syslog"
+      "journald"
     ]));

   featuresStr = lib.concatStringsSep "," rootFeatures;

@@ -189,4 +190,15 @@ in rec {
     pkgs.cacert
   ];
   } // extraTestEnv);

+  # ---- source code linting ----
+
+  garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
+    cargoExtraArgs = "";
+  });
+
+  garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
+    cargoArtifacts = garage-deps;
+    cargoClippyExtraArgs = "--all-targets -- -D warnings";
+  });
 }
@@ -1,6 +1,7 @@
 export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
 export AWS_DEFAULT_REGION='garage'
+export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
 # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
 function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
@@ -2,8 +2,8 @@ apiVersion: v2
 name: garage
 description: S3-compatible object store for small self-hosted geo-distributed deployments
 type: application
-version: 0.7.0
-appVersion: "v1.1.0"
+version: 0.7.3
+appVersion: "v1.3.1"
 home: https://garagehq.deuxfleurs.fr/
 icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
@@ -1,6 +1,6 @@
 # garage

 [chart version / app version badges]

 S3-compatible object store for small self-hosted geo-distributed deployments
@@ -4,6 +4,10 @@ metadata:
   name: {{ include "garage.fullname" . }}
   labels:
     {{- include "garage.labels" . | nindent 4 }}
+  {{- with .Values.service.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 spec:
   type: {{ .Values.service.type }}
   ports:
@@ -124,6 +124,8 @@ service:
   #   - NodePort (+ Ingress)
   #   - LoadBalancer
   type: ClusterIP
+  # -- Annotations to add to the service
+  annotations: {}
   s3:
     api:
       port: 3900
@@ -34,6 +34,8 @@ in
     jq
   ];
   shellHook = ''
+    export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
+
     function to_s3 {
       aws \
         --endpoint-url https://garage.deuxfleurs.fr \
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_admin"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -22,7 +22,7 @@ garage_api_common.workspace = true

 argon2.workspace = true
 async-trait.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 tracing.workspace = true
@@ -1,8 +1,8 @@
 use std::convert::TryFrom;

-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 pub use garage_model::helper::error::Error as HelperError;

@@ -16,20 +16,17 @@ use garage_api_common::helpers::*;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(#[from] CommonError),

     // Category: cannot process
     /// The API access key does not exist
-    #[error(display = "Access key not found: {}", _0)]
+    #[error("Access key not found: {0}")]
     NoSuchAccessKey(String),

     /// In Import key, the key already exists
-    #[error(
-        display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
-        _0
-    )]
+    #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
     KeyAlreadyExists(String),
 }
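The hunk above is representative of the err-derive to thiserror migration applied across every API crate in this comparison: `#[error(display = "...", _0)]` becomes `#[error("...{0}")]`, and `#[error(source)]` on a wrapped error becomes `#[from]`. A minimal self-contained sketch of the target idiom (hypothetical error type, not from the Garage codebase):

```rust
use thiserror::Error;

// Hypothetical example of the thiserror idiom adopted in the diff above.
#[derive(Debug, Error)]
enum FetchError {
    // `{0}` interpolates the first tuple field into the Display message.
    #[error("object not found: {0}")]
    NotFound(String),

    // `#[from]` also generates `impl From<std::io::Error> for FetchError`,
    // so the `?` operator converts the underlying error automatically.
    #[error("i/o error: {0}")]
    Io(#[from] std::io::Error),
}

fn read_object(path: &str) -> Result<Vec<u8>, FetchError> {
    Ok(std::fs::read(path)?) // io::Error -> FetchError via #[from]
}
```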
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_common"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -24,7 +24,7 @@ chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
 crypto-common.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 hmac.workspace = true
 md-5.workspace = true
@@ -1,7 +1,7 @@
 use std::convert::TryFrom;

-use err_derive::Error;
 use hyper::StatusCode;
+use thiserror::Error;

 use garage_util::error::Error as GarageError;

@@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
 pub enum CommonError {
     // ---- INTERNAL ERRORS ----
     /// Error related to deeper parts of Garage
-    #[error(display = "Internal error: {}", _0)]
-    InternalError(#[error(source)] GarageError),
+    #[error("Internal error: {0}")]
+    InternalError(#[from] GarageError),

     /// Error related to Hyper
-    #[error(display = "Internal error (Hyper error): {}", _0)]
-    Hyper(#[error(source)] hyper::Error),
+    #[error("Internal error (Hyper error): {0}")]
+    Hyper(#[from] hyper::Error),

     /// Error related to HTTP
-    #[error(display = "Internal error (HTTP error): {}", _0)]
-    Http(#[error(source)] http::Error),
+    #[error("Internal error (HTTP error): {0}")]
+    Http(#[from] http::Error),

     // ---- GENERIC CLIENT ERRORS ----
     /// Proper authentication was not provided
-    #[error(display = "Forbidden: {}", _0)]
+    #[error("Forbidden: {0}")]
     Forbidden(String),

     /// Generic bad request response with custom message
-    #[error(display = "Bad request: {}", _0)]
+    #[error("Bad request: {0}")]
     BadRequest(String),

     /// The client sent a header with invalid value
-    #[error(display = "Invalid header value: {}", _0)]
-    InvalidHeader(#[error(source)] hyper::header::ToStrError),
+    #[error("Invalid header value: {0}")]
+    InvalidHeader(#[from] hyper::header::ToStrError),

     // ---- SPECIFIC ERROR CONDITIONS ----
     // These have to be error codes referenced in the S3 spec here:
     // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
     /// The bucket requested don't exists
-    #[error(display = "Bucket not found: {}", _0)]
+    #[error("Bucket not found: {0}")]
     NoSuchBucket(String),

     /// Tried to create a bucket that already exist
-    #[error(display = "Bucket already exists")]
+    #[error("Bucket already exists")]
     BucketAlreadyExists,

     /// Tried to delete a non-empty bucket
-    #[error(display = "Tried to delete a non-empty bucket")]
+    #[error("Tried to delete a non-empty bucket")]
     BucketNotEmpty,

     // Category: bad request
     /// Bucket name is not valid according to AWS S3 specs
-    #[error(display = "Invalid bucket name: {}", _0)]
+    #[error("Invalid bucket name: {0}")]
     InvalidBucketName(String),
 }
@@ -58,6 +58,12 @@ pub trait ApiHandler: Send + Sync + 'static {
         req: Request<IncomingBody>,
         endpoint: Self::Endpoint,
     ) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
+
+    /// Returns the key id used to authenticate this request. The ID returned must be safe to
+    /// log.
+    fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
+        None
+    }
 }

 pub struct ApiServer<A: ApiHandler> {

@@ -142,19 +148,20 @@ impl<A: ApiHandler> ApiServer<A> {
     ) -> Result<Response<BoxBody<A::Error>>, http::Error> {
         let uri = req.uri().clone();

-        if let Ok(forwarded_for_ip_addr) =
+        let source = if let Ok(forwarded_for_ip_addr) =
             forwarded_headers::handle_forwarded_for_headers(req.headers())
         {
-            info!(
-                "{} (via {}) {} {}",
-                forwarded_for_ip_addr,
-                addr,
-                req.method(),
-                uri
-            );
+            format!("{forwarded_for_ip_addr} (via {addr})")
         } else {
-            info!("{} {} {}", addr, req.method(), uri);
-        }
+            format!("{addr}")
+        };
+        // we only do this to log the access key, so we can discard any error
+        let key = self
+            .api_handler
+            .key_id_from_request(&req)
+            .map(|k| format!("(key {k}) "))
+            .unwrap_or_default();
+        info!("{source} {key}{} {uri}", req.method());
         debug!("{:?}", req);

         let tracer = opentelemetry::global::tracer("garage");

@@ -343,7 +350,11 @@ where

     while !*must_exit.borrow() {
         let (stream, client_addr) = tokio::select! {
-            acc = listener.accept() => acc?,
+            acc = listener.accept() => match acc {
+                Ok(r) => r,
+                Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
+                Err(e) => return Err(e.into()),
+            },
             _ = must_exit.changed() => continue,
         };
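Note how `key_id_from_request` is added with a default body returning `None`, so existing `ApiHandler` implementations keep compiling unchanged; the K2V and S3 servers override it further down in this diff by calling `Authorization::parse_header`. A rough sketch of such an override for a hypothetical handler, parsing the SigV4 credential by hand for illustration (the real implementations reuse the signature module instead):

```rust
use http::{header::AUTHORIZATION, Request};

struct MyApiServer; // hypothetical handler type, for illustration only

impl MyApiServer {
    // Extract the AWS SigV4 key id ("Credential=<key id>/...") from the
    // Authorization header; return None on any parse failure so the
    // request is still served and only the log line loses the key id.
    fn key_id_from_request<B>(&self, req: &Request<B>) -> Option<String> {
        let auth = req.headers().get(AUTHORIZATION)?.to_str().ok()?;
        let cred = auth.split("Credential=").nth(1)?;
        cred.split('/').next().map(str::to_string)
    }
}
```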
@@ -1,4 +1,4 @@
-use err_derive::Error;
+use thiserror::Error;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};

@@ -6,21 +6,21 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInterna
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
     Common(CommonError),

     /// Authorization Header Malformed
-    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+    #[error("Authorization header malformed, unexpected scope: {0}")]
     AuthorizationHeaderMalformed(String),

     // Category: bad request
     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error(display = "Invalid UTF-8: {}", _0)]
-    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+    #[error("Invalid UTF-8: {0}")]
+    InvalidUtf8Str(#[from] std::str::Utf8Error),

     /// The provided digest (checksum) value was invalid
-    #[error(display = "Invalid digest: {}", _0)]
+    #[error("Invalid digest: {0}")]
     InvalidDigest(String),
 }
@@ -104,7 +104,7 @@ async fn check_standard_signature(
     // Verify that all necessary request headers are included in signed_headers
     // The following must be included for all signatures:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request
+    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
     // AWS also indicates that the Content-Type header should be signed if
     // it is used, but Minio client doesn't sign it so we don't check it for compatibility.
     let signed_headers = split_signed_headers(&authorization)?;

@@ -151,7 +151,7 @@ async fn check_presigned_signature(
     // Verify that all necessary request headers are included in signed_headers
     // For AWSv4 pre-signed URLs, the following must be included:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request
+    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
     let signed_headers = split_signed_headers(&authorization)?;
     verify_signed_headers(request.headers(), &signed_headers)?;

@@ -268,7 +268,9 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
         return Err(Error::bad_request("Header `Host` should be signed"));
     }
     for (name, _) in headers.iter() {
-        if name.as_str().starts_with("x-amz-") {
+        // Enforce signature of all x-amz-* headers, except x-amz-content-sha256
+        // because it is included in the canonical request in all cases
+        if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
             if !signed_headers.contains(name) {
                 return Err(Error::bad_request(format!(
                     "Header `{}` should be signed",

@@ -417,7 +419,7 @@ pub async fn verify_v4(
 // ============ Authorization header, or X-Amz-* query params =========

 pub struct Authorization {
-    key_id: String,
+    pub key_id: String,
     scope: String,
     signed_headers: String,
     signature: String,

@@ -426,7 +428,7 @@ pub struct Authorization {
 }

 impl Authorization {
-    fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+    pub fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
         let authorization = headers
             .get(AUTHORIZATION)
             .ok_or_bad_request("Missing authorization header")?

@@ -468,8 +470,7 @@ impl Authorization {

         let date = headers
             .get(X_AMZ_DATE)
-            .ok_or_bad_request("Missing X-Amz-Date field")
-            .map_err(Error::from)?
+            .ok_or_bad_request("Missing X-Amz-Date field")?
             .to_str()?;
         let date = parse_date(date)?;
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_k2v"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
 garage_api_common.workspace = true

 base64.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true

 futures.workspace = true
@@ -176,6 +176,12 @@ impl ApiHandler for K2VApiServer {

         Ok(resp_ok)
     }
+
+    fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
+        garage_api_common::signature::payload::Authorization::parse_header(req.headers())
+            .map(|auth| auth.key_id)
+            .ok()
+    }
 }

 impl ApiEndpoint for K2VApiEndpoint {
@@ -1,6 +1,6 @@
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;

 use garage_api_common::common_error::{commonErrorDerivative, CommonError};
 pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};

@@ -14,38 +14,38 @@ use garage_api_common::signature::error::Error as SignatureError;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(#[from] CommonError),

     // Category: cannot process
     /// Authorization Header Malformed
-    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+    #[error("Authorization header malformed, unexpected scope: {0}")]
     AuthorizationHeaderMalformed(String),

     /// The provided digest (checksum) value was invalid
-    #[error(display = "Invalid digest: {}", _0)]
+    #[error("Invalid digest: {0}")]
     InvalidDigest(String),

     /// The object requested don't exists
-    #[error(display = "Key not found")]
+    #[error("Key not found")]
     NoSuchKey,

     /// Some base64 encoded data was badly encoded
-    #[error(display = "Invalid base64: {}", _0)]
-    InvalidBase64(#[error(source)] base64::DecodeError),
+    #[error("Invalid base64: {0}")]
+    InvalidBase64(#[from] base64::DecodeError),

     /// Invalid causality token
-    #[error(display = "Invalid causality token")]
+    #[error("Invalid causality token")]
     InvalidCausalityToken,

     /// The client asked for an invalid return format (invalid Accept header)
-    #[error(display = "Not acceptable: {}", _0)]
+    #[error("Not acceptable: {0}")]
     NotAcceptable(String),

     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error(display = "Invalid UTF-8: {}", _0)]
-    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+    #[error("Invalid UTF-8: {0}")]
+    InvalidUtf8Str(#[from] std::str::Utf8Error),
 }

 commonErrorDerivative!(Error);
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_s3"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -29,7 +29,7 @@ bytes.workspace = true
 chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 tracing.workspace = true
 md-5.workspace = true
@@ -226,6 +226,7 @@ impl ApiHandler for S3ApiServer {
             Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
             Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
             Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
+            Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
             Endpoint::ListObjects {
                 delimiter,
                 encoding_type,

@@ -342,6 +343,12 @@ impl ApiHandler for S3ApiServer {

         Ok(resp_ok)
     }
+
+    fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
+        garage_api_common::signature::payload::Authorization::parse_header(req.headers())
+            .map(|auth| auth.key_id)
+            .ok()
+    }
 }

 impl ApiEndpoint for S3ApiEndpoint {
@@ -5,7 +5,7 @@ use hyper::{Request, Response, StatusCode};
 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::Bucket;
 use garage_model::garage::Garage;
-use garage_model::key_table::Key;
+use garage_model::key_table::{Key, KeyParams};
 use garage_model::permission::BucketKeyPerm;
 use garage_table::util::*;
 use garage_util::crdt::*;

@@ -44,6 +44,55 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
         .body(string_body(xml))?)
 }

+pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+    let ReqCtx {
+        bucket_id, api_key, ..
+    } = ctx;
+    let key_p = api_key.params().ok_or_internal_error(
+        "Key should not be in deleted state at this point (in handle_get_bucket_acl)",
+    )?;
+
+    let mut grants: Vec<s3_xml::Grant> = vec![];
+    let kp = api_key.bucket_permissions(&bucket_id);
+
+    if kp.allow_owner {
+        grants.push(s3_xml::Grant {
+            grantee: create_grantee(&key_p, &api_key),
+            permission: s3_xml::Value("FULL_CONTROL".to_string()),
+        });
+    } else {
+        if kp.allow_read {
+            grants.push(s3_xml::Grant {
+                grantee: create_grantee(&key_p, &api_key),
+                permission: s3_xml::Value("READ".to_string()),
+            });
+            grants.push(s3_xml::Grant {
+                grantee: create_grantee(&key_p, &api_key),
+                permission: s3_xml::Value("READ_ACP".to_string()),
+            });
+        }
+        if kp.allow_write {
+            grants.push(s3_xml::Grant {
+                grantee: create_grantee(&key_p, &api_key),
+                permission: s3_xml::Value("WRITE".to_string()),
+            });
+        }
+    }
+
+    let access_control_policy = s3_xml::AccessControlPolicy {
+        xmlns: (),
+        owner: None,
+        acl: s3_xml::AccessControlList { entries: grants },
+    };
+
+    let xml = s3_xml::to_xml_with_header(&access_control_policy)?;
+    trace!("xml: {}", xml);
+
+    Ok(Response::builder()
+        .header("Content-Type", "application/xml")
+        .body(string_body(xml))?)
+}
+
 pub async fn handle_list_buckets(
     garage: &Garage,
     api_key: &Key,

@@ -311,6 +360,15 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
     Some(ret)
 }

+fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee {
+    s3_xml::Grantee {
+        xmlns_xsi: (),
+        typ: "CanonicalUser".to_string(),
+        display_name: Some(s3_xml::Value(key_params.name.get().to_string())),
+        id: Some(s3_xml::Value(api_key.key_id.to_string())),
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -26,7 +26,7 @@ use garage_api_common::signature::checksum::*;
 use crate::api_server::{ReqBody, ResBody};
 use crate::encryption::EncryptionParams;
 use crate::error::*;
-use crate::get::{full_object_byte_stream, PreconditionHeaders};
+use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
 use crate::multipart;
 use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
 use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;

@@ -237,6 +237,7 @@ async fn handle_copy_metaonly(
         .get(&source_version.uuid, &EmptyKey)
         .await?;
     let source_version = source_version.ok_or(Error::NoSuchKey)?;
+    check_version_not_deleted(&source_version)?;
 
     // Write an "uploading" marker in Object table
     // This holds a reference to the object in the Version table

@@ -428,6 +429,7 @@ pub async fn handle_upload_part_copy(
         .get(&source_object_version.uuid, &EmptyKey)
         .await?
        .ok_or(Error::NoSuchKey)?;
+    check_version_not_deleted(&source_version)?;
 
     // We want to reuse blocks from the source version as much as possible.
     // However, we still need to get the data from these blocks

@@ -559,6 +561,7 @@ pub async fn handle_upload_part_copy(
 
     let mut current_offset = 0;
     let mut next_block = defragmenter.next().await?;
+    let mut blocks_to_dup = dest_version.clone();
 
     // TODO this could be optimized similarly to read_and_put_blocks
     // low priority because uploadpartcopy is rarely used

@@ -588,8 +591,7 @@ pub async fn handle_upload_part_copy(
             .unwrap()?;
         checksummer = checksummer_updated;
 
-        dest_version.blocks.clear();
-        dest_version.blocks.put(
+        let (version_block_key, version_block) = (
             VersionBlockKey {
                 part_number,
                 offset: current_offset,

@@ -601,37 +603,56 @@ pub async fn handle_upload_part_copy(
         );
         current_offset += data_len;
 
-        let block_ref = BlockRef {
-            block: final_hash,
-            version: dest_version_id,
-            deleted: false.into(),
+        let next = if let Some(final_data) = data_to_upload {
+            dest_version.blocks.clear();
+            dest_version.blocks.put(version_block_key, version_block);
+            let block_ref = BlockRef {
+                block: final_hash,
+                version: dest_version_id,
+                deleted: false.into(),
            };
+            let (_, _, _, next) = futures::try_join!(
+                // Thing 1: if the block is not exactly a block that existed before,
+                // we need to insert that data as a new block.
+                garage.block_manager.rpc_put_block(
+                    final_hash,
+                    final_data,
+                    dest_encryption.is_encrypted(),
+                    None
+                ),
+                // Thing 2: we need to insert the block in the version
+                garage.version_table.insert(&dest_version),
+                // Thing 3: we need to add a block reference
+                garage.block_ref_table.insert(&block_ref),
+                // Thing 4: we need to read the next block
+                defragmenter.next(),
+            )?;
+            next
+        } else {
+            blocks_to_dup.blocks.put(version_block_key, version_block);
+            defragmenter.next().await?
+        };
 
-        let (_, _, _, next) = futures::try_join!(
-            // Thing 1: if the block is not exactly a block that existed before,
-            // we need to insert that data as a new block.
-            async {
-                if let Some(final_data) = data_to_upload {
-                    garage
-                        .block_manager
-                        .rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
-                        .await
-                } else {
-                    Ok(())
-                }
-            },
-            // Thing 2: we need to insert the block in the version
-            garage.version_table.insert(&dest_version),
-            // Thing 3: we need to add a block reference
-            garage.block_ref_table.insert(&block_ref),
-            // Thing 4: we need to read the next block
-            defragmenter.next(),
-        )?;
         next_block = next;
     }
 
     assert_eq!(current_offset, source_range.length);
 
+    // Put the duplicated blocks into the version & block_refs tables
+    let block_refs_to_put = blocks_to_dup
+        .blocks
+        .items()
+        .iter()
+        .map(|b| BlockRef {
+            block: b.1.hash,
+            version: dest_version_id,
+            deleted: false.into(),
+        })
+        .collect::<Vec<_>>();
+    futures::try_join!(
+        garage.version_table.insert(&blocks_to_dup),
+        garage.block_ref_table.insert_many(&block_refs_to_put[..]),
+    )?;
+
     let checksums = checksummer.finalize();
     let etag = dest_encryption.etag_from_md5(&checksums.md5);
     let checksum = checksums.extract(dest_object_checksum_algorithm);
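The rewritten loop above overlaps four independent operations with futures::try_join!. A minimal standalone sketch of the pattern (hypothetical helper futures standing in for rpc_put_block, the table inserts, and the next defragmenter read; not Garage's API): all futures run concurrently, and the first error aborts the whole step.

    use futures::try_join;

    // Hypothetical async step used only for illustration.
    async fn step(name: &str) -> Result<&str, String> {
        Ok(name)
    }

    async fn pipeline() -> Result<(), String> {
        // All four futures make progress concurrently; `?` propagates the
        // first error and drops the remaining futures.
        let (_put, _version, _block_ref, next) =
            try_join!(step("put"), step("version"), step("block_ref"), step("next"))?;
        println!("next block: {}", next);
        Ok(())
    }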
@@ -88,7 +88,9 @@ pub async fn handle_put_cors(
 pub struct CorsConfiguration {
     #[serde(serialize_with = "xmlns_tag", skip_deserializing)]
     pub xmlns: (),
-    #[serde(rename = "CORSRule")]
+    // "default" is required to be able to parse an empty list of rules,
+    // cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
+    #[serde(rename = "CORSRule", default)]
     pub cors_rules: Vec<CorsRule>,
 }
 

@@ -270,4 +272,26 @@ mod tests {
 
         Ok(())
     }
+
+    #[test]
+    fn test_deserialize_norules() -> Result<(), Error> {
+        let message = r#"<?xml version="1.0" encoding="UTF-8"?>
+<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
+        let conf: CorsConfiguration = from_str(message).unwrap();
+        let ref_value = CorsConfiguration {
+            xmlns: (),
+            cors_rules: vec![],
+        };
+        assert_eq! {
+            ref_value,
+            conf
+        };
+
+        let message2 = to_xml_with_header(&ref_value)?;
+
+        let cleanup = |c: &str| c.replace(char::is_whitespace, "");
+        assert_eq!(cleanup(message), cleanup(&message2));
+
+        Ok(())
+    }
 }
@@ -1,8 +1,8 @@
 use std::convert::TryInto;
 
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;
 
 use garage_model::helper::error::Error as HelperError;
 

@@ -25,67 +25,67 @@ use crate::xml as s3_xml;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error(display = "{}", _0)]
+    #[error("{0}")]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(#[from] CommonError),
 
     // Category: cannot process
     /// Authorization Header Malformed
-    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
+    #[error("Authorization header malformed, unexpected scope: {0}")]
     AuthorizationHeaderMalformed(String),
 
     /// The requested object doesn't exist
-    #[error(display = "Key not found")]
+    #[error("Key not found")]
     NoSuchKey,
 
     /// The requested multipart upload doesn't exist
-    #[error(display = "Upload not found")]
+    #[error("Upload not found")]
     NoSuchUpload,
 
     /// Precondition failed (e.g. x-amz-copy-source-if-match)
-    #[error(display = "At least one of the preconditions you specified did not hold")]
+    #[error("At least one of the preconditions you specified did not hold")]
     PreconditionFailed,
 
     /// Parts specified in CMU request do not match parts actually uploaded
-    #[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
+    #[error("Parts given to CompleteMultipartUpload do not match uploaded parts")]
     InvalidPart,
 
     /// Parts given to CompleteMultipartUpload were not in ascending order
-    #[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
+    #[error("Parts given to CompleteMultipartUpload were not in ascending order")]
     InvalidPartOrder,
 
     /// In CompleteMultipartUpload: not enough data
     /// (here we are more lenient than AWS S3)
-    #[error(display = "Proposed upload is smaller than the minimum allowed object size")]
+    #[error("Proposed upload is smaller than the minimum allowed object size")]
     EntityTooSmall,
 
     // Category: bad request
     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error(display = "Invalid UTF-8: {}", _0)]
-    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
+    #[error("Invalid UTF-8: {0}")]
+    InvalidUtf8Str(#[from] std::str::Utf8Error),
 
     /// The request used an invalid path
-    #[error(display = "Invalid UTF-8: {}", _0)]
-    InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
+    #[error("Invalid UTF-8: {0}")]
+    InvalidUtf8String(#[from] std::string::FromUtf8Error),
 
     /// The client sent invalid XML data
-    #[error(display = "Invalid XML: {}", _0)]
+    #[error("Invalid XML: {0}")]
     InvalidXml(String),
 
     /// The client sent a range header with invalid value
-    #[error(display = "Invalid HTTP range: {:?}", _0)]
-    InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
+    #[error("Invalid HTTP range: {0:?}")]
+    InvalidRange((http_range::HttpRangeParseError, u64)),
 
     /// The client sent an invalid encryption algorithm
-    #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
+    #[error("Invalid encryption algorithm: {0:?}, should be AES256")]
     InvalidEncryptionAlgorithm(String),
 
     /// The provided digest (checksum) value was invalid
-    #[error(display = "Invalid digest: {}", _0)]
+    #[error("Invalid digest: {0}")]
     InvalidDigest(String),
 
     /// The client sent a request for an action not supported by garage
-    #[error(display = "Unimplemented action: {}", _0)]
+    #[error("Unimplemented action: {0}")]
     NotImplemented(String),
 }
 

@@ -99,6 +99,12 @@ impl From<HelperError> for Error {
     }
 }
 
+impl From<(http_range::HttpRangeParseError, u64)> for Error {
+    fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
+        Error::InvalidRange(err)
+    }
+}
+
 impl From<roxmltree::Error> for Error {
     fn from(err: roxmltree::Error) -> Self {
         Self::InvalidXml(format!("{}", err))
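The hunks above migrate the error types from err-derive to thiserror. A minimal sketch of the new attribute syntax on a hypothetical two-variant enum (not Garage's full error type): positional fields are referenced as {0} instead of _0, and #[from] replaces #[error(source)] and #[error(from)].

    use thiserror::Error;

    // Hypothetical example enum; only the attribute syntax matters here.
    #[derive(Debug, Error)]
    pub enum DemoError {
        // err-derive equivalent: #[error(display = "I/O error: {}", _0)]
        #[error("I/O error: {0}")]
        Io(#[from] std::io::Error),

        // err-derive equivalent: #[error(display = "bad value: {:?}", _0)]
        #[error("bad value: {0:?}")]
        BadValue(String),
    }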
@@ -19,12 +19,13 @@ use garage_net::stream::ByteStream;
 use garage_rpc::rpc_helper::OrderTag;
 use garage_table::EmptyKey;
 use garage_util::data::*;
-use garage_util::error::OkOrMessage;
+use garage_util::error::{Error as UtilError, OkOrMessage};
 
 use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
+use garage_api_common::common_error::CommonError;
 use garage_api_common::helpers::*;
 use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
 

@@ -215,6 +216,7 @@ pub async fn handle_head_without_ctx(
                 .get(&object_version.uuid, &EmptyKey)
                 .await?
                 .ok_or(Error::NoSuchKey)?;
+            check_version_not_deleted(&version)?;
 
             let (part_offset, part_end) =
                 calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;

@@ -365,6 +367,21 @@ pub async fn handle_get_without_ctx(
     }
 }
 
+pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> {
+    if version.deleted.get() {
+        // the version was deleted between when the object_table was consulted
+        // and now; this could mean the object was deleted or overwritten.
+        // Rather than say the key doesn't exist, return a transient error
+        // to signal the client to try again.
+        return Err(CommonError::InternalError(UtilError::Message(
+            "conflict/inconsistency between object and version state, version is deleted"
+                .to_string(),
+        ))
+        .into());
+    }
+    Ok(())
+}
+
 async fn handle_get_full(
     garage: Arc<Garage>,
     version: &ObjectVersion,

@@ -431,6 +448,7 @@ pub fn full_object_byte_stream(
             .ok_or_message("channel closed")?;
 
         let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
+        check_version_not_deleted(&version)?;
         for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
             let stream_block_i = encryption
                 .get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))

@@ -446,6 +464,14 @@ pub fn full_object_byte_stream(
         {
             Ok(()) => (),
             Err(e) => {
+                // TODO i think this is a bad idea, we should log
+                // an error and stop there. If the error happens to
+                // be exactly the size of what hasn't been streamed
+                // yet, the client will see the request as a
+                // success.
+                // Instead of truncating the output, notify the client
+                // that something happened with their download, so that
+                // they can retry it
                 let _ = tx.send(error_stream_item(e)).await;
             }
         }

@@ -497,7 +523,7 @@ async fn handle_get_range(
                 .get(&version.uuid, &EmptyKey)
                 .await?
                 .ok_or(Error::NoSuchKey)?;
-
+            check_version_not_deleted(&version)?;
             let body =
                 body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
             Ok(resp_builder.body(body)?)

@@ -548,6 +574,8 @@ async fn handle_get_part(
                 .await?
                 .ok_or(Error::NoSuchKey)?;
 
+            check_version_not_deleted(&version)?;
+
             let (begin, end) =
                 calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
 

@@ -817,7 +845,9 @@ impl PreconditionHeaders {
     }
 
     fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
-        let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
+        // we store date with ms precision, but headers are precise to the second: truncate
+        // the timestamp to handle the same-second edge case
+        let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
 
         // Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
 
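The precondition fix above truncates the stored millisecond timestamp to whole seconds before comparing it with HTTP date headers, which only carry second precision. A small standalone sketch of the edge case, using only std:

    use std::time::{Duration, UNIX_EPOCH};

    fn main() {
        // An object stored at 12.345s compared against a header parsed as 12s
        // must compare as equal, not as "modified later".
        let timestamp_ms: u64 = 12_345;
        let header_date = UNIX_EPOCH + Duration::from_secs(12);

        let v_date_old = UNIX_EPOCH + Duration::from_millis(timestamp_ms);
        let v_date_new = UNIX_EPOCH + Duration::from_secs(timestamp_ms / 1000);

        assert!(v_date_old > header_date); // old code: spurious precondition failure
        assert!(v_date_new <= header_date); // truncated: same-second writes pass
    }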
@@ -141,10 +141,26 @@ pub async fn handle_post_object(
 
     let mut conditions = decoded_policy.into_conditions()?;
 
+    // If there are conditions on the bucket name, check these against the actual bucket_name rather
+    // than the one in params, which is allowed to be absent.
+    if let Some(conds) = conditions.params.remove("bucket") {
+        for cond in conds {
+            let ok = match cond {
+                Operation::Equal(s) => s.as_str() == bucket_name,
+                Operation::StartsWith(s) => bucket_name.starts_with(&s),
+            };
+            if !ok {
+                return Err(Error::bad_request(
+                    "Key 'bucket' has value not allowed in policy",
+                ));
+            }
+        }
+    }
+
     for (param_key, value) in params.iter() {
         let param_key = param_key.as_str();
         match param_key {
-            "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
+            "policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields
             "content-type" => {
                 let conds = conditions.params.remove("content-type").ok_or_else(|| {
                     Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
@@ -39,8 +39,6 @@ use crate::encryption::EncryptionParams;
 use crate::error::*;
 use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
 
-const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
-
 pub(crate) struct SaveStreamResult {
     pub(crate) version_uuid: Uuid,
     pub(crate) version_timestamp: u64,

@@ -493,7 +491,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
         };
         let recv_next = async {
             // If more than a maximum number of writes are in progress, don't add more for now
-            if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
+            if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request {
                 futures::future::pending().await
             } else {
                 block_rx3.recv().await
@@ -13,6 +13,10 @@ pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
     s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/")
 }
 
+pub fn xmlns_xsi_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
+    s.serialize_str("http://www.w3.org/2001/XMLSchema-instance")
+}
+
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Value(#[serde(rename = "$value")] pub String);
 

@@ -319,6 +323,42 @@ pub struct PostObject {
     pub etag: Value,
 }
 
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct Grantee {
+    #[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")]
+    pub xmlns_xsi: (),
+    #[serde(rename = "xsi:type")]
+    pub typ: String,
+    #[serde(rename = "DisplayName")]
+    pub display_name: Option<Value>,
+    #[serde(rename = "ID")]
+    pub id: Option<Value>,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct Grant {
+    #[serde(rename = "Grantee")]
+    pub grantee: Grantee,
+    #[serde(rename = "Permission")]
+    pub permission: Value,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct AccessControlList {
+    #[serde(rename = "Grant")]
+    pub entries: Vec<Grant>,
+}
+
+#[derive(Debug, Serialize, PartialEq, Eq)]
+pub struct AccessControlPolicy {
+    #[serde(serialize_with = "xmlns_tag")]
+    pub xmlns: (),
+    #[serde(rename = "Owner")]
+    pub owner: Option<Owner>,
+    #[serde(rename = "AccessControlList")]
+    pub acl: AccessControlList,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -427,6 +467,43 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn get_bucket_acl_result() -> Result<(), ApiError> {
+        let grant = Grant {
+            grantee: Grantee {
+                xmlns_xsi: (),
+                typ: "CanonicalUser".to_string(),
+                display_name: Some(Value("owner_name".to_string())),
+                id: Some(Value("qsdfjklm".to_string())),
+            },
+            permission: Value("FULL_CONTROL".to_string()),
+        };
+
+        let get_bucket_acl = AccessControlPolicy {
+            xmlns: (),
+            owner: None,
+            acl: AccessControlList {
+                entries: vec![grant],
+            },
+        };
+        assert_eq!(
+            to_xml_with_header(&get_bucket_acl)?,
+            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
+<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
+<AccessControlList>\
+<Grant>\
+<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
+<DisplayName>owner_name</DisplayName>\
+<ID>qsdfjklm</ID>\
+</Grantee>\
+<Permission>FULL_CONTROL</Permission>\
+</Grant>\
+</AccessControlList>\
+</AccessControlPolicy>"
+        );
+        Ok(())
+    }
+
     #[test]
     fn delete_result() -> Result<(), ApiError> {
         let delete_result = DeleteResult {
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -50,6 +50,8 @@ pub const INLINE_THRESHOLD: usize = 3072;
 // to delete the block locally.
 pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);
 
+const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);
+
 /// RPC messages used to share blocks of data between nodes
 #[derive(Debug, Serialize, Deserialize)]
 pub enum BlockRpc {

@@ -87,6 +89,7 @@ pub struct BlockManager {
     disable_scrub: bool,
 
     mutation_lock: Vec<Mutex<BlockManagerLocked>>,
+    read_semaphore: Semaphore,
 
     pub rc: BlockRc,
     pub resync: BlockResyncManager,

@@ -176,6 +179,8 @@ impl BlockManager {
                 .iter()
                 .map(|_| Mutex::new(BlockManagerLocked()))
                 .collect::<Vec<_>>(),
+
+            read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
             rc,
             resync,
             system,

@@ -408,8 +413,8 @@ impl BlockManager {
     }
 
     /// Get number of items in the refcount table
-    pub fn rc_len(&self) -> Result<usize, Error> {
-        Ok(self.rc.rc_table.len()?)
+    pub fn rc_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.rc.rc_table.approximate_len()?)
     }
 
     /// Send command to start/stop/manage scrub worker

@@ -427,7 +432,7 @@ impl BlockManager {
 
     /// List all resync errors
     pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
-        let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
+        let mut blocks = Vec::with_capacity(self.resync.errors.approximate_len()?);
         for ent in self.resync.errors.iter()? {
             let (hash, cnt) = ent?;
             let cnt = ErrorCounter::decode(&cnt);

@@ -557,9 +562,6 @@ impl BlockManager {
         match self.find_block(hash).await {
             Some(p) => self.read_block_from(hash, &p).await,
             None => {
-                // Not found but maybe we should have had it ??
-                self.resync
-                    .put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
                 return Err(Error::Message(format!(
                     "block {:?} not found on node",
                     hash

@@ -581,6 +583,15 @@ impl BlockManager {
     ) -> Result<DataBlock, Error> {
         let (header, path) = block_path.as_parts_ref();
 
+        let permit = tokio::select! {
+            sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
+            _ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
+                self.metrics.block_read_semaphore_timeouts.add(1);
+                debug!("read block {:?}: read_semaphore acquire timeout", hash);
+                return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
+            }
+        };
+
         let mut f = fs::File::open(&path).await?;
         let mut data = vec![];
         f.read_to_end(&mut data).await?;

@@ -605,6 +616,8 @@ impl BlockManager {
             return Err(Error::CorruptData(*hash));
         }
 
+        drop(permit);
+
         Ok(data)
     }
 

@@ -770,6 +783,7 @@ impl BlockManagerLocked {
 
         let mut f = fs::File::create(&path_tmp).await?;
         f.write_all(data).await?;
+        f.flush().await?;
         mgr.metrics.bytes_written.add(data.len() as u64);
 
         if mgr.data_fsync {
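The read path above bounds concurrent disk reads with a semaphore and gives up after a timeout instead of queueing forever. A minimal sketch of the same pattern using plain tokio (not Garage's BlockManager; names are illustrative):

    use std::time::Duration;
    use tokio::sync::Semaphore;

    // Wait for a read permit, but bail out after 15s rather than letting
    // requests pile up behind slow disk reads.
    async fn read_with_permit(sem: &Semaphore) -> Result<Vec<u8>, String> {
        let permit = tokio::select! {
            p = sem.acquire() => p.map_err(|_| "semaphore closed".to_string())?,
            _ = tokio::time::sleep(Duration::from_secs(15)) => {
                return Err("timed out waiting for a read permit".to_string());
            }
        };
        let data = vec![]; // ... the actual file read happens here ...
        drop(permit); // release the permit once the read is done
        Ok(data)
    }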
@@ -22,6 +22,7 @@ pub struct BlockManagerMetrics {
 
     pub(crate) bytes_read: BoundCounter<u64>,
     pub(crate) block_read_duration: BoundValueRecorder<f64>,
+    pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
     pub(crate) bytes_written: BoundCounter<u64>,
     pub(crate) block_write_duration: BoundValueRecorder<f64>,
     pub(crate) delete_counter: BoundCounter<u64>,

@@ -50,7 +51,7 @@ impl BlockManagerMetrics {
             .init(),
         _rc_size: meter
             .u64_value_observer("block.rc_size", move |observer| {
-                if let Ok(value) = rc_tree.len() {
+                if let Ok(value) = rc_tree.approximate_len() {
                     observer.observe(value as u64, &[])
                 }
             })

@@ -58,7 +59,7 @@ impl BlockManagerMetrics {
             .init(),
         _resync_queue_len: meter
             .u64_value_observer("block.resync_queue_length", move |observer| {
-                if let Ok(value) = resync_queue.len() {
+                if let Ok(value) = resync_queue.approximate_len() {
                     observer.observe(value as u64, &[]);
                 }
             })

@@ -68,7 +69,7 @@ impl BlockManagerMetrics {
             .init(),
         _resync_errored_blocks: meter
             .u64_value_observer("block.resync_errored_blocks", move |observer| {
-                if let Ok(value) = resync_errors.len() {
+                if let Ok(value) = resync_errors.approximate_len() {
                     observer.observe(value as u64, &[]);
                 }
             })

@@ -119,6 +120,11 @@ impl BlockManagerMetrics {
             .with_description("Duration of block read operations")
             .init()
             .bind(&[]),
+        block_read_semaphore_timeouts: meter
+            .u64_counter("block.read_semaphore_timeouts")
+            .with_description("Number of block reads that failed due to semaphore acquire timeout")
+            .init()
+            .bind(&[]),
         bytes_written: meter
             .u64_counter("block.bytes_written")
             .with_description("Number of bytes written to disk")
@@ -106,13 +106,13 @@ impl BlockResyncManager {
     }
 
     /// Get length of resync queue
-    pub fn queue_len(&self) -> Result<usize, Error> {
-        Ok(self.queue.len()?)
+    pub fn queue_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.queue.approximate_len()?)
     }
 
     /// Get number of blocks that have an error
-    pub fn errors_len(&self) -> Result<usize, Error> {
-        Ok(self.errors.len()?)
+    pub fn errors_approximate_len(&self) -> Result<usize, Error> {
+        Ok(self.errors.approximate_len()?)
     }
 
     /// Clear the error counter for a block and put it in queue immediately

@@ -133,6 +133,14 @@ impl BlockResyncManager {
         )))
     }
 
+    /// Clear the entire resync queue and list of errored blocks
+    /// Corresponds to `garage repair clear-resync-queue`
+    pub fn clear_resync_queue(&self) -> Result<(), Error> {
+        self.queue.clear()?;
+        self.errors.clear()?;
+        Ok(())
+    }
+
     pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
         let notify = self.notify.clone();
         vars.register_rw(

@@ -548,9 +556,11 @@ impl Worker for ResyncWorker {
         }
 
         WorkerStatus {
-            queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
+            queue_length: Some(self.manager.resync.queue_approximate_len().unwrap_or(0) as u64),
             tranquility: Some(tranquility),
-            persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
+            persistent_errors: Some(
+                self.manager.resync.errors_approximate_len().unwrap_or(0) as u64
+            ),
             ..Default::default()
         }
     }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -12,14 +12,18 @@ readme = "../../README.md"
 path = "lib.rs"
 
 [dependencies]
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true
 
 heed = { workspace = true, optional = true }
 
 rusqlite = { workspace = true, optional = true, features = ["backup"] }
 r2d2 = { workspace = true, optional = true }
 r2d2_sqlite = { workspace = true, optional = true }
 
+fjall = { workspace = true, optional = true }
+parking_lot = { workspace = true, optional = true }
+
 [dev-dependencies]
 mktemp.workspace = true
 

@@ -27,4 +31,5 @@ mktemp.workspace = true
 default = [ "lmdb", "sqlite" ]
 bundled-libs = [ "rusqlite?/bundled" ]
 lmdb = [ "heed" ]
+fjall = [ "dep:fjall", "dep:parking_lot" ]
 sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]

src/db/fjall_adapter.rs (new file, 453 lines)
@@ -0,0 +1,453 @@
use core::ops::Bound;

use std::path::PathBuf;
use std::sync::Arc;

use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

use fjall::{
    PartitionCreateOptions, PersistMode, TransactionalKeyspace, TransactionalPartitionHandle,
    WriteTransaction,
};

use crate::{
    open::{Engine, OpenOpt},
    Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
    TxResult, TxValueIter, Value, ValueIter,
};

pub use fjall;

// --

pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
    info!("Opening Fjall database at: {}", path.display());
    if opt.fsync {
        return Err(Error(
            "metadata_fsync is not supported with the Fjall database engine".into(),
        ));
    }
    let mut config = fjall::Config::new(path);
    if let Some(block_cache_size) = opt.fjall_block_cache_size {
        config = config.cache_size(block_cache_size as u64);
    }
    let keyspace = config.open_transactional()?;
    Ok(FjallDb::init(keyspace))
}

// -- err

impl From<fjall::Error> for Error {
    fn from(e: fjall::Error) -> Error {
        Error(format!("fjall: {}", e).into())
    }
}

impl From<fjall::LsmError> for Error {
    fn from(e: fjall::LsmError) -> Error {
        Error(format!("fjall lsm_tree: {}", e).into())
    }
}

impl From<fjall::Error> for TxOpError {
    fn from(e: fjall::Error) -> TxOpError {
        TxOpError(e.into())
    }
}

// -- db

pub struct FjallDb {
    keyspace: TransactionalKeyspace,
    trees: RwLock<Vec<(String, TransactionalPartitionHandle)>>,
}

type ByteRefRangeBound<'r> = (Bound<&'r [u8]>, Bound<&'r [u8]>);

impl FjallDb {
    pub fn init(keyspace: TransactionalKeyspace) -> Db {
        let s = Self {
            keyspace,
            trees: RwLock::new(Vec::new()),
        };
        Db(Arc::new(s))
    }

    fn get_tree(
        &self,
        i: usize,
    ) -> Result<MappedRwLockReadGuard<'_, TransactionalPartitionHandle>> {
        RwLockReadGuard::try_map(self.trees.read(), |trees: &Vec<_>| {
            trees.get(i).map(|tup| &tup.1)
        })
        .map_err(|_| Error("invalid tree id".into()))
    }
}

impl IDb for FjallDb {
    fn engine(&self) -> String {
        "Fjall (EXPERIMENTAL!)".into()
    }

    fn open_tree(&self, name: &str) -> Result<usize> {
        let mut trees = self.trees.write();
        let safe_name = encode_name(name)?;
        if let Some(i) = trees.iter().position(|(name, _)| *name == safe_name) {
            Ok(i)
        } else {
            let tree = self
                .keyspace
                .open_partition(&safe_name, PartitionCreateOptions::default())?;
            let i = trees.len();
            trees.push((safe_name, tree));
            Ok(i)
        }
    }

    fn list_trees(&self) -> Result<Vec<String>> {
        Ok(self
            .keyspace
            .list_partitions()
            .iter()
            .map(|n| decode_name(&n))
            .collect::<Result<Vec<_>>>()?)
    }

    fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
        std::fs::create_dir_all(base_path)?;
        let path = Engine::Fjall.db_path(base_path);

        let source_state = self.keyspace.read_tx();
        let copy_keyspace = fjall::Config::new(path).open()?;

        for partition_name in self.keyspace.list_partitions() {
            let source_partition = self
                .keyspace
                .open_partition(&partition_name, PartitionCreateOptions::default())?;
            let copy_partition =
                copy_keyspace.open_partition(&partition_name, PartitionCreateOptions::default())?;

            for entry in source_state.iter(&source_partition) {
                let (key, value) = entry?;
                copy_partition.insert(key, value)?;
            }
        }

        copy_keyspace.persist(PersistMode::SyncAll)?;
        Ok(())
    }

    // ----

    fn get(&self, tree_idx: usize, key: &[u8]) -> Result<Option<Value>> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        let val = tx.get(&tree, key)?;
        match val {
            None => Ok(None),
            Some(v) => Ok(Some(v.to_vec())),
        }
    }

    fn approximate_len(&self, tree_idx: usize) -> Result<usize> {
        let tree = self.get_tree(tree_idx)?;
        Ok(tree.approximate_len())
    }

    fn is_empty(&self, tree_idx: usize) -> Result<bool> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        Ok(tx.is_empty(&tree)?)
    }

    fn insert(&self, tree_idx: usize, key: &[u8], value: &[u8]) -> Result<()> {
        let tree = self.get_tree(tree_idx)?;
        let mut tx = self.keyspace.write_tx();
        tx.insert(&tree, key, value);
        tx.commit()?;
        Ok(())
    }

    fn remove(&self, tree_idx: usize, key: &[u8]) -> Result<()> {
        let tree = self.get_tree(tree_idx)?;
        let mut tx = self.keyspace.write_tx();
        tx.remove(&tree, key);
        tx.commit()?;
        Ok(())
    }

    fn clear(&self, tree_idx: usize) -> Result<()> {
        let mut trees = self.trees.write();

        if tree_idx >= trees.len() {
            return Err(Error("invalid tree id".into()));
        }
        let (name, tree) = trees.remove(tree_idx);

        self.keyspace.delete_partition(tree)?;
        let tree = self
            .keyspace
            .open_partition(&name, PartitionCreateOptions::default())?;
        trees.insert(tree_idx, (name, tree));

        Ok(())
    }

    fn iter(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        Ok(Box::new(tx.iter(&tree).map(iterator_remap)))
    }

    fn iter_rev(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        Ok(Box::new(tx.iter(&tree).rev().map(iterator_remap)))
    }

    fn range<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        Ok(Box::new(
            tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
                .map(iterator_remap),
        ))
    }

    fn range_rev<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> Result<ValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let tx = self.keyspace.read_tx();
        Ok(Box::new(
            tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
                .rev()
                .map(iterator_remap),
        ))
    }

    // ----

    fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
        let trees = self.trees.read();
        let mut tx = FjallTx {
            trees: &trees[..],
            tx: self.keyspace.write_tx(),
        };

        let res = f.try_on(&mut tx);
        match res {
            TxFnResult::Ok(on_commit) => {
                tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
                Ok(on_commit)
            }
            TxFnResult::Abort => {
                tx.tx.rollback();
                Err(TxError::Abort(()))
            }
            TxFnResult::DbErr => {
                tx.tx.rollback();
                Err(TxError::Db(Error(
                    "(this message will be discarded)".into(),
                )))
            }
        }
    }
}

// ----

struct FjallTx<'a> {
    trees: &'a [(String, TransactionalPartitionHandle)],
    tx: WriteTransaction<'a>,
}

impl<'a> FjallTx<'a> {
    fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalPartitionHandle> {
        self.trees.get(i).map(|tup| &tup.1).ok_or_else(|| {
            TxOpError(Error(
                "invalid tree id (it might have been opened after the transaction started)".into(),
            ))
        })
    }
}

impl<'a> ITx for FjallTx<'a> {
    fn get(&self, tree_idx: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
        let tree = self.get_tree(tree_idx)?;
        match self.tx.get(tree, key)? {
            Some(v) => Ok(Some(v.to_vec())),
            None => Ok(None),
        }
    }

    fn len(&self, tree_idx: usize) -> TxOpResult<usize> {
        let tree = self.get_tree(tree_idx)?;
        Ok(self.tx.len(tree)? as usize)
    }

    fn insert(&mut self, tree_idx: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
        let tree = self.get_tree(tree_idx)?.clone();
        self.tx.insert(&tree, key, value);
        Ok(())
    }

    fn remove(&mut self, tree_idx: usize, key: &[u8]) -> TxOpResult<()> {
        let tree = self.get_tree(tree_idx)?.clone();
        self.tx.remove(&tree, key);
        Ok(())
    }

    fn clear(&mut self, _tree_idx: usize) -> TxOpResult<()> {
        unimplemented!("LSM tree clearing in cross-partition transaction is not supported")
    }

    fn iter(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?.clone();
        Ok(Box::new(self.tx.iter(&tree).map(iterator_remap_tx)))
    }

    fn iter_rev(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?.clone();
        Ok(Box::new(self.tx.iter(&tree).rev().map(iterator_remap_tx)))
    }

    fn range<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let low = clone_bound(low);
        let high = clone_bound(high);
        Ok(Box::new(
            self.tx
                .range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
                .map(iterator_remap_tx),
        ))
    }

    fn range_rev<'r>(
        &self,
        tree_idx: usize,
        low: Bound<&'r [u8]>,
        high: Bound<&'r [u8]>,
    ) -> TxOpResult<TxValueIter<'_>> {
        let tree = self.get_tree(tree_idx)?;
        let low = clone_bound(low);
        let high = clone_bound(high);
        Ok(Box::new(
            self.tx
                .range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
                .rev()
                .map(iterator_remap_tx),
        ))
    }
}

// -- maps fjall's (k, v) to ours

fn iterator_remap(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> Result<(Value, Value)> {
    r.map(|(k, v)| (k.to_vec(), v.to_vec()))
        .map_err(|e| e.into())
}

fn iterator_remap_tx(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> TxOpResult<(Value, Value)> {
    r.map(|(k, v)| (k.to_vec(), v.to_vec()))
        .map_err(|e| e.into())
}

// -- utils to deal with Garage's tightness on Bound lifetimes

type ByteVecBound = Bound<Vec<u8>>;
type ByteVecRangeBounds = (ByteVecBound, ByteVecBound);

fn clone_bound(bound: Bound<&[u8]>) -> ByteVecBound {
    let value = match bound {
        Bound::Excluded(v) | Bound::Included(v) => v.to_vec(),
        Bound::Unbounded => vec![],
    };

    match bound {
        Bound::Included(_) => Bound::Included(value),
        Bound::Excluded(_) => Bound::Excluded(value),
        Bound::Unbounded => Bound::Unbounded,
    }
}

// -- utils to encode table names --

fn encode_name(s: &str) -> Result<String> {
    let base = 'A' as u32;

    let mut ret = String::with_capacity(s.len() + 10);
    for c in s.chars() {
        if c.is_alphanumeric() || c == '_' || c == '-' || c == '#' {
            ret.push(c);
        } else if c <= u8::MAX as char {
            ret.push('$');
            let c_hi = c as u32 / 16;
            let c_lo = c as u32 % 16;
            ret.push(char::from_u32(base + c_hi).unwrap());
            ret.push(char::from_u32(base + c_lo).unwrap());
        } else {
            return Err(Error(
                format!("table name {} could not be safely encoded", s).into(),
            ));
        }
    }
    Ok(ret)
}

fn decode_name(s: &str) -> Result<String> {
    use std::convert::TryFrom;

    let errfn = || Error(format!("encoded table name {} is invalid", s).into());
    let c_map = |c: char| {
        let c = c as u32;
        let base = 'A' as u32;
        if (base..base + 16).contains(&c) {
            Some(c - base)
        } else {
            None
        }
    };

    let mut ret = String::with_capacity(s.len());
    let mut it = s.chars();
    while let Some(c) = it.next() {
        if c == '$' {
            let c_hi = it.next().and_then(c_map).ok_or_else(errfn)?;
            let c_lo = it.next().and_then(c_map).ok_or_else(errfn)?;
            let c_dec = char::try_from(c_hi * 16 + c_lo).map_err(|_| errfn())?;
            ret.push(c_dec);
        } else {
            ret.push(c);
        }
    }
    Ok(ret)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_encdec_name() {
        for name in [
            "testname",
            "test_name",
            "test name",
            "test$name",
            "test:name@help.me$get/this**right",
        ] {
            let encname = encode_name(name).unwrap();
            assert!(!encname.contains(' '));
            assert!(!encname.contains('.'));
            assert!(!encname.contains('*'));
            assert_eq!(*name, decode_name(&encname).unwrap());
        }
    }
}
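The `$`-escape scheme above maps any byte-sized character to `$` followed by two characters in 'A'..='P' (base 16 with 'A' standing for zero). A worked example, written as a hypothetical extra test for the module above: a space is 0x20, so the high nibble 2 maps to 'C' and the low nibble 0 maps to 'A'.

    #[test]
    fn test_encode_space() {
        // ' ' is 0x20: high nibble 2 -> 'C', low nibble 0 -> 'A'
        assert_eq!(encode_name("test name").unwrap(), "test$CAname");
        assert_eq!(decode_name("test$CAname").unwrap(), "test name");
    }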
@@ -1,6 +1,8 @@
 #[macro_use]
 extern crate tracing;
 
+#[cfg(feature = "fjall")]
+pub mod fjall_adapter;
 #[cfg(feature = "lmdb")]
 pub mod lmdb_adapter;
 #[cfg(feature = "sqlite")]

@@ -18,7 +20,7 @@ use std::cell::Cell;
 use std::path::PathBuf;
 use std::sync::Arc;
 
-use err_derive::Error;
+use thiserror::Error;
 
 pub use open::*;
 

@@ -42,7 +44,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
 // ----
 
 #[derive(Debug, Error)]
-#[error(display = "{}", _0)]
+#[error("{0}")]
 pub struct Error(pub Cow<'static, str>);
 
 impl From<std::io::Error> for Error {

@@ -54,7 +56,7 @@ impl From<std::io::Error> for Error {
 pub type Result<T> = std::result::Result<T, Error>;
 
 #[derive(Debug, Error)]
-#[error(display = "{}", _0)]
+#[error("{0}")]
 pub struct TxOpError(pub(crate) Error);
 pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
 

@@ -104,32 +106,44 @@ impl Db {
             result: Cell::new(None),
         };
         let tx_res = self.0.transaction(&f);
-        let ret = f
-            .result
-            .into_inner()
-            .expect("Transaction did not store result");
+        let fn_res = f.result.into_inner();
 
-        match tx_res {
-            Ok(on_commit) => match ret {
-                Ok(value) => {
-                    on_commit.into_iter().for_each(|f| f());
-                    Ok(value)
-                }
-                _ => unreachable!(),
-            },
-            Err(TxError::Abort(())) => match ret {
-                Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
-                _ => unreachable!(),
-            },
-            Err(TxError::Db(e2)) => match ret {
-                // Ok was stored -> the error occurred when finalizing
-                // transaction
-                Ok(_) => Err(TxError::Db(e2)),
-                // An error was already stored: that's the one we want to
-                // return
-                Err(TxError::Db(e)) => Err(TxError::Db(e)),
-                _ => unreachable!(),
-            },
+        match (tx_res, fn_res) {
+            (Ok(on_commit), Some(Ok(value))) => {
+                // Transaction succeeded
+                // TxFn stored the value to return to the user in fn_res
+                // tx_res contains the on_commit list of callbacks, run them now
+                on_commit.into_iter().for_each(|f| f());
+                Ok(value)
+            }
+            (Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => {
+                // Transaction was aborted by user code
+                // The abort error value is stored in fn_res
+                Err(TxError::Abort(e))
+            }
+            (Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => {
+                // Transaction encountered a DB error in user code
+                // The error value encountered is the one in fn_res,
+                // tx_res contains only a dummy error message
+                Err(TxError::Db(fn_e))
+            }
+            (Err(TxError::Db(tx_e)), None) => {
+                // Transaction encountered a DB error when initializing the transaction,
+                // before user code was called
+                Err(TxError::Db(tx_e))
+            }
+            (Err(TxError::Db(tx_e)), Some(Ok(_))) => {
+                // Transaction encountered a DB error when committing the transaction,
+                // after user code was called
+                Err(TxError::Db(tx_e))
+            }
+            (tx_res, fn_res) => {
+                panic!(
+                    "unexpected error case: tx_res={:?}, fn_res={:?}",
+                    tx_res.map(|_| "..."),
+                    fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
+                );
+            }
         }
     }

@@ -152,7 +166,7 @@ impl Db {
         let tree_names = other.list_trees()?;
         for name in tree_names {
             let tree = self.open_tree(&name)?;
-            if tree.len()? > 0 {
+            if !tree.is_empty()? {
                 return Err(Error(format!("tree {} already contains data", name).into()));
             }
 

@@ -194,8 +208,12 @@ impl Tree {
         self.0.get(self.1, key.as_ref())
     }
     #[inline]
-    pub fn len(&self) -> Result<usize> {
-        self.0.len(self.1)
+    pub fn approximate_len(&self) -> Result<usize> {
+        self.0.approximate_len(self.1)
     }
+    #[inline]
+    pub fn is_empty(&self) -> Result<bool> {
+        self.0.is_empty(self.1)
+    }
 
     #[inline]

@@ -333,7 +351,8 @@ pub(crate) trait IDb: Send + Sync {
     fn snapshot(&self, path: &PathBuf) -> Result<()>;
 
     fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
-    fn len(&self, tree: usize) -> Result<usize>;
+    fn approximate_len(&self, tree: usize) -> Result<usize>;
+    fn is_empty(&self, tree: usize) -> Result<bool>;
 
     fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
     fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
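The rewritten Db::transaction above pairs the engine's transaction outcome (tx_res) with whatever the user closure stashed in a Cell (fn_res), so commit-time failures surface even when the closure itself succeeded. A reduced sketch of that pairing logic, written independently of Garage's traits with plain Result/String stand-ins:

    // tx_res: did the engine begin/commit the transaction?
    // fn_res: what did the user closure return, if it ran at all?
    fn resolve<T, A>(
        tx_res: Result<(), String>,
        fn_res: Option<Result<T, A>>,
    ) -> Result<T, String>
    where
        A: std::fmt::Debug,
    {
        match (tx_res, fn_res) {
            // closure succeeded and the commit went through
            (Ok(()), Some(Ok(value))) => Ok(value),
            // closure asked for an abort: report its error, not the engine's
            (Err(_), Some(Err(e))) => Err(format!("aborted: {:?}", e)),
            // engine failed before or after the closure ran
            (Err(db_err), None) | (Err(db_err), Some(Ok(_))) => Err(db_err),
            // any other combination would be a logic bug
            _ => panic!("inconsistent transaction state"),
        }
    }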
@@ -1,8 +1,8 @@
 use core::ops::Bound;
-use core::ptr::NonNull;
 
 use std::collections::HashMap;
 use std::convert::TryInto;
+use std::marker::PhantomPinned;
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::{Arc, RwLock};

@@ -11,12 +11,55 @@ use heed::types::ByteSlice;
 use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};
 
 use crate::{
+    open::{Engine, OpenOpt},
     Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
     TxResult, TxValueIter, Value, ValueIter,
 };
 
 pub use heed;
 
+// ---- top-level open function
+
+pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
+    info!("Opening LMDB database at: {}", path.display());
+    if let Err(e) = std::fs::create_dir_all(&path) {
+        return Err(Error(
+            format!("Unable to create LMDB data directory: {}", e).into(),
+        ));
+    }
+
+    let map_size = match opt.lmdb_map_size {
+        None => recommended_map_size(),
+        Some(v) => v - (v % 4096),
+    };
+
+    let mut env_builder = heed::EnvOpenOptions::new();
+    env_builder.max_dbs(100);
+    env_builder.map_size(map_size);
+    env_builder.max_readers(2048);
+    unsafe {
+        env_builder.flag(heed::flags::Flags::MdbNoRdAhead);
+        env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
+        if !opt.fsync {
+            env_builder.flag(heed::flags::Flags::MdbNoSync);
+        }
+    }
+    match env_builder.open(&path) {
+        Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
+            return Err(Error(
+                "OutOfMemory error while trying to open LMDB database. This can happen \
+                if your operating system is not allowing you to use sufficient virtual \
+                memory address space. Please check that no limit is set (ulimit -v). \
+                You may also try to set a smaller `lmdb_map_size` configuration parameter. \
+                On 32-bit machines, you should probably switch to another database engine."
+                    .into(),
+            ))
+        }
+        Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
+        Ok(db) => Ok(LmdbDb::init(db)),
+    }
+}
+
 // -- err
 
 impl From<heed::Error> for Error {

@@ -104,10 +147,9 @@ impl IDb for LmdbDb {
         Ok(ret2)
     }
 
-    fn snapshot(&self, to: &PathBuf) -> Result<()> {
-        std::fs::create_dir_all(to)?;
-        let mut path = to.clone();
-        path.push("data.mdb");
+    fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
+        std::fs::create_dir_all(base_path)?;
+        let path = Engine::Lmdb.db_path(base_path);
         self.db
             .copy_to_path(path, heed::CompactionOption::Enabled)?;
         Ok(())

@@ -126,11 +168,16 @@ impl IDb for LmdbDb {
         }
     }
 
-    fn len(&self, tree: usize) -> Result<usize> {
+    fn approximate_len(&self, tree: usize) -> Result<usize> {
         let tree = self.get_tree(tree)?;
         let tx = self.db.read_txn()?;
         Ok(tree.len(&tx)?.try_into().unwrap())
     }
+    fn is_empty(&self, tree: usize) -> Result<bool> {
+        let tree = self.get_tree(tree)?;
+        let tx = self.db.read_txn()?;
+        Ok(tree.is_empty(&tx)?)
+    }
 
     fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
         let tree = self.get_tree(tree)?;

@@ -159,13 +206,15 @@ impl IDb for LmdbDb {
     fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let tx = self.db.read_txn()?;
-        TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?))
+        // Safety: the closure does not store its argument anywhere
+        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?)) }
     }
 
     fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let tx = self.db.read_txn()?;
-        TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?))
+        // Safety: the closure does not store its argument anywhere
+        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?)) }
     }
 
     fn range<'r>(

@@ -176,7 +225,8 @@ impl IDb for LmdbDb {
     ) -> Result<ValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let tx = self.db.read_txn()?;
-        TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?))
+        // Safety: the closure does not store its argument anywhere
+        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?)) }
    }
     fn range_rev<'r>(
         &self,

@@ -186,7 +236,8 @@ impl IDb for LmdbDb {
     ) -> Result<ValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let tx = self.db.read_txn()?;
-        TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?))
+        // Safety: the closure does not store its argument anywhere
+        unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?)) }
     }
 
     // ----

@@ -316,28 +367,41 @@ where
 {
     tx: RoTxn<'a>,
     iter: Option<I>,
+    _pin: PhantomPinned,
 }
 
 impl<'a, I> TxAndIterator<'a, I>
 where
     I: Iterator<Item = IteratorItem<'a>> + 'a,
 {
-    fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
+    fn iter(self: Pin<&mut Self>) -> &mut Option<I> {
+        // Safety: iter is not structural
+        unsafe { &mut self.get_unchecked_mut().iter }
+    }
+
+    /// Safety: iterfun must not store its argument anywhere but in its result.
+    unsafe fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
     where
         F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
     {
-        let res = TxAndIterator { tx, iter: None };
+        let res = TxAndIterator {
+            tx,
+            iter: None,
+            _pin: PhantomPinned,
+        };
         let mut boxed = Box::pin(res);
 
-        // This unsafe allows us to bypass lifetime checks
-        let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
-        let iter = iterfun(tx)?;
+        let tx_lifetime_overextended: &'a RoTxn<'a> = {
+            let tx = &boxed.tx;
+            // Safety: Artificially extending the lifetime because
+            // this reference will only be stored and accessed from the
+            // returned ValueIter which guarantees that it is destroyed
+            // before the tx it is pointing to.
+            unsafe { &*&raw const *tx }
+        };
+        let iter = iterfun(&tx_lifetime_overextended)?;
 
-        let mut_ref = Pin::as_mut(&mut boxed);
-        // This unsafe allows us to write in a field of the pinned struct
-        unsafe {
-            Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
-        }
+        *boxed.as_mut().iter() = Some(iter);
 
         Ok(Box::new(TxAndIteratorPin(boxed)))
     }

@@ -348,8 +412,10 @@ where
     I: Iterator<Item = IteratorItem<'a>> + 'a,
 {
     fn drop(&mut self) {
-        // ensure the iterator is dropped before the RoTxn it references
-        drop(self.iter.take());
+        // Safety: `new_unchecked` is okay because we know this value is never
+        // used again after being dropped.
+        let this = unsafe { Pin::new_unchecked(self) };
+        drop(this.iter().take());
     }
 }
 

@@ -365,13 +431,12 @@ where
 
     fn next(&mut self) -> Option<Self::Item> {
         let mut_ref = Pin::as_mut(&mut self.0);
-        // This unsafe allows us to mutably access the iterator field
-        let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
-        match next {
-            None => None,
-            Some(Err(e)) => Some(Err(e.into())),
-            Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
-        }
+        let next = mut_ref.iter().as_mut()?.next()?;
+        let res = match next {
+            Err(e) => Err(e.into()),
+            Ok((k, v)) => Ok((k.to_vec(), v.to_vec())),
+        };
+        Some(res)
     }
 }
 
@ -11,6 +11,7 @@ use crate::{Db, Error, Result};
|
|||
pub enum Engine {
|
||||
Lmdb,
|
||||
Sqlite,
|
||||
Fjall,
|
||||
}
|
||||
|
||||
impl Engine {
|
||||
|
|
@ -19,8 +20,26 @@ impl Engine {
|
|||
match self {
|
||||
Self::Lmdb => "lmdb",
|
||||
Self::Sqlite => "sqlite",
|
||||
Self::Fjall => "fjall",
|
||||
}
|
||||
}
|
||||
|
||||
/// Return engine-specific DB path from base path
|
||||
pub fn db_path(&self, base_path: &PathBuf) -> PathBuf {
|
||||
let mut ret = base_path.clone();
|
||||
match self {
|
||||
Self::Lmdb => {
|
||||
ret.push("db.lmdb");
|
||||
}
|
||||
Self::Sqlite => {
|
||||
ret.push("db.sqlite");
|
||||
}
|
||||
Self::Fjall => {
|
||||
ret.push("db.fjall");
|
||||
}
|
||||
}
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Engine {
|
||||
|
|
@ -36,10 +55,11 @@ impl std::str::FromStr for Engine {
|
|||
match text {
|
||||
"lmdb" | "heed" => Ok(Self::Lmdb),
|
||||
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
|
||||
"fjall" => Ok(Self::Fjall),
|
||||
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
|
||||
kind => Err(Error(
|
||||
format!(
|
||||
"Invalid DB engine: {} (options are: lmdb, sqlite)",
|
||||
"Invalid DB engine: {} (options are: lmdb, sqlite, fjall)",
|
||||
kind
|
||||
)
|
||||
.into(),
|
||||
|
|
@ -51,6 +71,7 @@ impl std::str::FromStr for Engine {
|
|||
pub struct OpenOpt {
|
||||
pub fsync: bool,
|
||||
pub lmdb_map_size: Option<usize>,
|
||||
pub fjall_block_cache_size: Option<usize>,
|
||||
}
|
||||
|
||||
impl Default for OpenOpt {
|
||||
|
|
@ -58,6 +79,7 @@ impl Default for OpenOpt {
|
|||
Self {
|
||||
fsync: false,
|
||||
lmdb_map_size: None,
|
||||
fjall_block_cache_size: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -66,53 +88,15 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
 	match engine {
 		// ---- Sqlite DB ----
 		#[cfg(feature = "sqlite")]
-		Engine::Sqlite => {
-			info!("Opening Sqlite database at: {}", path.display());
-			let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
-			Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
-		}
+		Engine::Sqlite => crate::sqlite_adapter::open_db(path, opt),
 
 		// ---- LMDB DB ----
 		#[cfg(feature = "lmdb")]
-		Engine::Lmdb => {
-			info!("Opening LMDB database at: {}", path.display());
-			if let Err(e) = std::fs::create_dir_all(&path) {
-				return Err(Error(
-					format!("Unable to create LMDB data directory: {}", e).into(),
-				));
-			}
-
-			let map_size = match opt.lmdb_map_size {
-				None => crate::lmdb_adapter::recommended_map_size(),
-				Some(v) => v - (v % 4096),
-			};
-
-			let mut env_builder = heed::EnvOpenOptions::new();
-			env_builder.max_dbs(100);
-			env_builder.map_size(map_size);
-			env_builder.max_readers(2048);
-			unsafe {
-				env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
-				env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
-				if !opt.fsync {
-					env_builder.flag(heed::flags::Flags::MdbNoSync);
-				}
-			}
-			match env_builder.open(&path) {
-				Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
-					return Err(Error(
-						"OutOfMemory error while trying to open LMDB database. This can happen \
-						if your operating system is not allowing you to use sufficient virtual \
-						memory address space. Please check that no limit is set (ulimit -v). \
-						You may also try to set a smaller `lmdb_map_size` configuration parameter. \
-						On 32-bit machines, you should probably switch to another database engine."
-							.into(),
-					))
-				}
-				Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
-				Ok(db) => Ok(crate::lmdb_adapter::LmdbDb::init(db)),
-			}
-		}
+		Engine::Lmdb => crate::lmdb_adapter::open_db(path, opt),
+
+		// ---- Fjall DB ----
+		#[cfg(feature = "fjall")]
+		Engine::Fjall => crate::fjall_adapter::open_db(path, opt),
 
 		// Pattern is unreachable when all supported DB engines are compiled into binary. The allow
 		// attribute is added so that we won't have to change this match in case stop building
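With each adapter now exposing its own `open_db`, the dispatcher above shrinks to one line per engine. A hedged sketch of a caller, using only the signature shown in the hunk header; the module path and the example file path are assumptions:

use std::path::PathBuf;
// Assumed import path; the hunks only show that open_db, Engine and
// OpenOpt live in garage_db's `open` module.
use garage_db::open::{open_db, Engine, OpenOpt};

fn main() -> garage_db::Result<()> {
	// The three OpenOpt fields shown in this changeset; None means "use defaults".
	let opt = OpenOpt {
		fsync: false,
		lmdb_map_size: None,
		fjall_block_cache_size: None,
	};
	let path = PathBuf::from("/var/lib/garage/meta/db.fjall"); // example path
	let _db = open_db(&path, Engine::Fjall, &opt)?;
	Ok(())
}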
@@ -11,12 +11,23 @@ use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::{params, Rows, Statement, Transaction};
 
 use crate::{
+	open::{Engine, OpenOpt},
 	Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
 	TxResult, TxValueIter, Value, ValueIter,
 };
 
 pub use rusqlite;
 
+// ---- top-level open function
+
+pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
+	info!("Opening Sqlite database at: {}", path.display());
+	let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
+	Ok(SqliteDb::new(manager, opt.fsync)?)
+}
+
+// ----
+
 type Connection = r2d2::PooledConnection<SqliteConnectionManager>;
 
 // --- err
@@ -139,17 +150,18 @@ impl IDb for SqliteDb {
 		Ok(trees)
 	}
 
-	fn snapshot(&self, to: &PathBuf) -> Result<()> {
-		fn progress(p: rusqlite::backup::Progress) {
-			let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
-			info!("Sqlite snapshot progress: {}%", percent);
-		}
-		std::fs::create_dir_all(to)?;
-		let mut path = to.clone();
-		path.push("db.sqlite");
-		self.db
-			.get()?
-			.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
+	fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
+		std::fs::create_dir_all(base_path)?;
+		let path = Engine::Sqlite
+			.db_path(&base_path)
+			.into_os_string()
+			.into_string()
+			.map_err(|_| Error("invalid sqlite path string".into()))?;
+
+		info!("Start sqlite VACUUM INTO `{}`", path);
+		self.db.get()?.execute("VACUUM INTO ?1", params![path])?;
+		info!("Finished sqlite VACUUM INTO `{}`", path);
+
 		Ok(())
 	}
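Design note on the snapshot rewrite above: `VACUUM INTO` has SQLite itself write a compacted, transactionally consistent copy in a single statement, replacing the page-by-page `backup` API and its progress callback. A minimal standalone reproduction of the statement (file names are examples; the destination must not already exist):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
	let conn = Connection::open("/tmp/source.sqlite")?; // example source db
	// SQLite writes a defragmented, consistent snapshot to a fresh file.
	conn.execute("VACUUM INTO ?1", params!["/tmp/snapshot.sqlite"])?;
	Ok(())
}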
@@ -160,7 +172,7 @@ impl IDb for SqliteDb {
 		self.internal_get(&self.db.get()?, &tree, key)
 	}
 
-	fn len(&self, tree: usize) -> Result<usize> {
+	fn approximate_len(&self, tree: usize) -> Result<usize> {
 		let tree = self.get_tree(tree)?;
 		let db = self.db.get()?;
@@ -172,6 +184,10 @@ impl IDb for SqliteDb {
 		}
 	}
 
+	fn is_empty(&self, tree: usize) -> Result<bool> {
+		Ok(self.approximate_len(tree)? == 0)
+	}
+
 	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
 		let tree = self.get_tree(tree)?;
 		let db = self.db.get()?;
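The `len` → `approximate_len` rename above (and in the table and block-manager hunks further down) relaxes the contract: an engine may return a cheap estimate instead of an exact count, which is also why the test hunk below now counts entries by iterating instead of calling `len()`. A hypothetical miniature of the relaxed contract (illustrative names only, not garage_db's actual trait):

// ApproxCount is a stand-in, not garage_db's actual trait.
trait ApproxCount {
	// May be exact (e.g. a SQL COUNT(*)) or a cheap estimate; callers
	// must not rely on exactness.
	fn approximate_len(&self) -> usize;

	// Mirrors the is_empty() added above: emptiness derived from the count.
	fn is_empty(&self) -> bool {
		self.approximate_len() == 0
	}
}

struct DemoTree(Vec<(Vec<u8>, Vec<u8>)>);

impl ApproxCount for DemoTree {
	fn approximate_len(&self) -> usize {
		self.0.len() // exact here, but the trait does not promise it
	}
}

fn main() {
	assert!(DemoTree(Vec::new()).is_empty());
}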
@@ -1,7 +1,7 @@
 use crate::*;
 
 fn test_suite(db: Db) {
-	let tree = db.open_tree("tree").unwrap();
+	let tree = db.open_tree("tree:this_is_a_tree").unwrap();
 
 	let ka: &[u8] = &b"test"[..];
 	let kb: &[u8] = &b"zwello"[..];
@@ -14,7 +14,7 @@ fn test_suite(db: Db) {
 
 	assert!(tree.insert(ka, va).is_ok());
 	assert_eq!(tree.get(ka).unwrap().unwrap(), va);
-	assert_eq!(tree.len().unwrap(), 1);
+	assert_eq!(tree.iter().unwrap().count(), 1);
 
 	// ---- test transaction logic ----
@@ -148,3 +148,15 @@ fn test_sqlite_db() {
 	let db = SqliteDb::new(manager, false).unwrap();
 	test_suite(db);
 }
+
+#[test]
+#[cfg(feature = "fjall")]
+fn test_fjall_db() {
+	use crate::fjall_adapter::{fjall, FjallDb};
+
+	let path = mktemp::Temp::new_dir().unwrap();
+	let config = fjall::Config::new(path).temporary(true);
+	let keyspace = config.open_transactional().unwrap();
+	let db = FjallDb::init(keyspace);
+	test_suite(db);
+}
@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -57,6 +57,7 @@ opentelemetry.workspace = true
 opentelemetry-prometheus = { workspace = true, optional = true }
 opentelemetry-otlp = { workspace = true, optional = true }
 syslog-tracing = { workspace = true, optional = true }
+tracing-journald = { workspace = true, optional = true }
 
 [dev-dependencies]
 garage_api_common.workspace = true
@@ -90,6 +91,7 @@ k2v = [ "garage_util/k2v", "garage_api_k2v" ]
 # Database engines
 lmdb = [ "garage_model/lmdb" ]
 sqlite = [ "garage_model/sqlite" ]
+fjall = [ "garage_model/fjall" ]
 
 # Automatic registration and discovery via Consul API
 consul-discovery = [ "garage_rpc/consul-discovery" ]
@@ -101,6 +103,8 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
 telemetry-otlp = [ "opentelemetry-otlp" ]
 # Logging to syslog
 syslog = [ "syslog-tracing" ]
+# Logging to journald
+journald = [ "tracing-journald" ]
 
 # NOTE: bundled-libs and system-libs should be treat as mutually exclusive;
 # exactly one of them should be enabled.
@@ -101,6 +101,7 @@ impl AdminRpcHandler {
 		let mut obj_dels = 0;
 		let mut mpu_dels = 0;
 		let mut ver_dels = 0;
+		let mut br_dels = 0;
 
 		for hash in blocks {
 			let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
@@ -131,12 +132,19 @@ impl AdminRpcHandler {
 					ver_dels += 1;
 				}
 			}
+			if !br.deleted.get() {
+				let mut br = br;
+				br.deleted.set();
+				self.garage.block_ref_table.insert(&br).await?;
+				br_dels += 1;
+			}
 		}
 	}
 
 	Ok(AdminRpc::Ok(format!(
-		"Purged {} blocks, {} versions, {} objects, {} multipart uploads",
+		"Purged {} blocks: marked {} block refs, {} versions, {} objects and {} multipart uploads as deleted",
 		blocks.len(),
+		br_dels,
 		ver_dels,
 		obj_dels,
 		mpu_dels,
@@ -219,7 +219,7 @@ impl AdminRpcHandler {
 
 		// Gather block manager statistics
 		writeln!(&mut ret, "\nBlock manager stats:").unwrap();
-		let rc_len = self.garage.block_manager.rc_len()?.to_string();
+		let rc_len = self.garage.block_manager.rc_approximate_len()?.to_string();
 
 		writeln!(
 			&mut ret,
@@ -230,13 +230,13 @@ impl AdminRpcHandler {
 		writeln!(
 			&mut ret,
 			"  resync queue length: {}",
-			self.garage.block_manager.resync.queue_len()?
+			self.garage.block_manager.resync.queue_approximate_len()?
 		)
 		.unwrap();
 		writeln!(
 			&mut ret,
 			"  blocks with resync errors: {}",
-			self.garage.block_manager.resync.errors_len()?
+			self.garage.block_manager.resync.errors_approximate_len()?
 		)
 		.unwrap();
 
@@ -346,16 +346,21 @@ impl AdminRpcHandler {
 		F: TableSchema + 'static,
 		R: TableReplication + 'static,
 	{
-		let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
-		let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();
+		let data_len = t
+			.data
+			.store
+			.approximate_len()
+			.map_err(GarageError::from)?
+			.to_string();
+		let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string();
 
 		Ok(format!(
 			"  {}\t{}\t{}\t{}\t{}",
 			F::TABLE_NAME,
 			data_len,
 			mkl_len,
-			t.merkle_updater.todo_len()?,
-			t.data.gc_todo_len()?
+			t.merkle_updater.todo_approximate_len()?,
+			t.data.gc_todo_approximate_len()?
 		))
 	}
 
@@ -466,6 +466,10 @@ pub enum RepairWhat {
 	/// Repair (resync/rebalance) the set of stored blocks in the cluster
 	#[structopt(name = "blocks", version = garage_version())]
 	Blocks,
+	/// Clear the block resync queue. The list of blocks in errored state
+	/// is cleared as well. You MUST run `garage repair blocks` after invoking this.
+	#[structopt(name = "clear-resync-queue", version = garage_version())]
+	ClearResyncQueue,
 	/// Repropagate object deletions to the version table
 	#[structopt(name = "versions", version = garage_version())]
 	Versions,
@@ -208,6 +208,43 @@ fn init_logging(opt: &Opt) {
 		}
 	}
 
+	if std::env::var("GARAGE_LOG_TO_JOURNALD")
+		.map(|x| x == "1" || x == "true")
+		.unwrap_or(false)
+	{
+		#[cfg(feature = "journald")]
+		{
+			use tracing_journald::{Priority, PriorityMappings};
+			use tracing_subscriber::layer::SubscriberExt;
+			use tracing_subscriber::util::SubscriberInitExt;
+
+			let registry = tracing_subscriber::registry()
+				.with(tracing_subscriber::fmt::layer().with_writer(std::io::sink))
+				.with(env_filter);
+			match tracing_journald::layer() {
+				Ok(layer) => {
+					registry
+						.with(layer.with_priority_mappings(PriorityMappings {
+							info: Priority::Informational,
+							debug: Priority::Debug,
+							..PriorityMappings::new()
+						}))
+						.init();
+				}
+				Err(e) => {
+					eprintln!("Couldn't connect to journald: {}.", e);
+					std::process::exit(1);
+				}
+			}
+			return;
+		}
+		#[cfg(not(feature = "journald"))]
+		{
+			eprintln!("Journald support is not enabled in this build.");
+			std::process::exit(1);
+		}
+	}
+
 	tracing_subscriber::fmt()
 		.with_writer(std::io::stderr)
 		.with_env_filter(env_filter)
@@ -92,6 +92,11 @@ pub async fn launch_online_repair(
 			info!("Repairing bucket aliases (foreground)");
 			garage.locked_helper().await.repair_aliases().await?;
 		}
+		RepairWhat::ClearResyncQueue => {
+			let garage = garage.clone();
+			tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
+				.await??
+		}
 	}
 	Ok(())
 }
@@ -183,10 +183,21 @@ fn watch_shutdown_signal() -> watch::Receiver<bool> {
 		let mut sigterm =
 			signal(SignalKind::terminate()).expect("Failed to install SIGTERM handler");
 		let mut sighup = signal(SignalKind::hangup()).expect("Failed to install SIGHUP handler");
-		tokio::select! {
-			_ = sigint.recv() => info!("Received SIGINT, shutting down."),
-			_ = sigterm.recv() => info!("Received SIGTERM, shutting down."),
-			_ = sighup.recv() => info!("Received SIGHUP, shutting down."),
+		loop {
+			tokio::select! {
+				_ = sigint.recv() => {
+					info!("Received SIGINT, shutting down.");
+					break
+				}
+				_ = sigterm.recv() => {
+					info!("Received SIGTERM, shutting down.");
+					break
+				}
+				_ = sighup.recv() => {
+					info!("Received SIGHUP, reload not supported.");
+					continue
+				}
+			}
 		}
 		send_cancel.send(true).unwrap();
 	});
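The loop above changes SIGHUP from "shut down" to "acknowledge and keep running". A hedged, self-contained sketch of the pattern (assuming a Unix target and a running Tokio runtime; SIGTERM omitted for brevity):

use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::watch;

fn watch_shutdown() -> watch::Receiver<bool> {
	let (send_cancel, watch_cancel) = watch::channel(false);
	tokio::spawn(async move {
		let mut sigint = signal(SignalKind::interrupt()).expect("SIGINT handler");
		let mut sighup = signal(SignalKind::hangup()).expect("SIGHUP handler");
		loop {
			tokio::select! {
				// terminating signal: leave the loop and notify receivers
				_ = sigint.recv() => break,
				// SIGHUP: swallow the signal and keep listening
				_ = sighup.recv() => continue,
			}
		}
		send_cancel.send(true).unwrap();
	});
	watch_cancel
}

#[tokio::main]
async fn main() {
	let mut rx = watch_shutdown();
	rx.changed().await.unwrap(); // resolves once a terminating signal arrives
}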
@@ -198,6 +198,7 @@ async fn test_precondition() {
 		);
 	}
 	let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
+	let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
 	let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
 	{
 		let err = ctx
@@ -212,6 +213,18 @@ async fn test_precondition() {
 			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
 		);
 
+		let err = ctx
+			.client
+			.get_object()
+			.bucket(&bucket)
+			.key(STD_KEY)
+			.if_modified_since(same_date)
+			.send()
+			.await;
+		assert!(
+			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
+		);
+
 		let o = ctx
 			.client
 			.get_object()
@@ -236,6 +249,17 @@ async fn test_precondition() {
 			matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
 		);
 
+		let o = ctx
+			.client
+			.get_object()
+			.bucket(&bucket)
+			.key(STD_KEY)
+			.if_unmodified_since(same_date)
+			.send()
+			.await
+			.unwrap();
+		assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
+
 		let o = ctx
 			.client
 			.get_object()
@@ -606,3 +606,45 @@ async fn test_website_puny() {
 		);
 	}
 }
+
+#[tokio::test]
+async fn test_website_object_not_found() {
+	const BCKT_NAME: &str = "not-found";
+	let ctx = common::context();
+	let _bucket = ctx.create_bucket(BCKT_NAME);
+
+	let client = Client::builder(TokioExecutor::new()).build_http();
+
+	let req = |suffix| {
+		Request::builder()
+			.method("GET")
+			.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
+			.header("Host", format!("{}{}", BCKT_NAME, suffix))
+			.body(Body::new(Bytes::new()))
+			.unwrap()
+	};
+
+	ctx.garage
+		.command()
+		.args(["bucket", "website", "--allow", BCKT_NAME])
+		.quiet()
+		.expect_success_status("Could not allow website on bucket");
+
+	let resp = client.request(req("")).await.unwrap();
+	assert_eq!(resp.status(), StatusCode::NOT_FOUND);
+	// the error we return by default are *not* xml
+	assert_eq!(
+		resp.headers().get(http::header::CONTENT_TYPE).unwrap(),
+		"text/html; charset=utf-8"
+	);
+	let result = String::from_utf8(
+		resp.into_body()
+			.collect()
+			.await
+			.unwrap()
+			.to_bytes()
+			.to_vec(),
+	)
+	.unwrap();
+	assert!(result.contains("not found"));
+}
@@ -72,6 +72,16 @@ impl K2vClient {
 			.enable_http2()
 			.build();
 		let client = HttpClient::builder(TokioExecutor::new()).build(connector);
+		Self::new_with_client(config, client)
+	}
+
+	/// Create a new K2V client with an external client.
+	/// Useful for example if you plan on creating many clients but you want to mutualize the
+	/// underlying thread pools & co.
+	pub fn new_with_client(
+		config: K2vClientConfig,
+		client: HttpClient<HttpsConnector<HttpConnector>, Body>,
+	) -> Result<Self, Error> {
 		let user_agent: std::borrow::Cow<str> = match &config.user_agent {
 			Some(ua) => ua.into(),
 			None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(),
@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -24,7 +24,7 @@ garage_net.workspace = true
 async-trait.workspace = true
 blake2.workspace = true
 chrono.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hex.workspace = true
 http.workspace = true
 base64.workspace = true
|
@ -44,3 +44,4 @@ default = [ "lmdb", "sqlite" ]
|
|||
k2v = [ "garage_util/k2v" ]
|
||||
lmdb = [ "garage_db/lmdb" ]
|
||||
sqlite = [ "garage_db/sqlite" ]
|
||||
fjall = [ "garage_db/fjall" ]
|
||||
|
|
|
|||
|
|
@@ -116,21 +116,17 @@ impl Garage {
 		info!("Opening database...");
 		let db_engine = db::Engine::from_str(&config.db_engine)
 			.ok_or_message("Invalid `db_engine` value in configuration file")?;
-		let mut db_path = config.metadata_dir.clone();
-		match db_engine {
-			db::Engine::Sqlite => {
-				db_path.push("db.sqlite");
-			}
-			db::Engine::Lmdb => {
-				db_path.push("db.lmdb");
-			}
-		}
+		let db_path = db_engine.db_path(&config.metadata_dir);
 		let db_opt = db::OpenOpt {
 			fsync: config.metadata_fsync,
 			lmdb_map_size: match config.lmdb_map_size {
 				v if v == usize::default() => None,
 				v => Some(v),
 			},
+			fjall_block_cache_size: match config.fjall_block_cache_size {
+				v if v == usize::default() => None,
+				v => Some(v),
+			},
 		};
 		let db = db::open_db(&db_path, db_engine, &db_opt)
 			.ok_or_message("Unable to open metadata db")?;
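A note on the `v if v == usize::default() => None` guards above: since a serde default leaves absent capacity keys at zero, zero doubles as the "not configured" sentinel that maps to `None`. A miniature of the same mapping:

// Map a zero-initialized config value to None, anything else to Some(v).
fn opt_from_default(v: usize) -> Option<usize> {
	match v {
		v if v == usize::default() => None, // 0 = key absent from config file
		v => Some(v),
	}
}

fn main() {
	assert_eq!(opt_from_default(0), None);
	assert_eq!(opt_from_default(1 << 30), Some(1 << 30));
}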
@@ -319,15 +315,15 @@ impl Garage {
 		Ok(())
 	}
 
-	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
+	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> {
 		helper::bucket::BucketHelper(self)
 	}
 
-	pub fn key_helper(&self) -> helper::key::KeyHelper {
+	pub fn key_helper(&self) -> helper::key::KeyHelper<'_> {
 		helper::key::KeyHelper(self)
 	}
 
-	pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
+	pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> {
 		let lock = self.bucket_lock.lock().await;
 		helper::locked::LockedHelper(self, Some(lock))
 	}
@@ -1,24 +1,24 @@
-use err_derive::Error;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;
 
 use garage_util::error::Error as GarageError;
 
 #[derive(Debug, Error, Serialize, Deserialize)]
 pub enum Error {
-	#[error(display = "Internal error: {}", _0)]
-	Internal(#[error(source)] GarageError),
+	#[error("Internal error: {0}")]
+	Internal(#[from] GarageError),
 
-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),
 
 	/// Bucket name is not valid according to AWS S3 specs
-	#[error(display = "Invalid bucket name: {}", _0)]
+	#[error("Invalid bucket name: {0}")]
 	InvalidBucketName(String),
 
-	#[error(display = "Access key not found: {}", _0)]
+	#[error("Access key not found: {0}")]
 	NoSuchAccessKey(String),
 
-	#[error(display = "Bucket not found: {}", _0)]
+	#[error("Bucket not found: {0}")]
 	NoSuchBucket(String),
 }
@@ -121,13 +121,13 @@ impl Worker for LifecycleWorker {
 				mpu_aborted,
 				..
 			} => {
-				let n_objects = self.garage.object_table.data.store.len().ok();
+				let n_objects = self.garage.object_table.data.store.approximate_len().ok();
 				let progress = match n_objects {
-					None => "...".to_string(),
-					Some(total) => format!(
+					Some(total) if total > 0 => format!(
 						"~{:.2}%",
 						100. * std::cmp::min(*counter, total) as f32 / total as f32
 					),
+					_ => "...".to_string(),
 				};
 				WorkerStatus {
 					progress: Some(progress),
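The reordered match arms above also guard against a meaningless (NaN) percentage when the object table is empty, since `Some(0)` previously reached the `format!` arm. A miniature of the corrected progress computation:

fn progress(counter: u64, n_objects: Option<u64>) -> String {
	match n_objects {
		// only compute a percentage for a non-empty table
		Some(total) if total > 0 => {
			format!("~{:.2}%", 100. * counter.min(total) as f32 / total as f32)
		}
		// unknown or empty table: no meaningful percentage
		_ => "...".to_string(),
	}
}

fn main() {
	assert_eq!(progress(0, Some(0)), "...");
	assert_eq!(progress(50, Some(200)), "~25.00%");
}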
@@ -1,6 +1,6 @@
 [package]
 name = "garage_net"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -30,7 +30,7 @@ rand.workspace = true
 
 log.workspace = true
 arc-swap.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 bytes.workspace = true
 cfg-if.workspace = true
 
@@ -159,7 +159,7 @@ where
 pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;
 
 pub(crate) trait GenericEndpoint {
-	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>>;
+	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>>;
 	fn drop_handler(&self);
 	fn clone_endpoint(&self) -> DynEndpoint;
 }
@@ -175,7 +175,7 @@ where
 	M: Message,
 	H: StreamingEndpointHandler<M> + 'static,
 {
-	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>> {
+	fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>> {
 		async move {
 			match self.0.handler.load_full() {
 				None => Err(Error::NoHandler),
@@ -1,49 +1,49 @@
 use std::io;
 
-use err_derive::Error;
 use log::error;
+use thiserror::Error;
 
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] io::Error),
+	#[error("IO error: {0}")]
+	Io(#[from] io::Error),
 
-	#[error(display = "Messagepack encode error: {}", _0)]
-	RMPEncode(#[error(source)] rmp_serde::encode::Error),
-	#[error(display = "Messagepack decode error: {}", _0)]
-	RMPDecode(#[error(source)] rmp_serde::decode::Error),
+	#[error("Messagepack encode error: {0}")]
+	RMPEncode(#[from] rmp_serde::encode::Error),
+	#[error("Messagepack decode error: {0}")]
+	RMPDecode(#[from] rmp_serde::decode::Error),
 
-	#[error(display = "Tokio join error: {}", _0)]
-	TokioJoin(#[error(source)] tokio::task::JoinError),
+	#[error("Tokio join error: {0}")]
+	TokioJoin(#[from] tokio::task::JoinError),
 
-	#[error(display = "oneshot receive error: {}", _0)]
-	OneshotRecv(#[error(source)] tokio::sync::oneshot::error::RecvError),
+	#[error("oneshot receive error: {0}")]
+	OneshotRecv(#[from] tokio::sync::oneshot::error::RecvError),
 
-	#[error(display = "Handshake error: {}", _0)]
-	Handshake(#[error(source)] kuska_handshake::async_std::Error),
+	#[error("Handshake error: {0}")]
+	Handshake(#[from] kuska_handshake::async_std::Error),
 
-	#[error(display = "UTF8 error: {}", _0)]
-	UTF8(#[error(source)] std::string::FromUtf8Error),
+	#[error("UTF8 error: {0}")]
+	UTF8(#[from] std::string::FromUtf8Error),
 
-	#[error(display = "Framing protocol error")]
+	#[error("Framing protocol error")]
 	Framing,
 
-	#[error(display = "Remote error ({:?}): {}", _0, _1)]
+	#[error("Remote error ({0:?}): {1}")]
 	Remote(io::ErrorKind, String),
 
-	#[error(display = "Request ID collision")]
+	#[error("Request ID collision")]
 	IdCollision,
 
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	Message(String),
 
-	#[error(display = "No handler / shutting down")]
+	#[error("No handler / shutting down")]
 	NoHandler,
 
-	#[error(display = "Connection closed")]
+	#[error("Connection closed")]
 	ConnectionClosed,
 
-	#[error(display = "Version mismatch: {}", _0)]
+	#[error("Version mismatch: {0}")]
 	VersionMismatch(String),
 }
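The enum above shows the mechanical mapping applied throughout this err-derive → thiserror migration: `#[error(display = "… {}", _0)]` becomes an inline `#[error("… {0}")]`, and `#[error(source)]` on a field becomes `#[from]` (which also derives the `From` impl). A minimal self-contained example of the target idiom (names are illustrative):

use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
	// positional {0} replaces err-derive's `display = "...", _0`
	#[error("IO error: {0}")]
	// #[from] marks the source *and* derives From<std::io::Error>
	Io(#[from] std::io::Error),

	#[error("invalid value: {0}")]
	Invalid(String),
}

fn main() {
	let e: DemoError = std::io::Error::new(std::io::ErrorKind::Other, "boom").into();
	println!("{}", e); // prints "IO error: boom"
}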
@@ -1,6 +1,6 @@
 [package]
 name = "garage_rpc"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -33,7 +33,7 @@ async-trait.workspace = true
 serde.workspace = true
 serde_bytes.workspace = true
 serde_json.workspace = true
-err-derive = { workspace = true, optional = true }
+thiserror = { workspace = true, optional = true }
 
 # newer version requires rust edition 2021
 kube = { workspace = true, optional = true }
@@ -49,5 +49,5 @@ opentelemetry.workspace = true
 
 [features]
 kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ]
-consul-discovery = [ "reqwest", "err-derive" ]
+consul-discovery = [ "reqwest", "thiserror" ]
 system-libs = [ "sodiumoxide/use-pkg-config" ]
@@ -3,8 +3,8 @@ use std::fs::File;
 use std::io::Read;
 use std::net::{IpAddr, SocketAddr};
 
-use err_derive::Error;
 use serde::{Deserialize, Serialize};
+use thiserror::Error;
 
 use garage_net::NodeID;
 
@@ -219,12 +219,12 @@ impl ConsulDiscovery {
 /// Regroup all Consul discovery errors
 #[derive(Debug, Error)]
 pub enum ConsulError {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] std::io::Error),
-	#[error(display = "HTTP error: {}", _0)]
-	Reqwest(#[error(source)] reqwest::Error),
-	#[error(display = "Invalid Consul TLS configuration")]
+	#[error("IO error: {0}")]
+	Io(#[from] std::io::Error),
+	#[error("HTTP error: {0}")]
+	Reqwest(#[from] reqwest::Error),
+	#[error("Invalid Consul TLS configuration")]
 	InvalidTLSConfig,
-	#[error(display = "Token error: {}", _0)]
-	Token(#[error(source)] reqwest::header::InvalidHeaderValue),
+	#[error("Token error: {0}")]
+	Token(#[from] reqwest::header::InvalidHeaderValue),
 }
@@ -229,13 +229,11 @@ impl LayoutManager {
 	}
 
 	/// Save cluster layout data to disk
-	async fn save_cluster_layout(&self) -> Result<(), Error> {
+	async fn save_cluster_layout(&self) {
 		let layout = self.layout.read().unwrap().inner().clone();
-		self.persist_cluster_layout
-			.save_async(&layout)
-			.await
-			.expect("Cannot save current cluster layout");
-		Ok(())
+		if let Err(e) = self.persist_cluster_layout.save_async(&layout).await {
+			error!("Failed to save cluster_layout: {}", e);
+		}
 	}
 
 	fn broadcast_update(self: &Arc<Self>, rpc: SystemRpc) {
@@ -313,7 +311,7 @@ impl LayoutManager {
 
 			self.change_notify.notify_waiters();
 			self.broadcast_update(SystemRpc::AdvertiseClusterLayout(new_layout));
-			self.save_cluster_layout().await?;
+			self.save_cluster_layout().await;
 		}
 
 		Ok(SystemRpc::Ok)
@@ -328,7 +326,7 @@ impl LayoutManager {
 		if let Some(new_trackers) = self.merge_layout_trackers(trackers) {
 			self.change_notify.notify_waiters();
 			self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(new_trackers));
-			self.save_cluster_layout().await?;
+			self.save_cluster_layout().await;
 		}
 
 		Ok(SystemRpc::Ok)
@@ -507,7 +507,7 @@ impl LayoutVersion {
 		g.compute_maximal_flow()?;
 		if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 {
 			return Err(Error::Message(
-				"The storage capacity of he cluster is to small. It is \
+				"The storage capacity of the cluster is too small. It is \
 				impossible to store partitions of size 1."
 					.into(),
 			));
@@ -1,6 +1,6 @@
 [package]
 name = "garage_table"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -367,7 +367,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
 		}
 	}
 
-	pub fn gc_todo_len(&self) -> Result<usize, Error> {
-		Ok(self.gc_todo.len()?)
+	pub fn gc_todo_approximate_len(&self) -> Result<usize, Error> {
+		Ok(self.gc_todo.approximate_len()?)
 	}
 }
@@ -313,7 +313,7 @@ impl<F: TableSchema, R: TableReplication> Worker for GcWorker<F, R> {
 
 	fn status(&self) -> WorkerStatus {
 		WorkerStatus {
-			queue_length: Some(self.gc.data.gc_todo_len().unwrap_or(0) as u64),
+			queue_length: Some(self.gc.data.gc_todo_approximate_len().unwrap_or(0) as u64),
 			..Default::default()
 		}
 	}
@@ -287,12 +287,12 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
 		MerkleNode::decode_opt(&ent)
 	}
 
-	pub fn merkle_tree_len(&self) -> Result<usize, Error> {
-		Ok(self.data.merkle_tree.len()?)
+	pub fn merkle_tree_approximate_len(&self) -> Result<usize, Error> {
+		Ok(self.data.merkle_tree.approximate_len()?)
 	}
 
-	pub fn todo_len(&self) -> Result<usize, Error> {
-		Ok(self.data.merkle_todo.len()?)
+	pub fn todo_approximate_len(&self) -> Result<usize, Error> {
+		Ok(self.data.merkle_todo.approximate_len()?)
 	}
 }
@@ -306,7 +306,7 @@ impl<F: TableSchema, R: TableReplication> Worker for MerkleWorker<F, R> {
 
 	fn status(&self) -> WorkerStatus {
 		WorkerStatus {
-			queue_length: Some(self.0.todo_len().unwrap_or(0) as u64),
+			queue_length: Some(self.0.todo_approximate_len().unwrap_or(0) as u64),
 			..Default::default()
 		}
 	}
@@ -34,7 +34,7 @@ impl TableMetrics {
 			.u64_value_observer(
 				"table.size",
 				move |observer| {
-					if let Ok(value) = store.len() {
+					if let Ok(value) = store.approximate_len() {
 						observer.observe(
 							value as u64,
 							&[KeyValue::new("table_name", table_name)],
@@ -48,7 +48,7 @@ impl TableMetrics {
 			.u64_value_observer(
 				"table.merkle_tree_size",
 				move |observer| {
-					if let Ok(value) = merkle_tree.len() {
+					if let Ok(value) = merkle_tree.approximate_len() {
 						observer.observe(
 							value as u64,
 							&[KeyValue::new("table_name", table_name)],
@@ -62,7 +62,7 @@ impl TableMetrics {
 			.u64_value_observer(
 				"table.merkle_updater_todo_queue_length",
 				move |observer| {
-					if let Ok(v) = merkle_todo.len() {
+					if let Ok(v) = merkle_todo.approximate_len() {
 						observer.observe(
 							v as u64,
 							&[KeyValue::new("table_name", table_name)],
@@ -76,7 +76,7 @@ impl TableMetrics {
 			.u64_value_observer(
 				"table.gc_todo_queue_length",
 				move |observer| {
-					if let Ok(value) = gc_todo.len() {
+					if let Ok(value) = gc_todo.approximate_len() {
 						observer.observe(
 							value as u64,
 							&[KeyValue::new("table_name", table_name)],
@@ -27,7 +27,7 @@ impl<F: TableSchema, R: TableReplication> Worker for InsertQueueWorker<F, R> {
 
 	fn status(&self) -> WorkerStatus {
 		WorkerStatus {
-			queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64),
+			queue_length: Some(self.0.data.insert_queue.approximate_len().unwrap_or(0) as u64),
 			..Default::default()
 		}
 	}
@@ -1,6 +1,6 @@
 [package]
 name = "garage_util"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -21,7 +21,7 @@ arc-swap.workspace = true
 async-trait.workspace = true
 blake2.workspace = true
 bytesize.workspace = true
-err-derive.workspace = true
+thiserror.workspace = true
 hexdump.workspace = true
 xxhash-rust.workspace = true
 hex.workspace = true
@@ -115,32 +115,39 @@ impl WorkerProcessor {
 						trace!("{} (TID {}): {:?}", worker.worker.name(), worker.task_id, worker.state);
 
 						// Save worker info
-						let mut wi = self.worker_info.lock().unwrap();
-						match wi.get_mut(&worker.task_id) {
-							Some(i) => {
-								i.state = worker.state;
-								i.status = worker.worker.status();
-								i.errors = worker.errors;
-								i.consecutive_errors = worker.consecutive_errors;
-								if worker.last_error.is_some() {
-									i.last_error = worker.last_error.take();
-								}
-							}
-							None => {
-								wi.insert(worker.task_id, WorkerInfo {
-									name: worker.worker.name(),
-									state: worker.state,
-									status: worker.worker.status(),
-									errors: worker.errors,
-									consecutive_errors: worker.consecutive_errors,
-									last_error: worker.last_error.take(),
-								});
-							}
-						}
+						{
+							let mut wi = self.worker_info.lock().unwrap();
+							match wi.get_mut(&worker.task_id) {
+								Some(i) => {
+									i.state = worker.state;
+									i.status = worker.worker.status();
+									i.errors = worker.errors;
+									i.consecutive_errors = worker.consecutive_errors;
+									if worker.last_error.is_some() {
+										i.last_error = worker.last_error.take();
+									}
+								}
+								None => {
+									wi.insert(worker.task_id, WorkerInfo {
+										name: worker.worker.name(),
+										state: worker.state,
+										status: worker.worker.status(),
+										errors: worker.errors,
+										consecutive_errors: worker.consecutive_errors,
+										last_error: worker.last_error.take(),
+									});
+								}
+							}
+						}
 
 						if worker.state == WorkerState::Done {
 							info!("Worker {} (TID {}) exited", worker.worker.name(), worker.task_id);
 						} else {
+							// Yield to the Tokio scheduler between consecutive Busy steps so
+							// that a worker which never suspends on its own cannot starve other tasks.
+							if worker.state == WorkerState::Busy {
+								tokio::task::yield_now().await;
+							}
 							workers.push(async move {
 								worker.step().await;
 								worker
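Two independent fixes appear in the hunk above: the worker-info update is wrapped in its own block, apparently so the `std::sync::MutexGuard` is dropped before the surrounding task awaits again, and workers that stay `Busy` now yield between steps. A miniature of the yielding pattern, since Tokio's scheduling is cooperative:

// A task that is always ready would monopolize its executor thread
// without an explicit suspension point.
async fn busy_loop(mut steps: u64) {
	while steps > 0 {
		steps -= 1; // purely synchronous work, never awaits on its own
		// Hand control back to the scheduler so other tasks get to run.
		tokio::task::yield_now().await;
	}
}

#[tokio::main]
async fn main() {
	busy_loop(1_000).await;
}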
@@ -45,6 +45,11 @@ pub struct Config {
 	)]
 	pub block_size: usize,
 
+	/// Maximum number of parallel block writes per PUT request
+	/// Higher values improve throughput but increase memory usage
+	/// Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
+	#[serde(default = "default_block_max_concurrent_writes_per_request")]
+	pub block_max_concurrent_writes_per_request: usize,
 	/// Number of replicas. Can be any positive integer, but uneven numbers are more favorable.
 	/// - 1 for single-node clusters, or to disable replication
 	/// - 3 is the recommended and supported setting.
@@ -75,6 +80,10 @@ pub struct Config {
 	)]
 	pub block_ram_buffer_max: usize,
 
+	/// Maximum number of concurrent reads of block files on disk
+	#[serde(default = "default_block_max_concurrent_reads")]
+	pub block_max_concurrent_reads: usize,
+
 	/// Skip the permission check of secret files. Useful when
 	/// POSIX ACLs (or more complex chmods) are used.
 	#[serde(default)]
@@ -122,6 +131,10 @@ pub struct Config {
 	#[serde(deserialize_with = "deserialize_capacity", default)]
 	pub lmdb_map_size: usize,
 
+	/// Fjall block cache size
+	#[serde(deserialize_with = "deserialize_capacity", default)]
+	pub fjall_block_cache_size: usize,
+
 	// -- APIs
 	/// Configuration for S3 api
 	pub s3_api: S3ApiConfig,
@@ -259,6 +272,9 @@ pub struct KubernetesDiscoveryConfig {
 	pub skip_crd: bool,
 }
 
+pub fn default_block_max_concurrent_writes_per_request() -> usize {
+	3
+}
 /// Read and parse configuration
 pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
 	let config = std::fs::read_to_string(config_file)?;
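The new config fields above rely on serde's named-function defaults: the function named in `#[serde(default = "…")]` supplies the value whenever the key is missing from the parsed file. A minimal illustration (assuming the serde derive feature and the toml crate; names are examples):

use serde::Deserialize;

fn default_max_concurrent_reads() -> usize {
	16
}

#[derive(Debug, Deserialize)]
struct DemoConfig {
	// falls back to default_max_concurrent_reads() when the key is absent
	#[serde(default = "default_max_concurrent_reads")]
	max_concurrent_reads: usize,
}

fn main() {
	let cfg: DemoConfig = toml::from_str("").unwrap();
	assert_eq!(cfg.max_concurrent_reads, 16);
}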
@@ -276,6 +292,9 @@ fn default_block_size() -> usize {
 fn default_block_ram_buffer_max() -> usize {
 	256 * 1024 * 1024
 }
+fn default_block_max_concurrent_reads() -> usize {
+	16
+}
 
 fn default_consistency_mode() -> String {
 	"consistent".into()
@@ -2,7 +2,7 @@
 use std::fmt;
 use std::io;
 
-use err_derive::Error;
+use thiserror::Error;
 
 use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
 
@@ -12,68 +12,61 @@ use crate::encode::debug_serialize;
 /// Regroup all Garage errors
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error(display = "IO error: {}", _0)]
-	Io(#[error(source)] io::Error),
+	#[error("IO error: {0}")]
+	Io(#[from] io::Error),
 
-	#[error(display = "Hyper error: {}", _0)]
-	Hyper(#[error(source)] hyper::Error),
+	#[error("Hyper error: {0}")]
+	Hyper(#[from] hyper::Error),
 
-	#[error(display = "HTTP error: {}", _0)]
-	Http(#[error(source)] http::Error),
+	#[error("HTTP error: {0}")]
+	Http(#[from] http::Error),
 
-	#[error(display = "Invalid HTTP header value: {}", _0)]
-	HttpHeader(#[error(source)] http::header::ToStrError),
+	#[error("Invalid HTTP header value: {0}")]
+	HttpHeader(#[from] http::header::ToStrError),
 
-	#[error(display = "Network error: {}", _0)]
-	Net(#[error(source)] garage_net::error::Error),
+	#[error("Network error: {0}")]
+	Net(#[from] garage_net::error::Error),
 
-	#[error(display = "DB error: {}", _0)]
-	Db(#[error(source)] garage_db::Error),
+	#[error("DB error: {0}")]
+	Db(#[from] garage_db::Error),
 
-	#[error(display = "Messagepack encode error: {}", _0)]
-	RmpEncode(#[error(source)] rmp_serde::encode::Error),
-	#[error(display = "Messagepack decode error: {}", _0)]
-	RmpDecode(#[error(source)] rmp_serde::decode::Error),
-	#[error(display = "JSON error: {}", _0)]
-	Json(#[error(source)] serde_json::error::Error),
-	#[error(display = "TOML decode error: {}", _0)]
-	TomlDecode(#[error(source)] toml::de::Error),
+	#[error("Messagepack encode error: {0}")]
+	RmpEncode(#[from] rmp_serde::encode::Error),
+	#[error("Messagepack decode error: {0}")]
+	RmpDecode(#[from] rmp_serde::decode::Error),
+	#[error("JSON error: {0}")]
+	Json(#[from] serde_json::error::Error),
+	#[error("TOML decode error: {0}")]
+	TomlDecode(#[from] toml::de::Error),
 
-	#[error(display = "Tokio join error: {}", _0)]
-	TokioJoin(#[error(source)] tokio::task::JoinError),
+	#[error("Tokio join error: {0}")]
+	TokioJoin(#[from] tokio::task::JoinError),
 
-	#[error(display = "Tokio semaphore acquire error: {}", _0)]
-	TokioSemAcquire(#[error(source)] tokio::sync::AcquireError),
+	#[error("Tokio semaphore acquire error: {0}")]
+	TokioSemAcquire(#[from] tokio::sync::AcquireError),
 
-	#[error(display = "Tokio broadcast receive error: {}", _0)]
-	TokioBcastRecv(#[error(source)] tokio::sync::broadcast::error::RecvError),
+	#[error("Tokio broadcast receive error: {0}")]
+	TokioBcastRecv(#[from] tokio::sync::broadcast::error::RecvError),
 
-	#[error(display = "Remote error: {}", _0)]
+	#[error("Remote error: {0}")]
 	RemoteError(String),
 
-	#[error(display = "Timeout")]
+	#[error("Timeout")]
 	Timeout,
 
-	#[error(
-		display = "Could not reach quorum of {} (sets={:?}). {} of {} request succeeded, others returned errors: {:?}",
-		_0,
-		_1,
-		_2,
-		_3,
-		_4
-	)]
+	#[error("Could not reach quorum of {0} (sets={1:?}). {2} of {3} request succeeded, others returned errors: {4:?}")]
 	Quorum(usize, Option<usize>, usize, usize, Vec<String>),
 
-	#[error(display = "Unexpected RPC message: {}", _0)]
+	#[error("Unexpected RPC message: {0}")]
 	UnexpectedRpcMessage(String),
 
-	#[error(display = "Corrupt data: does not match hash {:?}", _0)]
+	#[error("Corrupt data: does not match hash {0:?}")]
 	CorruptData(Hash),
 
-	#[error(display = "Missing block {:?}: no node returned a valid block", _0)]
+	#[error("Missing block {0:?}: no node returned a valid block")]
 	MissingBlock(Hash),
 
-	#[error(display = "{}", _0)]
+	#[error("{0}")]
 	Message(String),
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "1.1.0"
+version = "1.3.1"
 authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -20,7 +20,7 @@ garage_model.workspace = true
 garage_util.workspace = true
 garage_table.workspace = true
 
-err-derive.workspace = true
+thiserror.workspace = true
 tracing.workspace = true
 percent-encoding.workspace = true
 
@@ -1,6 +1,6 @@
-use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
+use thiserror::Error;
 
 use garage_api_common::generic_server::ApiError;
 
@@ -8,15 +8,15 @@ use garage_api_common::generic_server::ApiError;
 #[derive(Debug, Error)]
 pub enum Error {
 	/// An error received from the API crate
-	#[error(display = "API error: {}", _0)]
+	#[error("API error: {0}")]
 	ApiError(garage_api_s3::error::Error),
 
 	/// The file does not exist
-	#[error(display = "Not found")]
+	#[error("Not found")]
 	NotFound,
 
 	/// The client sent a request without host, or with unsupported method
-	#[error(display = "Bad request: {}", _0)]
+	#[error("Bad request: {0}")]
 	BadRequest(String),
 }
@@ -397,10 +397,30 @@ fn error_to_res(e: Error) -> Response<BoxBody<Error>> {
 	// was a HEAD request or we couldn't get the error document)
 	// We do NOT enter this code path when returning the bucket's
 	// error document (this is handled in serve_file)
-	let body = string_body(format!("{}\n", e));
-	let mut http_error = Response::new(body);
+	let mut body_str = format!(
+		r"<title>{http_code} {code_text}</title>
+<h1>{http_code} {code_text}</h1>",
+		http_code = e.http_status_code().as_u16(),
+		code_text = e.http_status_code().canonical_reason().unwrap_or("Unknown"),
+	);
+	if let Error::ApiError(ref err) = e {
+		body_str.push_str(&format!(
+			r"
+<ul>
+<li>Code: {s3_code}</li>
+<li>Message: {s3_message}.</li>
+</ul>",
+			s3_code = err.aws_code(),
+			s3_message = err,
+		));
+	}
+	let mut http_error = Response::new(string_body(body_str));
 	*http_error.status_mut() = e.http_status_code();
 	e.add_headers(http_error.headers_mut());
+	http_error.headers_mut().insert(
+		http::header::CONTENT_TYPE,
+		"text/html; charset=utf-8".parse().unwrap(),
+	);
 	http_error
 }