Mirror of https://git.deuxfleurs.fr/Deuxfleurs/garage.git (synced 2026-05-15 13:46:54 -04:00)

Compare commits: 4 commits, main-v1 ... v1.99.3-in
| Author | SHA1 | Date |
|---|---|---|
| | ca2a190b2f | |
| | b275dc5000 | |
| | 0a48d504fe | |
| | c834f3f024 | |
96 changed files with 2429 additions and 2953 deletions
@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - push
@@ -12,32 +9,27 @@ when:
 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.fmt
+      - nix-shell --attr devShell --run "cargo fmt -- --check"

   - name: build
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.dev

   - name: unit + func tests (lmdb)
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-lmdb

   - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-sqlite

-  - name: unit + func tests (fjall)
-    image: nixpkgs/nix:nixos-24.05
-    commands:
-      - nix-build -j4 --attr flakePackages.tests-fjall
-
   - name: integration tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.dev
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - deployment
@@ -11,7 +8,7 @@ depends_on:
 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       AWS_ACCESS_KEY_ID:
         from_secret: garagehq_aws_access_key_id
@@ -22,7 +19,7 @@ steps:
       - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       DOCKER_AUTH:
         from_secret: docker_auth
@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - deployment
@@ -19,17 +16,17 @@ matrix:
 steps:
   - name: build
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"

   - name: integration tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
     when:
@@ -39,7 +36,7 @@ steps:
       ARCH: i386

   - name: upgrade tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
     when:
@@ -47,7 +44,7 @@ steps:
       ARCH: amd64

   - name: push static binary
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
       AWS_ACCESS_KEY_ID:
@@ -58,7 +55,7 @@ steps:
       - nix-shell --attr ci --run "to_s3"

   - name: docker build and publish
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
Cargo.lock — 1918 changed lines (generated). File diff suppressed because it is too large.
Cargo.toml — 46 changed lines
@@ -24,18 +24,18 @@ default-members = ["src/garage"]
 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.3.1", path = "src/api/common" }
-garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
-garage_block = { version = "1.3.1", path = "src/block" }
-garage_db = { version = "1.3.1", path = "src/db", default-features = false }
-garage_model = { version = "1.3.1", path = "src/model", default-features = false }
-garage_net = { version = "1.3.1", path = "src/net" }
-garage_rpc = { version = "1.3.1", path = "src/rpc" }
-garage_table = { version = "1.3.1", path = "src/table" }
-garage_util = { version = "1.3.1", path = "src/util" }
-garage_web = { version = "1.3.1", path = "src/web" }
+garage_api_common = { version = "1.1.0", path = "src/api/common" }
+garage_api_admin = { version = "1.1.0", path = "src/api/admin" }
+garage_api_s3 = { version = "1.1.0", path = "src/api/s3" }
+garage_api_k2v = { version = "1.1.0", path = "src/api/k2v" }
+garage_block = { version = "1.1.0", path = "src/block" }
+garage_db = { version = "1.1.0", path = "src/db", default-features = false }
+garage_model = { version = "1.1.0", path = "src/model", default-features = false }
+garage_net = { version = "1.1.0", path = "src/net" }
+garage_rpc = { version = "1.1.0", path = "src/rpc" }
+garage_table = { version = "1.1.0", path = "src/table" }
+garage_util = { version = "1.1.0", path = "src/util" }
+garage_web = { version = "1.1.0", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -52,6 +52,7 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
+err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"
@@ -64,7 +65,6 @@ md-5 = "0.10"
 mktemp = "0.5"
-nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
 parking_lot = "0.12"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
@@ -83,14 +83,12 @@ pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
 syslog-tracing = "0.3"
 tracing = "0.1"
-tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.37"
+rusqlite = "0.31.0"
 r2d2 = "0.8"
-r2d2_sqlite = "0.31"
-fjall = "2.4"
+r2d2_sqlite = "0.24"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -136,7 +134,7 @@ prometheus = "0.13"
 aws-sigv4 = { version = "1.1", default-features = false }
 hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
 log = "0.4"
-thiserror = "2.0"
+thiserror = "1.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"
@@ -146,8 +144,12 @@ aws-smithy-runtime = { version = "1.8", default-features = false, features = ["t
 aws-sdk-config = { version = "1.62", default-features = false }
 aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }

+[profile.dev]
+#lto = "thin" # disabled for now, adds 2-4 min to each CI build
+lto = "off"
+
 [profile.release]
-lto = "thin"
-codegen-units = 16
-opt-level = 3
-strip = "debuginfo"
+lto = true
+codegen-units = 1
+opt-level = "s"
+strip = true
@@ -12,7 +12,7 @@ In this section, we cover the following web applications:
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
-| [Pixelfed](#pixelfed) | ✅ | Natively supported |
+| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
 | [Pleroma](#pleroma) | ❓ | Not yet tested |
 | [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
 | [Funkwhale](#funkwhale) | ❓ | Not yet tested |
@@ -191,10 +191,10 @@ garage key create peertube-key
 Keep the Key ID and the Secret key in a pad, they will be needed later.

-We need two buckets, one for normal videos (named peertube-videos) and one for webtorrent videos (named peertube-playlists).
+We need two buckets, one for normal videos (named peertube-video) and one for webtorrent videos (named peertube-playlist).
 ```bash
 garage bucket create peertube-videos
-garage bucket create peertube-playlists
+garage bucket create peertube-playlist
 ```

 Now we allow our key to read and write on these buckets:
@@ -253,7 +253,7 @@ object_storage:
     proxify_private_files: false

   streaming_playlists:
-    bucket_name: 'peertube-playlists'
+    bucket_name: 'peertube-playlist'

     # Keep it empty for our example
     prefix: ''
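For reference, the permission step announced just above ("Now we allow our key to read and write on these buckets") lies outside this hunk; with the bucket and key names from the example, it would look like this (same `garage bucket allow` syntax as in the backup guide further down this page):

```bash
# Grant the PeerTube key read/write access to both buckets
garage bucket allow peertube-videos --read --write --key peertube-key
garage bucket allow peertube-playlist --read --write --key peertube-key
```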
@@ -161,49 +161,3 @@ kopia repository validate-provider
 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.
-
-## Plakar
-
-Create your key and bucket on Garage server:
-
-```bash
-garage key create my-plakar-key
-garage bucket create plakar-backups
-garage bucket allow plakar-backups --read --write --key my-plakar-key
-…
-```
-
-On Plakar server, add your Garage as a storage location:
-```bash
-plakar store add garageS3 s3://my-garage.tld/plakar-backups \
-    region=garage # Or as you've specified in garage.toml \
-    access_key=<Key ID from "garage key info my-plakar-key"> \
-    secret_access_key=<Secret key from "garage key info my-plakar-key">
-```
-
-Then create the repository.
-```bash
-plakar at @garageS3 create -plaintext # Unencrypted
-# or
-plakar at @garageS3 create # encrypted
-```
-
-If you encrypt your backups (Plakar default), you will need to define a strong passphrase. Do not forget to save your password safely. It will be needed to decrypt your backups.
-
-After the repository has been created, check that everything works as expected (that might give an empty result as no file has been added yet, but no error message):
-```bash
-plakar at @garageS3 check
-```
-
-Now that everything is configured, you can use Garage as your backups storage. For instance, sync it with a local backup storage:
-```bash
-$ plakar at ~/backups sync to @garageS3
-```
-
-Or list the S3 storage content:
-```bash
-$ plakar at @garageS3 ls
-```
-
-More information in Plakar documentation: https://www.plakar.io/docs/main/quickstart/
@@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.
 ## Comparison of Ansible roles

-| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
-|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
-| **Runtime**                        | Systemd            | Docker       | Systemd                         |
-| **Target OS**                      | Any Linux          | Any Linux    | Any Linux                       |
-| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64          |
-| **Additional software**            | None               | Traefik      | Nginx and Keepalived (optional) |
-| **Automatic node connection**      | ❌                 | ✅           | ✅                              |
-| **Layout management**              | ❌                 | ✅           | ✅                              |
-| **Manage buckets & keys**          | ❌                 | ✅ (basic)   | ✅                              |
-| **Allow custom Garage config**     | ✅                 | ❌           | ❌                              |
-| **Facilitate Garage upgrades**     | ✅                 | ❌           | ✅                              |
-| **Multiple instances on one host** | ✅                 | ✅           | ❌                              |
+| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
+|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
+| **Runtime**                        | Systemd            | Docker       |
+| **Target OS**                      | Any Linux          | Any Linux    |
+| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 |
+| **Additional software**            | None               | Traefik      |
+| **Automatic node connection**      | ❌                 | ✅           |
+| **Layout management**              | ❌                 | ✅           |
+| **Manage buckets & keys**          | ❌                 | ✅ (basic)   |
+| **Allow custom Garage config**     | ✅                 | ❌           |
+| **Facilitate Garage upgrades**     | ✅                 | ❌           |
+| **Multiple instances on one host** | ✅                 | ✅           |

 ## zorun/ansible-role-garage
@@ -49,15 +49,3 @@ structured DNS names, etc).
 As a result, this role makes it easier to start with Garage on Ansible,
 but is less flexible.
-
-## eddster2309/ansible-role-garage
-
-[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
-
-This role is an opinionated but customisable role using the official Garage
-static binaries and only requires Systemd. As such it should work on any
-Linux based host. It includes all the necessary configuration to
-automatically set up a clustered Garage deployment. Most Garage
-configuration options are exposed through Ansible variables, so while you
-can't provide a custom config you can get very close. It can optionally
-install an HA nginx deployment with Keepalived.
@@ -15,10 +15,9 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage/garage.toml`. You can run
-Garage using: `rc-service garage start`.
-
-If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage.toml`. You can run
+Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
+will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@@ -27,7 +26,7 @@ it's stable).
 ## Arch Linux

-Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).
+Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).

 ## FreeBSD
@@ -11,7 +11,7 @@ Firstly clone the repository:
 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/script/helm
+cd garage/scripts/helm
 ```

 Deploy with default options:
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.3.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v1.1.0`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v1.1.0` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.3.0
+sudo docker pull dxflrs/garage:v1.1.0
 ```

 ## Deploying and configuring Garage
@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.3.0
+  dxflrs/garage:v1.1.0
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.3.0
+    image: dxflrs/garage:v1.1.0
     network_mode: "host"
     restart: unless-stopped
     volumes:
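For context, the `docker-compose.yml` that the last hunk edits has roughly this overall shape (a sketch: the volume paths follow the `docker run` example above, and only the image tag differs between the two sides of the diff):

```yaml
version: "3"
services:
  garage:
    image: dxflrs/garage:v1.1.0  # v1.3.0 on the other side of this diff
    network_mode: "host"
    restart: unless-stopped
    volumes:
      - /etc/garage.toml:/etc/garage.toml
      - /var/lib/garage/meta:/var/lib/garage/meta
      - /var/lib/garage/data:/var/lib/garage/data
```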
@@ -132,7 +132,7 @@ docker run \
   -v /path/to/garage.toml:/etc/garage.toml \
   -v /path/to/garage/meta:/var/lib/garage/meta \
   -v /path/to/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.3.0
+  dxflrs/garage:v1.1.0
 ```

 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
@@ -182,12 +182,11 @@ ID  Hostname  Address  Tag  Zone  Capacity
 ## Creating a cluster layout

 Creating a cluster layout for a Garage deployment means informing Garage
-of the disk space available on each node of the cluster, `-c`,
-as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.
+of the disk space available on each node of the cluster
+as well as the zone (e.g. datacenter) each machine is located in.

-For our test deployment, we have only one node with zone named `dc1` and a
-capacity of `1G`, though the capacity is ignored for a single node deployment
-and can be changed later when adding new nodes.
+For our test deployment, we are using only one node. The way in which we configure
+it does not matter, you can simply write:

 ```bash
 garage layout assign -z dc1 -c 1G <node_id>
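A staged layout change only takes effect once it is committed; the usual follow-up to `garage layout assign` (assuming this is the first layout version of the cluster) is:

```bash
garage layout show              # review the staged change
garage layout apply --version 1 # commit it as layout version 1
```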
@@ -24,8 +24,7 @@ db_engine = "lmdb"
 block_size = "1M"
 block_ram_buffer_max = "256MiB"
-block_max_concurrent_reads = 16
-block_max_concurrent_writes_per_request = 10

 lmdb_map_size = "1T"

 compression_level = 1
@@ -94,32 +93,30 @@ The following gives details about each available configuration option.
 [Environment variables](#env_variables).

-Top-level configuration options, in alphabetical order:
-[`allow_punycode`](#allow_punycode),
+Top-level configuration options:
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
-[`block_max_concurrent_reads`](#block_max_concurrent_reads),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
-[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
-[`consistency_mode`](#consistency_mode),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
-[`metadata_snapshots_dir`](#metadata_snapshots_dir),
 [`replication_factor`](#replication_factor),
+[`consistency_mode`](#consistency_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
 [`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
-[`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
-[`use_local_tz`](#use_local_tz).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
+[`allow_punycode`](#allow_punycode).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -156,17 +153,13 @@ The `[admin]` section:
 ### Environment variables {#env_variables}

-The following configuration parameters must be specified as environment variables,
-they do not exist in the configuration file:
+The following configuration parameter must be specified as an environment
+variable, it does not exist in the configuration file:

 - `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
   Garage daemon send its logs to `syslog` (using the libc `syslog` function)
   instead of printing to stderr.

-- `GARAGE_LOG_TO_JOURNALD` (since `v1.2.0`): set this to `1` or `true` to make the
-  Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
-  instead of printing to stderr.
-
 The following environment variables can be used to override the corresponding
 values in the configuration file:
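As a usage sketch, either logging variable is set in the daemon's environment rather than in `garage.toml`; for example (config path is an assumption):

```bash
# Send Garage logs to syslog instead of stderr (GARAGE_LOG_TO_SYSLOG, since v0.9.4)
GARAGE_LOG_TO_SYSLOG=1 garage -c /etc/garage.toml server
```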
@@ -178,7 +171,7 @@ values in the configuration file:
 ### Top-level configuration options

-#### `replication_factor` (since `v1.0.0`) {#replication_factor}
+#### `replication_factor` {#replication_factor}

 The replication factor can be any positive integer smaller or equal the node count in your cluster.
 The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
@@ -226,7 +219,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
 routine operation for Garage, although we cannot guarantee you that everything
 will go right in such an extreme scenario.

-#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}
+#### `consistency_mode` {#consistency_mode}

 The consistency mode setting determines the read and write behaviour of your cluster.
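Both settings live at the top level of `garage.toml`; a minimal sketch (values are illustrative, `"consistent"` being Garage's default mode):

```toml
replication_factor = 3
consistency_mode = "consistent"
```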
@@ -336,7 +329,6 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
 | --------- | ----------------- | ------------- |
 | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
 | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
-| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
 | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |

 Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
@@ -373,14 +365,6 @@ LMDB works very well, but is known to have the following limitations:
   so it is not the best choice for high-performance storage clusters,
   but it should work fine in many cases.

-- Fjall: a storage engine based on LSM trees, which theoretically allows for
-  higher write throughput than other storage engines that are based on B-trees.
-  Using Fjall could potentially improve Garage's performance significantly in
-  write-heavy workloads. **Support for Fjall is experimental at this point**,
-  we have added it to Garage for evaluation purposes only. **Do not use it for
-  production-critical workloads.**
-
 It is possible to convert Garage's metadata directory from one format to another
 using the `garage convert-db` command, which should be used as follows:
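The usage lines themselves fall outside this hunk. As documented for Garage, the invocation has roughly this shape (engine names and paths are placeholders to adapt):

```bash
garage convert-db -a lmdb   -i /var/lib/garage/meta/db.lmdb \
                  -b sqlite -o /var/lib/garage/meta/db.sqlite
```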
@@ -418,7 +402,6 @@ Here is how this option impacts the different database engines:
 |----------|------------------------------------|-------------------------------|
 | Sqlite   | `PRAGMA synchronous = OFF`         | `PRAGMA synchronous = NORMAL` |
 | LMDB     | `MDB_NOMETASYNC` + `MDB_NOSYNC`    | `MDB_NOMETASYNC`              |
-| Fjall    | default options                    | not supported                 |

 Note that the Sqlite database is always ran in `WAL` mode (`PRAGMA journal_mode = WAL`).
@@ -525,37 +508,6 @@ node.

 The default value is 256MiB.

-#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
-
-The maximum number of blocks (individual files in the data directory) open
-simultaneously for reading.
-
-Reducing this number does not limit the number of data blocks that can be
-transferred through the network simultaneously. This mechanism was just added
-as a backpressure mechanism for HDD read speed: it helps avoid a situation
-where too many requests are coming in and Garage is reading too many block
-files simultaneously, thus not making timely progress on any of the reads.
-
-When a request to read a data block comes in through the network, the request
-awaits for one of the `block_max_concurrent_reads` slots to be available
-(internally implemented using a Semaphore object). Once it has acquired a read
-slot, it reads the entire block file to RAM and frees the slot as soon as the
-block file is finished reading. Only after the slot is released will the
-block's data start being transferred over the network. If the request fails to
-acquire a reading slot within 15 seconds, it fails with a timeout error.
-Timeout events can be monitored through the `block_read_semaphore_timeouts`
-metric in Prometheus: a non-zero number of such events indicates an I/O
-bottleneck on HDD read speed.
-
-#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
-
-This parameter is designed to adapt to the concurrent write performance of
-different storage media. Maximum number of parallel block writes per put request:
-higher values improve throughput but increase memory usage.
-
-Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.
-
 #### `lmdb_map_size` {#lmdb_map_size}

 This parameter can be used to set the map size used by LMDB,
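The read-slot mechanism described in the deleted section above maps naturally onto tokio's semaphore. A minimal illustrative sketch, not Garage's actual code (function and variable names are assumptions; the Prometheus metric hook is omitted):

```rust
use std::time::Duration;
use tokio::sync::Semaphore;

/// Read one block file while holding one of N read slots.
/// `sem` would be created once, e.g. `Semaphore::new(block_max_concurrent_reads)`.
async fn read_block_gated(sem: &Semaphore, path: &std::path::Path) -> std::io::Result<Vec<u8>> {
    // Wait at most 15 seconds for a read slot, then fail with a timeout error.
    let permit = tokio::time::timeout(Duration::from_secs(15), sem.acquire())
        .await
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "read slot timeout"))?
        .expect("semaphore closed");
    // Read the whole block file to RAM while holding the slot...
    let data = tokio::fs::read(path).await?;
    drop(permit); // ...and free the slot before the data goes out on the network.
    Ok(data)
}
```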
@@ -23,17 +23,17 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
 - 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.

 ## High-level features

 | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
 |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
-| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
+| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
 | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
 | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓ | ✅ |
 | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌ | ✅ | ✅ | ✅ |
 | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌ | ✅ | ✅ | ✅(❓) |
 | [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
 | [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |

 *Note:* OpenIO does not says if it supports presigned URLs. Because it is part
 of signature v4 and they claim they support it without additional precisions,
@@ -70,7 +70,7 @@ Example response body:
 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.3.0",
+  "garageVersion": "v1.1.0",
   "garageFeatures": [
     "k2v",
     "lmdb",
flake.lock — 16 changed lines (generated)
@@ -50,17 +50,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1763977559,
-        "narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
+        "lastModified": 1736692550,
+        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
+        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
+        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
         "type": "github"
       }
     },
@@ -80,17 +80,17 @@
       ]
     },
     "locked": {
-      "lastModified": 1763952169,
-      "narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
+      "lastModified": 1738549608,
+      "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
+      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
       "type": "github"
     },
     "original": {
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
+      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
       "type": "github"
     }
   },
flake.nix — 19 changed lines
@@ -2,13 +2,13 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 25.05 as of 2025-11-24
+  # Nixpkgs 24.11 as of 2025-01-12
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";
+    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";

-  # Rust overlay as of 2025-11-24
+  # Rust overlay as of 2025-02-03
   inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
+    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
   inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

   inputs.crane.url = "github:ipetkov/crane";
@@ -30,10 +30,6 @@
       inherit system nixpkgs crane rust-overlay extraTestEnv;
       release = false;
     }).garage-test;
-    lints = (compile {
-      inherit system nixpkgs crane rust-overlay;
-      release = false;
-    });
   in
   {
     packages = {
@@ -57,13 +53,6 @@
       tests-sqlite = testWith {
         GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
       };
-      tests-fjall = testWith {
-        GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
-      };
-
-      # lints (fmt, clippy)
-      fmt = lints.garage-cargo-fmt;
-      clippy = lints.garage-cargo-clippy;
     };

     # ---- developpment shell, for making native builds only ----
@@ -48,7 +48,7 @@ let

   inherit (pkgs) lib stdenv;

-  toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
+  toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
     targets = lib.optionals (target != null) [ rustTarget ];
     extensions = [
       "rust-src"
@@ -68,13 +68,12 @@ let
   rootFeatures = if features != null then
     features
   else
-    ([ "bundled-libs" "lmdb" "sqlite" "fjall" "k2v" ] ++ (lib.optionals release [
+    ([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
      "consul-discovery"
      "kubernetes-discovery"
      "metrics"
      "telemetry-otlp"
      "syslog"
-     "journald"
    ]));

   featuresStr = lib.concatStringsSep "," rootFeatures;
@@ -190,15 +189,4 @@ in rec {
       pkgs.cacert
     ];
   } // extraTestEnv);
-
-  # ---- source code linting ----
-
-  garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
-    cargoExtraArgs = "";
-  });
-
-  garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
-    cargoArtifacts = garage-deps;
-    cargoClippyExtraArgs = "--all-targets -- -D warnings";
-  });
 }
@@ -1,7 +1,6 @@
 export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
 export AWS_DEFAULT_REGION='garage'
-export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
 # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
 function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
@@ -2,8 +2,8 @@ apiVersion: v2
 name: garage
 description: S3-compatible object store for small self-hosted geo-distributed deployments
 type: application
-version: 0.7.3
-appVersion: "v1.3.1"
+version: 0.7.0
+appVersion: "v1.1.0"
 home: https://garagehq.deuxfleurs.fr/
 icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
@@ -1,6 +1,6 @@
 # garage

 [chart version/appVersion badge images updated — image content not preserved in this text view]

 S3-compatible object store for small self-hosted geo-distributed deployments
@@ -4,10 +4,6 @@ metadata:
   name: {{ include "garage.fullname" . }}
   labels:
     {{- include "garage.labels" . | nindent 4 }}
-  {{- with .Values.service.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
 spec:
   type: {{ .Values.service.type }}
   ports:
@@ -124,8 +124,6 @@ service:
   #   - NodePort (+ Ingress)
   #   - LoadBalancer
   type: ClusterIP
-  # -- Annotations to add to the service
-  annotations: {}
   s3:
     api:
       port: 3900
@@ -34,8 +34,6 @@ in
     jq
   ];
   shellHook = ''
-    export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
-
     function to_s3 {
       aws \
         --endpoint-url https://garage.deuxfleurs.fr \
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_admin"
-version = "1.3.1"
+version = "1.1.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -22,7 +22,7 @@ garage_api_common.workspace = true
 argon2.workspace = true
 async-trait.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 tracing.workspace = true
@@ -419,11 +419,17 @@ pub async fn handle_update_bucket(
     if let Some(wa) = req.website_access {
         if wa.enabled {
+            let (redirect_all, routing_rules) = match state.website_config.get() {
+                Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
+                None => (None, Vec::new()),
+            };
             state.website_config.update(Some(WebsiteConfig {
                 index_document: wa.index_document.ok_or_bad_request(
                     "Please specify indexDocument when enabling website access.",
                 )?,
                 error_document: wa.error_document,
+                redirect_all,
+                routing_rules,
             }));
         } else {
             if wa.index_document.is_some() || wa.error_document.is_some() {
@@ -1,8 +1,8 @@
 use std::convert::TryFrom;

+use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
-use thiserror::Error;

 pub use garage_model::helper::error::Error as HelperError;
@@ -16,17 +16,20 @@ use garage_api_common::helpers::*;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("{0}")]
+    #[error(display = "{}", _0)]
     /// Error from common error
-    Common(#[from] CommonError),
+    Common(#[error(source)] CommonError),

     // Category: cannot process
     /// The API access key does not exist
-    #[error("Access key not found: {0}")]
+    #[error(display = "Access key not found: {}", _0)]
     NoSuchAccessKey(String),

     /// In Import key, the key already exists
-    #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
+    #[error(
+        display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
+        _0
+    )]
     KeyAlreadyExists(String),
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_common"
-version = "1.3.1"
+version = "1.1.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -24,7 +24,7 @@ chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
 crypto-common.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 hmac.workspace = true
 md-5.workspace = true
@@ -1,7 +1,7 @@
 use std::convert::TryFrom;

+use err_derive::Error;
 use hyper::StatusCode;
-use thiserror::Error;

 use garage_util::error::Error as GarageError;
@@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
 pub enum CommonError {
     // ---- INTERNAL ERRORS ----
     /// Error related to deeper parts of Garage
-    #[error("Internal error: {0}")]
-    InternalError(#[from] GarageError),
+    #[error(display = "Internal error: {}", _0)]
+    InternalError(#[error(source)] GarageError),

     /// Error related to Hyper
-    #[error("Internal error (Hyper error): {0}")]
-    Hyper(#[from] hyper::Error),
+    #[error(display = "Internal error (Hyper error): {}", _0)]
+    Hyper(#[error(source)] hyper::Error),

     /// Error related to HTTP
-    #[error("Internal error (HTTP error): {0}")]
-    Http(#[from] http::Error),
+    #[error(display = "Internal error (HTTP error): {}", _0)]
+    Http(#[error(source)] http::Error),

     // ---- GENERIC CLIENT ERRORS ----
     /// Proper authentication was not provided
-    #[error("Forbidden: {0}")]
+    #[error(display = "Forbidden: {}", _0)]
     Forbidden(String),

     /// Generic bad request response with custom message
-    #[error("Bad request: {0}")]
+    #[error(display = "Bad request: {}", _0)]
     BadRequest(String),

     /// The client sent a header with invalid value
-    #[error("Invalid header value: {0}")]
-    InvalidHeader(#[from] hyper::header::ToStrError),
+    #[error(display = "Invalid header value: {}", _0)]
+    InvalidHeader(#[error(source)] hyper::header::ToStrError),

     // ---- SPECIFIC ERROR CONDITIONS ----
     // These have to be error codes referenced in the S3 spec here:
     // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
     /// The bucket requested don't exists
-    #[error("Bucket not found: {0}")]
+    #[error(display = "Bucket not found: {}", _0)]
     NoSuchBucket(String),

     /// Tried to create a bucket that already exist
-    #[error("Bucket already exists")]
+    #[error(display = "Bucket already exists")]
     BucketAlreadyExists,

     /// Tried to delete a non-empty bucket
-    #[error("Tried to delete a non-empty bucket")]
+    #[error(display = "Tried to delete a non-empty bucket")]
     BucketNotEmpty,

     // Category: bad request
     /// Bucket name is not valid according to AWS S3 specs
-    #[error("Invalid bucket name: {0}")]
+    #[error(display = "Invalid bucket name: {}", _0)]
     InvalidBucketName(String),
 }
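For readers tracking this migration: the two derive macros express the same error metadata with different attribute syntax. A minimal side-by-side sketch (illustrative only, not Garage code):

```rust
// thiserror (the main-v1 side): format string with positional fields,
// #[from] for the wrapped error source.
#[derive(Debug, thiserror::Error)]
enum ErrA {
    #[error("Internal error: {0}")]
    Internal(#[from] std::io::Error),
}

// err-derive (the other side): `display = ...` attribute with `_0` placeholders,
// #[error(source)] to mark the wrapped error source.
#[derive(Debug, err_derive::Error)]
enum ErrB {
    #[error(display = "Internal error: {}", _0)]
    Internal(#[error(source)] std::io::Error),
}
```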
@@ -58,12 +58,6 @@ pub trait ApiHandler: Send + Sync + 'static {
         req: Request<IncomingBody>,
         endpoint: Self::Endpoint,
     ) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
-
-    /// Returns the key id used to authenticate this request. The ID returned must be safe to
-    /// log.
-    fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
-        None
-    }
 }

 pub struct ApiServer<A: ApiHandler> {
@@ -148,20 +142,19 @@ impl<A: ApiHandler> ApiServer<A> {
     ) -> Result<Response<BoxBody<A::Error>>, http::Error> {
         let uri = req.uri().clone();

-        let source = if let Ok(forwarded_for_ip_addr) =
+        if let Ok(forwarded_for_ip_addr) =
             forwarded_headers::handle_forwarded_for_headers(req.headers())
         {
-            format!("{forwarded_for_ip_addr} (via {addr})")
+            info!(
+                "{} (via {}) {} {}",
+                forwarded_for_ip_addr,
+                addr,
+                req.method(),
+                uri
+            );
         } else {
-            format!("{addr}")
-        };
-        // we only do this to log the access key, so we can discard any error
-        let key = self
-            .api_handler
-            .key_id_from_request(&req)
-            .map(|k| format!("(key {k}) "))
-            .unwrap_or_default();
-        info!("{source} {key}{} {uri}", req.method());
+            info!("{} {} {}", addr, req.method(), uri);
+        }
         debug!("{:?}", req);

         let tracer = opentelemetry::global::tracer("garage");
@@ -350,11 +343,7 @@ where
     while !*must_exit.borrow() {
         let (stream, client_addr) = tokio::select! {
-            acc = listener.accept() => match acc {
-                Ok(r) => r,
-                Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
-                Err(e) => return Err(e.into()),
-            },
+            acc = listener.accept() => acc?,
             _ = must_exit.changed() => continue,
         };
@@ -1,4 +1,4 @@
-use thiserror::Error;
+use err_derive::Error;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
@@ -6,21 +6,21 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInterna
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("{0}")]
+    #[error(display = "{}", _0)]
     /// Error from common error
     Common(CommonError),

     /// Authorization Header Malformed
-    #[error("Authorization header malformed, unexpected scope: {0}")]
+    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
     AuthorizationHeaderMalformed(String),

     // Category: bad request
     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error("Invalid UTF-8: {0}")]
-    InvalidUtf8Str(#[from] std::str::Utf8Error),
+    #[error(display = "Invalid UTF-8: {}", _0)]
+    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),

     /// The provided digest (checksum) value was invalid
-    #[error("Invalid digest: {0}")]
+    #[error(display = "Invalid digest: {}", _0)]
     InvalidDigest(String),
 }
@@ -104,7 +104,7 @@ async fn check_standard_signature(
     // Verify that all necessary request headers are included in signed_headers
     // The following must be included for all signatures:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
+    // - all x-amz-* headers used in the request
     // AWS also indicates that the Content-Type header should be signed if
     // it is used, but Minio client doesn't sign it so we don't check it for compatibility.
     let signed_headers = split_signed_headers(&authorization)?;
@@ -151,7 +151,7 @@ async fn check_presigned_signature(
     // Verify that all necessary request headers are included in signed_headers
     // For AWSv4 pre-signed URLs, the following must be included:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
+    // - all x-amz-* headers used in the request
     let signed_headers = split_signed_headers(&authorization)?;
     verify_signed_headers(request.headers(), &signed_headers)?;
@@ -268,9 +268,7 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
         return Err(Error::bad_request("Header `Host` should be signed"));
     }
     for (name, _) in headers.iter() {
-        // Enforce signature of all x-amz-* headers, except x-amz-content-sha256
-        // because it is included in the canonical request in all cases
-        if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
+        if name.as_str().starts_with("x-amz-") {
             if !signed_headers.contains(name) {
                 return Err(Error::bad_request(format!(
                     "Header `{}` should be signed",
@@ -419,7 +417,7 @@ pub async fn verify_v4(
 // ============ Authorization header, or X-Amz-* query params =========

 pub struct Authorization {
-    pub key_id: String,
+    key_id: String,
     scope: String,
     signed_headers: String,
     signature: String,
@@ -428,7 +426,7 @@ pub struct Authorization {
 }

 impl Authorization {
-    pub fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+    fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
         let authorization = headers
             .get(AUTHORIZATION)
             .ok_or_bad_request("Missing authorization header")?
@@ -470,7 +468,8 @@ impl Authorization {
         let date = headers
             .get(X_AMZ_DATE)
-            .ok_or_bad_request("Missing X-Amz-Date field")?
+            .ok_or_bad_request("Missing X-Amz-Date field")
+            .map_err(Error::from)?
             .to_str()?;
         let date = parse_date(date)?;
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_k2v"
-version = "1.3.1"
+version = "1.1.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
 garage_api_common.workspace = true

 base64.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 tracing.workspace = true

 futures.workspace = true
@@ -176,12 +176,6 @@ impl ApiHandler for K2VApiServer {

         Ok(resp_ok)
     }
-
-    fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
-        garage_api_common::signature::payload::Authorization::parse_header(req.headers())
-            .map(|auth| auth.key_id)
-            .ok()
-    }
 }

 impl ApiEndpoint for K2VApiEndpoint {
@@ -1,6 +1,6 @@
+use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
-use thiserror::Error;

 use garage_api_common::common_error::{commonErrorDerivative, CommonError};
 pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
@@ -14,38 +14,38 @@ use garage_api_common::signature::error::Error as SignatureError;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("{0}")]
+    #[error(display = "{}", _0)]
     /// Error from common error
-    Common(#[from] CommonError),
+    Common(#[error(source)] CommonError),

     // Category: cannot process
     /// Authorization Header Malformed
-    #[error("Authorization header malformed, unexpected scope: {0}")]
+    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
     AuthorizationHeaderMalformed(String),

     /// The provided digest (checksum) value was invalid
-    #[error("Invalid digest: {0}")]
+    #[error(display = "Invalid digest: {}", _0)]
     InvalidDigest(String),

     /// The object requested don't exists
-    #[error("Key not found")]
+    #[error(display = "Key not found")]
     NoSuchKey,

     /// Some base64 encoded data was badly encoded
-    #[error("Invalid base64: {0}")]
-    InvalidBase64(#[from] base64::DecodeError),
+    #[error(display = "Invalid base64: {}", _0)]
+    InvalidBase64(#[error(source)] base64::DecodeError),

     /// Invalid causality token
-    #[error("Invalid causality token")]
+    #[error(display = "Invalid causality token")]
     InvalidCausalityToken,

     /// The client asked for an invalid return format (invalid Accept header)
-    #[error("Not acceptable: {0}")]
+    #[error(display = "Not acceptable: {}", _0)]
     NotAcceptable(String),

     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error("Invalid UTF-8: {0}")]
-    InvalidUtf8Str(#[from] std::str::Utf8Error),
+    #[error(display = "Invalid UTF-8: {}", _0)]
+    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
 }

 commonErrorDerivative!(Error);
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api_s3"
-version = "1.3.1"
+version = "1.1.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -29,7 +29,7 @@ bytes.workspace = true
 chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 tracing.workspace = true
 md-5.workspace = true
@@ -226,7 +226,6 @@ impl ApiHandler for S3ApiServer {
             Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
             Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
             Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
-            Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
             Endpoint::ListObjects {
                 delimiter,
                 encoding_type,
@@ -343,12 +342,6 @@ impl ApiHandler for S3ApiServer {

         Ok(resp_ok)
     }
-
-    fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
-        garage_api_common::signature::payload::Authorization::parse_header(req.headers())
-            .map(|auth| auth.key_id)
-            .ok()
-    }
 }

 impl ApiEndpoint for S3ApiEndpoint {
@@ -5,7 +5,7 @@ use hyper::{Request, Response, StatusCode};
 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::Bucket;
 use garage_model::garage::Garage;
-use garage_model::key_table::{Key, KeyParams};
+use garage_model::key_table::Key;
 use garage_model::permission::BucketKeyPerm;
 use garage_table::util::*;
 use garage_util::crdt::*;
@@ -44,55 +44,6 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
         .body(string_body(xml))?)
 }

-pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
-    let ReqCtx {
-        bucket_id, api_key, ..
-    } = ctx;
-    let key_p = api_key.params().ok_or_internal_error(
-        "Key should not be in deleted state at this point (in handle_get_bucket_acl)",
-    )?;
-
-    let mut grants: Vec<s3_xml::Grant> = vec![];
-    let kp = api_key.bucket_permissions(&bucket_id);
-
-    if kp.allow_owner {
-        grants.push(s3_xml::Grant {
-            grantee: create_grantee(&key_p, &api_key),
-            permission: s3_xml::Value("FULL_CONTROL".to_string()),
-        });
-    } else {
-        if kp.allow_read {
-            grants.push(s3_xml::Grant {
-                grantee: create_grantee(&key_p, &api_key),
-                permission: s3_xml::Value("READ".to_string()),
-            });
-            grants.push(s3_xml::Grant {
-                grantee: create_grantee(&key_p, &api_key),
-                permission: s3_xml::Value("READ_ACP".to_string()),
-            });
-        }
-        if kp.allow_write {
-            grants.push(s3_xml::Grant {
-                grantee: create_grantee(&key_p, &api_key),
-                permission: s3_xml::Value("WRITE".to_string()),
-            });
-        }
-    }
-
-    let access_control_policy = s3_xml::AccessControlPolicy {
-        xmlns: (),
-        owner: None,
-        acl: s3_xml::AccessControlList { entries: grants },
-    };
-
-    let xml = s3_xml::to_xml_with_header(&access_control_policy)?;
-    trace!("xml: {}", xml);
-
-    Ok(Response::builder()
-        .header("Content-Type", "application/xml")
-        .body(string_body(xml))?)
-}
-
 pub async fn handle_list_buckets(
     garage: &Garage,
     api_key: &Key,
@@ -360,15 +311,6 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
     Some(ret)
 }

-fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee {
-    s3_xml::Grantee {
-        xmlns_xsi: (),
-        typ: "CanonicalUser".to_string(),
-        display_name: Some(s3_xml::Value(key_params.name.get().to_string())),
-        id: Some(s3_xml::Value(api_key.key_id.to_string())),
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -26,7 +26,7 @@ use garage_api_common::signature::checksum::*;
 use crate::api_server::{ReqBody, ResBody};
 use crate::encryption::EncryptionParams;
 use crate::error::*;
-use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
+use crate::get::{full_object_byte_stream, PreconditionHeaders};
 use crate::multipart;
 use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
 use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
@@ -237,7 +237,6 @@ async fn handle_copy_metaonly(
         .get(&source_version.uuid, &EmptyKey)
         .await?;
     let source_version = source_version.ok_or(Error::NoSuchKey)?;
-    check_version_not_deleted(&source_version)?;

     // Write an "uploading" marker in Object table
     // This holds a reference to the object in the Version table
@@ -429,7 +428,6 @@ pub async fn handle_upload_part_copy(
         .get(&source_object_version.uuid, &EmptyKey)
         .await?
         .ok_or(Error::NoSuchKey)?;
-    check_version_not_deleted(&source_version)?;

     // We want to reuse blocks from the source version as much as possible.
     // However, we still need to get the data from these blocks
@@ -561,7 +559,6 @@ pub async fn handle_upload_part_copy(

     let mut current_offset = 0;
     let mut next_block = defragmenter.next().await?;
-    let mut blocks_to_dup = dest_version.clone();

     // TODO this could be optimized similarly to read_and_put_blocks
     // low priority because uploadpartcopy is rarely used
@@ -591,7 +588,8 @@ pub async fn handle_upload_part_copy(
             .unwrap()?;
         checksummer = checksummer_updated;

-        let (version_block_key, version_block) = (
+        dest_version.blocks.clear();
+        dest_version.blocks.put(
             VersionBlockKey {
                 part_number,
                 offset: current_offset,
@@ -603,23 +601,25 @@ pub async fn handle_upload_part_copy(
         );
         current_offset += data_len;

-        let next = if let Some(final_data) = data_to_upload {
-            dest_version.blocks.clear();
-            dest_version.blocks.put(version_block_key, version_block);
         let block_ref = BlockRef {
             block: final_hash,
             version: dest_version_id,
             deleted: false.into(),
         };

         let (_, _, _, next) = futures::try_join!(
             // Thing 1: if the block is not exactly a block that existed before,
             // we need to insert that data as a new block.
-            garage.block_manager.rpc_put_block(
-                final_hash,
-                final_data,
-                dest_encryption.is_encrypted(),
-                None
-            ),
+            async {
+                if let Some(final_data) = data_to_upload {
+                    garage
+                        .block_manager
+                        .rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
+                        .await
+                } else {
+                    Ok(())
+                }
+            },
             // Thing 2: we need to insert the block in the version
             garage.version_table.insert(&dest_version),
             // Thing 3: we need to add a block reference
@@ -627,32 +627,11 @@ pub async fn handle_upload_part_copy(
             // Thing 4: we need to read the next block
             defragmenter.next(),
         )?;
-            next
-        } else {
-            blocks_to_dup.blocks.put(version_block_key, version_block);
-            defragmenter.next().await?
-        };
         next_block = next;
     }

     assert_eq!(current_offset, source_range.length);

-    // Put the duplicated blocks into the version & block_refs tables
-    let block_refs_to_put = blocks_to_dup
-        .blocks
-        .items()
-        .iter()
-        .map(|b| BlockRef {
-            block: b.1.hash,
-            version: dest_version_id,
-            deleted: false.into(),
-        })
-        .collect::<Vec<_>>();
-    futures::try_join!(
-        garage.version_table.insert(&blocks_to_dup),
-        garage.block_ref_table.insert_many(&block_refs_to_put[..]),
-    )?;
-
     let checksums = checksummer.finalize();
     let etag = dest_encryption.etag_from_md5(&checksums.md5);
     let checksum = checksums.extract(dest_object_checksum_algorithm);
@@ -88,9 +88,7 @@ pub async fn handle_put_cors(
pub struct CorsConfiguration {
	#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
	pub xmlns: (),
	// "default" is required to be able to parse an empty list of rules,
	// cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
	#[serde(rename = "CORSRule", default)]
	#[serde(rename = "CORSRule")]
	pub cors_rules: Vec<CorsRule>,
}

@@ -272,26 +270,4 @@ mod tests {

		Ok(())
	}

	#[test]
	fn test_deserialize_norules() -> Result<(), Error> {
		let message = r#"<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
		let conf: CorsConfiguration = from_str(message).unwrap();
		let ref_value = CorsConfiguration {
			xmlns: (),
			cors_rules: vec![],
		};
		assert_eq! {
			ref_value,
			conf
		};

		let message2 = to_xml_with_header(&ref_value)?;

		let cleanup = |c: &str| c.replace(char::is_whitespace, "");
		assert_eq!(cleanup(message), cleanup(&message2));

		Ok(())
	}
}
@@ -1,8 +1,8 @@
use std::convert::TryInto;

use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use thiserror::Error;

use garage_model::helper::error::Error as HelperError;

@@ -25,67 +25,67 @@ use crate::xml as s3_xml;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
	#[error("{0}")]
	#[error(display = "{}", _0)]
	/// Error from common error
	Common(#[from] CommonError),
	Common(#[error(source)] CommonError),

	// Category: cannot process
	/// Authorization Header Malformed
	#[error("Authorization header malformed, unexpected scope: {0}")]
	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
	AuthorizationHeaderMalformed(String),

	/// The object requested don't exists
	#[error("Key not found")]
	#[error(display = "Key not found")]
	NoSuchKey,

	/// The multipart upload requested don't exists
	#[error("Upload not found")]
	#[error(display = "Upload not found")]
	NoSuchUpload,

	/// Precondition failed (e.g. x-amz-copy-source-if-match)
	#[error("At least one of the preconditions you specified did not hold")]
	#[error(display = "At least one of the preconditions you specified did not hold")]
	PreconditionFailed,

	/// Parts specified in CMU request do not match parts actually uploaded
	#[error("Parts given to CompleteMultipartUpload do not match uploaded parts")]
	#[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
	InvalidPart,

	/// Parts given to CompleteMultipartUpload were not in ascending order
	#[error("Parts given to CompleteMultipartUpload were not in ascending order")]
	#[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
	InvalidPartOrder,

	/// In CompleteMultipartUpload: not enough data
	/// (here we are more lenient than AWS S3)
	#[error("Proposed upload is smaller than the minimum allowed object size")]
	#[error(display = "Proposed upload is smaller than the minimum allowed object size")]
	EntityTooSmall,

	// Category: bad request
	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8Str(#[from] std::str::Utf8Error),
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),

	/// The request used an invalid path
	#[error("Invalid UTF-8: {0}")]
	InvalidUtf8String(#[from] std::string::FromUtf8Error),
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),

	/// The client sent invalid XML data
	#[error("Invalid XML: {0}")]
	#[error(display = "Invalid XML: {}", _0)]
	InvalidXml(String),

	/// The client sent a range header with invalid value
	#[error("Invalid HTTP range: {0:?}")]
	InvalidRange((http_range::HttpRangeParseError, u64)),
	#[error(display = "Invalid HTTP range: {:?}", _0)]
	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),

	/// The client sent a range header with invalid value
	#[error("Invalid encryption algorithm: {0:?}, should be AES256")]
	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
	InvalidEncryptionAlgorithm(String),

	/// The provided digest (checksum) value was invalid
	#[error("Invalid digest: {0}")]
	#[error(display = "Invalid digest: {}", _0)]
	InvalidDigest(String),

	/// The client sent a request for an action not supported by garage
	#[error("Unimplemented action: {0}")]
	#[error(display = "Unimplemented action: {}", _0)]
	NotImplemented(String),
}

@@ -99,12 +99,6 @@ impl From<HelperError> for Error {
	}
}

impl From<(http_range::HttpRangeParseError, u64)> for Error {
	fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
		Error::InvalidRange(err)
	}
}

impl From<roxmltree::Error> for Error {
	fn from(err: roxmltree::Error) -> Self {
		Self::InvalidXml(format!("{}", err))
@@ -19,13 +19,12 @@ use garage_net::stream::ByteStream;
use garage_rpc::rpc_helper::OrderTag;
use garage_table::EmptyKey;
use garage_util::data::*;
use garage_util::error::{Error as UtilError, OkOrMessage};
use garage_util::error::OkOrMessage;

use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};

@@ -216,7 +215,6 @@ pub async fn handle_head_without_ctx(
		.get(&object_version.uuid, &EmptyKey)
		.await?
		.ok_or(Error::NoSuchKey)?;
	check_version_not_deleted(&version)?;

	let (part_offset, part_end) =
		calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;

@@ -367,21 +365,6 @@ pub async fn handle_get_without_ctx(
	}
}

pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> {
	if version.deleted.get() {
		// the version was deleted between when the object_table was consulted
		// and now, this could mean the object was deleted, or overriden.
		// Rather than say the key doesn't exist, return a transient error
		// to signal the client to try again.
		return Err(CommonError::InternalError(UtilError::Message(
			"conflict/inconsistency between object and version state, version is deleted"
				.to_string(),
		))
		.into());
	}
	Ok(())
}

async fn handle_get_full(
	garage: Arc<Garage>,
	version: &ObjectVersion,

@@ -448,7 +431,6 @@ pub fn full_object_byte_stream(
			.ok_or_message("channel closed")?;

		let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
		check_version_not_deleted(&version)?;
		for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
			let stream_block_i = encryption
				.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))

@@ -464,14 +446,6 @@ pub fn full_object_byte_stream(
		{
			Ok(()) => (),
			Err(e) => {
				// TODO i think this is a bad idea, we should log
				// an error and stop there. If the error happens to
				// be exactly the size of what hasn't been streamed
				// yet, the client will see the request as a
				// success
				// instead truncating the output notify the client
				// something happened with their download, so that
				// they can retry it
				let _ = tx.send(error_stream_item(e)).await;
			}
		}

@@ -523,7 +497,7 @@ async fn handle_get_range(
			.get(&version.uuid, &EmptyKey)
			.await?
			.ok_or(Error::NoSuchKey)?;
		check_version_not_deleted(&version)?;

		let body =
			body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
		Ok(resp_builder.body(body)?)

@@ -574,8 +548,6 @@ async fn handle_get_part(
				.await?
				.ok_or(Error::NoSuchKey)?;

			check_version_not_deleted(&version)?;

			let (begin, end) =
				calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;

@@ -845,9 +817,7 @@ impl PreconditionHeaders {
	}

	fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
		// we store date with ms precision, but headers are precise to the second: truncate
		// the timestamp to handle the same-second edge case
		let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
		let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);

		// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
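		// Added worked example (not from the original diff): v.timestamp is in
		// milliseconds, so 1_700_000_000_123 / 1000 = 1_700_000_000 seconds;
		// truncating makes it comparable with second-precision HTTP date headers.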
@@ -141,26 +141,10 @@ pub async fn handle_post_object(

	let mut conditions = decoded_policy.into_conditions()?;

	// If there are conditions on the bucket name, check these against the actual bucket_name rather
	// than the one in params, which is allowed to be absent.
	if let Some(conds) = conditions.params.remove("bucket") {
		for cond in conds {
			let ok = match cond {
				Operation::Equal(s) => s.as_str() == bucket_name,
				Operation::StartsWith(s) => bucket_name.starts_with(&s),
			};
			if !ok {
				return Err(Error::bad_request(
					"Key 'bucket' has value not allowed in policy",
				));
			}
		}
	}

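	// Illustrative example (not from the original source): an S3 POST policy
	// condition such as ["starts-with", "$bucket", "my-"] surfaces here as
	// Operation::StartsWith("my-".to_string()) and is checked against
	// bucket_name in the block above.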
	for (param_key, value) in params.iter() {
		let param_key = param_key.as_str();
		match param_key {
			"policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields
			"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
			"content-type" => {
				let conds = conditions.params.remove("content-type").ok_or_else(|| {
					Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
@@ -39,6 +39,8 @@ use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;

const PUT_BLOCKS_MAX_PARALLEL: usize = 3;

pub(crate) struct SaveStreamResult {
	pub(crate) version_uuid: Uuid,
	pub(crate) version_timestamp: u64,

@@ -491,7 +493,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
	};
	let recv_next = async {
		// If more than a maximum number of writes are in progress, don't add more for now
		if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request {
		if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
			futures::future::pending().await
		} else {
			block_rx3.recv().await
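		// Added note (not from the original diff): futures::future::pending()
		// never resolves, so once the write limit is reached this select arm
		// simply stops pulling new blocks until a running write completes.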
@@ -3,7 +3,7 @@ use quick_xml::de::from_reader;
use hyper::{header::HeaderName, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};

use garage_model::bucket_table::*;
use garage_model::bucket_table::{self, *};

use garage_api_common::helpers::*;

@@ -26,7 +26,28 @@ pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
			suffix: Value(website.index_document.to_string()),
		}),
		redirect_all_requests_to: None,
		routing_rules: None,
		routing_rules: RoutingRules {
			rules: website
				.routing_rules
				.clone()
				.into_iter()
				.map(|rule| RoutingRule {
					condition: rule.condition.map(|cond| Condition {
						http_error_code: cond.http_error_code.map(|c| IntValue(c as i64)),
						prefix: cond.prefix.map(Value),
					}),
					redirect: Redirect {
						hostname: rule.redirect.hostname.map(Value),
						http_redirect_code: Some(IntValue(
							rule.redirect.http_redirect_code as i64,
						)),
						protocol: rule.redirect.protocol.map(Value),
						replace_full: rule.redirect.replace_key.map(Value),
						replace_prefix: rule.redirect.replace_key_prefix.map(Value),
					},
				})
				.collect(),
		},
	};
	let xml = to_xml_with_header(&wc)?;
	Ok(Response::builder()
@@ -97,18 +118,28 @@ pub struct WebsiteConfiguration {
	pub index_document: Option<Suffix>,
	#[serde(rename = "RedirectAllRequestsTo")]
	pub redirect_all_requests_to: Option<Target>,
	#[serde(rename = "RoutingRules")]
	pub routing_rules: Option<Vec<RoutingRule>>,
	#[serde(
		rename = "RoutingRules",
		default,
		skip_serializing_if = "RoutingRules::is_empty"
	)]
	pub routing_rules: RoutingRules,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct RoutingRules {
	#[serde(rename = "RoutingRule")]
	pub rules: Vec<RoutingRule>,
}

impl RoutingRules {
	fn is_empty(&self) -> bool {
		self.rules.is_empty()
	}
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRule {
	#[serde(rename = "RoutingRule")]
	pub inner: RoutingRuleInner,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRuleInner {
	#[serde(rename = "Condition")]
	pub condition: Option<Condition>,
	#[serde(rename = "Redirect")]
@@ -162,7 +193,7 @@ impl WebsiteConfiguration {
		if self.redirect_all_requests_to.is_some()
			&& (self.error_document.is_some()
				|| self.index_document.is_some()
				|| self.routing_rules.is_some())
				|| !self.routing_rules.is_empty())
		{
			return Err(Error::bad_request(
				"Bad XML: can't have RedirectAllRequestsTo and other fields",

@@ -177,10 +208,15 @@ impl WebsiteConfiguration {
		if let Some(ref rart) = self.redirect_all_requests_to {
			rart.validate()?;
		}
		if let Some(ref rrs) = self.routing_rules {
			for rr in rrs {
				rr.inner.validate()?;
		for rr in &self.routing_rules.rules {
			rr.validate()?;
		}
		if self.routing_rules.rules.len() > 1000 {
			// we will do linear scans, best to avoid overly long configuration. The
			// limit was choosen arbitrarily
			return Err(Error::bad_request(
				"Bad XML: RoutingRules can't have more than 1000 child elements",
			));
		}

		Ok(())
@@ -189,11 +225,7 @@ impl WebsiteConfiguration {
	pub fn into_garage_website_config(self) -> Result<WebsiteConfig, Error> {
		if self.redirect_all_requests_to.is_some() {
			Err(Error::NotImplemented(
				"S3 website redirects are not currently implemented in Garage.".into(),
			))
		} else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) {
			Err(Error::NotImplemented(
				"S3 routing rules are not currently implemented in Garage.".into(),
				"RedirectAllRequestsTo is not currently implemented in Garage, however its effect can be emulated using a single inconditional RoutingRule.".into(),
			))
		} else {
			Ok(WebsiteConfig {
@@ -202,6 +234,36 @@ impl WebsiteConfiguration {
					.map(|x| x.suffix.0)
					.unwrap_or_else(|| "index.html".to_string()),
				error_document: self.error_document.map(|x| x.key.0),
				redirect_all: None,
				routing_rules: self
					.routing_rules
					.rules
					.into_iter()
					.map(|rule| {
						bucket_table::RoutingRule {
							condition: rule.condition.map(|condition| {
								bucket_table::RedirectCondition {
									http_error_code: condition.http_error_code.map(|c| c.0 as u16),
									prefix: condition.prefix.map(|p| p.0),
								}
							}),
							redirect: bucket_table::Redirect {
								hostname: rule.redirect.hostname.map(|h| h.0),
								protocol: rule.redirect.protocol.map(|p| p.0),
								// aws default to 301, which i find punitive in case of
								// missconfiguration (can be permanently cached on the
								// user agent)
								http_redirect_code: rule
									.redirect
									.http_redirect_code
									.map(|c| c.0 as u16)
									.unwrap_or(302),
								replace_key_prefix: rule.redirect.replace_prefix.map(|k| k.0),
								replace_key: rule.redirect.replace_full.map(|k| k.0),
							},
						}
					})
					.collect(),
			})
	}
}
@@ -242,37 +304,69 @@ impl Target {
	}
}

impl RoutingRuleInner {
impl RoutingRule {
	pub fn validate(&self) -> Result<(), Error> {
		let has_prefix = self
			.condition
			.as_ref()
			.and_then(|c| c.prefix.as_ref())
			.is_some();
		self.redirect.validate(has_prefix)
		if let Some(condition) = &self.condition {
			condition.validate()?;
		}
		self.redirect.validate()
	}
}

impl Condition {
	pub fn validate(&self) -> Result<bool, Error> {
		if let Some(ref error_code) = self.http_error_code {
			// TODO do other error codes make sense? Aws only allows 4xx and 5xx
			if error_code.0 != 404 {
				return Err(Error::bad_request(
					"Bad XML: HttpErrorCodeReturnedEquals must be 404 or absent",
				));
			}
		}
		Ok(self.prefix.is_some())
	}
}

impl Redirect {
	pub fn validate(&self, has_prefix: bool) -> Result<(), Error> {
		if self.replace_prefix.is_some() {
			if self.replace_full.is_some() {
	pub fn validate(&self) -> Result<(), Error> {
		if self.replace_prefix.is_some() && self.replace_full.is_some() {
			return Err(Error::bad_request(
				"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set",
			));
		}
			if !has_prefix {
				return Err(Error::bad_request(
					"Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't",
				));
			}
		}
		if let Some(ref protocol) = self.protocol {
			if protocol.0 != "http" && protocol.0 != "https" {
				return Err(Error::bad_request("Bad XML: invalid protocol"));
			}
		}
		// TODO there are probably more invalid cases, but which ones?
		if let Some(ref http_redirect_code) = self.http_redirect_code {
			match http_redirect_code.0 {
				// aws allows all 3xx except 300, but some are non-sensical (not modified,
				// use proxy...)
				301 | 302 | 303 | 307 | 308 => {
					if self.hostname.is_none() && self.protocol.is_some() {
						return Err(Error::bad_request(
							"Bad XML: HostName must be set if Protocol is set",
						));
					}
				}
				// aws doesn't allow these codes, but netlify does, and it seems like a
				// cool feature (change the page seen without changing the url shown by the
				// user agent)
				200 | 404 => {
					if self.hostname.is_some() || self.protocol.is_some() {
						// hostname would mean different bucket, protocol doesn't make
						// sense
						return Err(Error::bad_request(
							"Bad XML: an HttpRedirectCode of 200 is not acceptable alongside HostName or Protocol",
						));
					}
				}
				_ => {
					return Err(Error::bad_request("Bad XML: invalid HttpRedirectCode"));
				}
			}
		}
		Ok(())
	}
}
@@ -311,6 +405,15 @@ mod tests {
         <ReplaceKeyWith>fullkey</ReplaceKeyWith>
        </Redirect>
       </RoutingRule>
       <RoutingRule>
        <Condition>
         <KeyPrefixEquals></KeyPrefixEquals>
        </Condition>
        <Redirect>
         <HttpRedirectCode>404</HttpRedirectCode>
         <ReplaceKeyWith>missing</ReplaceKeyWith>
        </Redirect>
       </RoutingRule>
      </RoutingRules>
</WebsiteConfiguration>"#;
		let conf: WebsiteConfiguration = from_str(message).unwrap();

@@ -326,8 +429,9 @@ mod tests {
				hostname: Value("garage.tld".to_owned()),
				protocol: Some(Value("https".to_owned())),
			}),
			routing_rules: Some(vec![RoutingRule {
				inner: RoutingRuleInner {
			routing_rules: RoutingRules {
				rules: vec![
					RoutingRule {
						condition: Some(Condition {
							http_error_code: Some(IntValue(404)),
							prefix: Some(Value("prefix1".to_owned())),

@@ -340,7 +444,21 @@ mod tests {
							replace_full: Some(Value("fullkey".to_owned())),
						},
					},
				}]),
					RoutingRule {
						condition: Some(Condition {
							http_error_code: None,
							prefix: Some(Value("".to_owned())),
						}),
						redirect: Redirect {
							hostname: None,
							protocol: None,
							http_redirect_code: Some(IntValue(404)),
							replace_prefix: None,
							replace_full: Some(Value("missing".to_owned())),
						},
					},
				],
			},
		};
		assert_eq! {
			ref_value,
@@ -13,10 +13,6 @@ pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
	s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/")
}

pub fn xmlns_xsi_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
	s.serialize_str("http://www.w3.org/2001/XMLSchema-instance")
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Value(#[serde(rename = "$value")] pub String);

@@ -323,42 +319,6 @@ pub struct PostObject {
	pub etag: Value,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grantee {
	#[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")]
	pub xmlns_xsi: (),
	#[serde(rename = "xsi:type")]
	pub typ: String,
	#[serde(rename = "DisplayName")]
	pub display_name: Option<Value>,
	#[serde(rename = "ID")]
	pub id: Option<Value>,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grant {
	#[serde(rename = "Grantee")]
	pub grantee: Grantee,
	#[serde(rename = "Permission")]
	pub permission: Value,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlList {
	#[serde(rename = "Grant")]
	pub entries: Vec<Grant>,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlPolicy {
	#[serde(serialize_with = "xmlns_tag")]
	pub xmlns: (),
	#[serde(rename = "Owner")]
	pub owner: Option<Owner>,
	#[serde(rename = "AccessControlList")]
	pub acl: AccessControlList,
}

#[cfg(test)]
mod tests {
	use super::*;

@@ -467,43 +427,6 @@ mod tests {
		Ok(())
	}

	#[test]
	fn get_bucket_acl_result() -> Result<(), ApiError> {
		let grant = Grant {
			grantee: Grantee {
				xmlns_xsi: (),
				typ: "CanonicalUser".to_string(),
				display_name: Some(Value("owner_name".to_string())),
				id: Some(Value("qsdfjklm".to_string())),
			},
			permission: Value("FULL_CONTROL".to_string()),
		};

		let get_bucket_acl = AccessControlPolicy {
			xmlns: (),
			owner: None,
			acl: AccessControlList {
				entries: vec![grant],
			},
		};
		assert_eq!(
			to_xml_with_header(&get_bucket_acl)?,
			"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
			<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
			<AccessControlList>\
			<Grant>\
			<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
			<DisplayName>owner_name</DisplayName>\
			<ID>qsdfjklm</ID>\
			</Grantee>\
			<Permission>FULL_CONTROL</Permission>\
			</Grant>\
			</AccessControlList>\
			</AccessControlPolicy>"
		);
		Ok(())
	}

	#[test]
	fn delete_result() -> Result<(), ApiError> {
		let delete_result = DeleteResult {
@@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -50,8 +50,6 @@ pub const INLINE_THRESHOLD: usize = 3072;
// to delete the block locally.
pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);

const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);

/// RPC messages used to share blocks of data between nodes
#[derive(Debug, Serialize, Deserialize)]
pub enum BlockRpc {

@@ -89,7 +87,6 @@ pub struct BlockManager {
	disable_scrub: bool,

	mutation_lock: Vec<Mutex<BlockManagerLocked>>,
	read_semaphore: Semaphore,

	pub rc: BlockRc,
	pub resync: BlockResyncManager,

@@ -179,8 +176,6 @@ impl BlockManager {
				.iter()
				.map(|_| Mutex::new(BlockManagerLocked()))
				.collect::<Vec<_>>(),

			read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
			rc,
			resync,
			system,

@@ -413,8 +408,8 @@ impl BlockManager {
	}

	/// Get number of items in the refcount table
	pub fn rc_approximate_len(&self) -> Result<usize, Error> {
		Ok(self.rc.rc_table.approximate_len()?)
	pub fn rc_len(&self) -> Result<usize, Error> {
		Ok(self.rc.rc_table.len()?)
	}

	/// Send command to start/stop/manager scrub worker

@@ -432,7 +427,7 @@ impl BlockManager {

	/// List all resync errors
	pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
		let mut blocks = Vec::with_capacity(self.resync.errors.approximate_len()?);
		let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
		for ent in self.resync.errors.iter()? {
			let (hash, cnt) = ent?;
			let cnt = ErrorCounter::decode(&cnt);

@@ -562,6 +557,9 @@ impl BlockManager {
		match self.find_block(hash).await {
			Some(p) => self.read_block_from(hash, &p).await,
			None => {
				// Not found but maybe we should have had it ??
				self.resync
					.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
				return Err(Error::Message(format!(
					"block {:?} not found on node",
					hash
@@ -583,15 +581,6 @@ impl BlockManager {
	) -> Result<DataBlock, Error> {
		let (header, path) = block_path.as_parts_ref();

		let permit = tokio::select! {
			sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
			_ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
				self.metrics.block_read_semaphore_timeouts.add(1);
				debug!("read block {:?}: read_semaphore acquire timeout", hash);
				return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
			}
		};

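		// Added note (not from the original diff): the permit acquired above
		// stays held across the file read below and is released by the explicit
		// drop(permit) after the hash check, bounding concurrent block reads.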
		let mut f = fs::File::open(&path).await?;
		let mut data = vec![];
		f.read_to_end(&mut data).await?;

@@ -616,8 +605,6 @@ impl BlockManager {
			return Err(Error::CorruptData(*hash));
		}

		drop(permit);

		Ok(data)
	}

@@ -783,7 +770,6 @@ impl BlockManagerLocked {

		let mut f = fs::File::create(&path_tmp).await?;
		f.write_all(data).await?;
		f.flush().await?;
		mgr.metrics.bytes_written.add(data.len() as u64);

		if mgr.data_fsync {
@@ -22,7 +22,6 @@ pub struct BlockManagerMetrics {

	pub(crate) bytes_read: BoundCounter<u64>,
	pub(crate) block_read_duration: BoundValueRecorder<f64>,
	pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
	pub(crate) bytes_written: BoundCounter<u64>,
	pub(crate) block_write_duration: BoundValueRecorder<f64>,
	pub(crate) delete_counter: BoundCounter<u64>,

@@ -51,7 +50,7 @@ impl BlockManagerMetrics {
			.init(),
		_rc_size: meter
			.u64_value_observer("block.rc_size", move |observer| {
				if let Ok(value) = rc_tree.approximate_len() {
				if let Ok(value) = rc_tree.len() {
					observer.observe(value as u64, &[])
				}
			})

@@ -59,7 +58,7 @@ impl BlockManagerMetrics {
			.init(),
		_resync_queue_len: meter
			.u64_value_observer("block.resync_queue_length", move |observer| {
				if let Ok(value) = resync_queue.approximate_len() {
				if let Ok(value) = resync_queue.len() {
					observer.observe(value as u64, &[]);
				}
			})

@@ -69,7 +68,7 @@ impl BlockManagerMetrics {
			.init(),
		_resync_errored_blocks: meter
			.u64_value_observer("block.resync_errored_blocks", move |observer| {
				if let Ok(value) = resync_errors.approximate_len() {
				if let Ok(value) = resync_errors.len() {
					observer.observe(value as u64, &[]);
				}
			})

@@ -120,11 +119,6 @@ impl BlockManagerMetrics {
			.with_description("Duration of block read operations")
			.init()
			.bind(&[]),
		block_read_semaphore_timeouts: meter
			.u64_counter("block.read_semaphore_timeouts")
			.with_description("Number of block reads that failed due to semaphore acquire timeout")
			.init()
			.bind(&[]),
		bytes_written: meter
			.u64_counter("block.bytes_written")
			.with_description("Number of bytes written to disk")
@@ -106,13 +106,13 @@ impl BlockResyncManager {
	}

	/// Get length of resync queue
	pub fn queue_approximate_len(&self) -> Result<usize, Error> {
		Ok(self.queue.approximate_len()?)
	pub fn queue_len(&self) -> Result<usize, Error> {
		Ok(self.queue.len()?)
	}

	/// Get number of blocks that have an error
	pub fn errors_approximate_len(&self) -> Result<usize, Error> {
		Ok(self.errors.approximate_len()?)
	pub fn errors_len(&self) -> Result<usize, Error> {
		Ok(self.errors.len()?)
	}

	/// Clear the error counter for a block and put it in queue immediately

@@ -133,14 +133,6 @@ impl BlockResyncManager {
		)))
	}

	/// Clear the entire resync queue and list of errored blocks
	/// Corresponds to `garage repair clear-resync-queue`
	pub fn clear_resync_queue(&self) -> Result<(), Error> {
		self.queue.clear()?;
		self.errors.clear()?;
		Ok(())
	}

	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
		let notify = self.notify.clone();
		vars.register_rw(

@@ -556,11 +548,9 @@ impl Worker for ResyncWorker {
		}

		WorkerStatus {
			queue_length: Some(self.manager.resync.queue_approximate_len().unwrap_or(0) as u64),
			queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
			tranquility: Some(tranquility),
			persistent_errors: Some(
				self.manager.resync.errors_approximate_len().unwrap_or(0) as u64
			),
			persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
			..Default::default()
		}
	}
@@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -12,18 +12,14 @@ readme = "../../README.md"
path = "lib.rs"

[dependencies]
thiserror.workspace = true
err-derive.workspace = true
tracing.workspace = true

heed = { workspace = true, optional = true }

rusqlite = { workspace = true, optional = true, features = ["backup"] }
r2d2 = { workspace = true, optional = true }
r2d2_sqlite = { workspace = true, optional = true }

fjall = { workspace = true, optional = true }
parking_lot = { workspace = true, optional = true }

[dev-dependencies]
mktemp.workspace = true

@@ -31,5 +27,4 @@ mktemp.workspace = true
default = [ "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ]
lmdb = [ "heed" ]
fjall = [ "dep:fjall", "dep:parking_lot" ]
sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
@@ -1,453 +0,0 @@
use core::ops::Bound;

use std::path::PathBuf;
use std::sync::Arc;

use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};

use fjall::{
	PartitionCreateOptions, PersistMode, TransactionalKeyspace, TransactionalPartitionHandle,
	WriteTransaction,
};

use crate::{
	open::{Engine, OpenOpt},
	Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
	TxResult, TxValueIter, Value, ValueIter,
};

pub use fjall;

// --

pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
	info!("Opening Fjall database at: {}", path.display());
	if opt.fsync {
		return Err(Error(
			"metadata_fsync is not supported with the Fjall database engine".into(),
		));
	}
	let mut config = fjall::Config::new(path);
	if let Some(block_cache_size) = opt.fjall_block_cache_size {
		config = config.cache_size(block_cache_size as u64);
	}
	let keyspace = config.open_transactional()?;
	Ok(FjallDb::init(keyspace))
}

// -- err

impl From<fjall::Error> for Error {
	fn from(e: fjall::Error) -> Error {
		Error(format!("fjall: {}", e).into())
	}
}

impl From<fjall::LsmError> for Error {
	fn from(e: fjall::LsmError) -> Error {
		Error(format!("fjall lsm_tree: {}", e).into())
	}
}

impl From<fjall::Error> for TxOpError {
	fn from(e: fjall::Error) -> TxOpError {
		TxOpError(e.into())
	}
}

// -- db

pub struct FjallDb {
	keyspace: TransactionalKeyspace,
	trees: RwLock<Vec<(String, TransactionalPartitionHandle)>>,
}

type ByteRefRangeBound<'r> = (Bound<&'r [u8]>, Bound<&'r [u8]>);

impl FjallDb {
	pub fn init(keyspace: TransactionalKeyspace) -> Db {
		let s = Self {
			keyspace,
			trees: RwLock::new(Vec::new()),
		};
		Db(Arc::new(s))
	}

	fn get_tree(
		&self,
		i: usize,
	) -> Result<MappedRwLockReadGuard<'_, TransactionalPartitionHandle>> {
		RwLockReadGuard::try_map(self.trees.read(), |trees: &Vec<_>| {
			trees.get(i).map(|tup| &tup.1)
		})
		.map_err(|_| Error("invalid tree id".into()))
	}
}

impl IDb for FjallDb {
	fn engine(&self) -> String {
		"Fjall (EXPERIMENTAL!)".into()
	}

	fn open_tree(&self, name: &str) -> Result<usize> {
		let mut trees = self.trees.write();
		let safe_name = encode_name(name)?;
		if let Some(i) = trees.iter().position(|(name, _)| *name == safe_name) {
			Ok(i)
		} else {
			let tree = self
				.keyspace
				.open_partition(&safe_name, PartitionCreateOptions::default())?;
			let i = trees.len();
			trees.push((safe_name, tree));
			Ok(i)
		}
	}

	fn list_trees(&self) -> Result<Vec<String>> {
		Ok(self
			.keyspace
			.list_partitions()
			.iter()
			.map(|n| decode_name(&n))
			.collect::<Result<Vec<_>>>()?)
	}

	fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
		std::fs::create_dir_all(base_path)?;
		let path = Engine::Fjall.db_path(base_path);

		let source_state = self.keyspace.read_tx();
		let copy_keyspace = fjall::Config::new(path).open()?;

		for partition_name in self.keyspace.list_partitions() {
			let source_partition = self
				.keyspace
				.open_partition(&partition_name, PartitionCreateOptions::default())?;
			let copy_partition =
				copy_keyspace.open_partition(&partition_name, PartitionCreateOptions::default())?;

			for entry in source_state.iter(&source_partition) {
				let (key, value) = entry?;
				copy_partition.insert(key, value)?;
			}
		}

		copy_keyspace.persist(PersistMode::SyncAll)?;
		Ok(())
	}

	// ----

	fn get(&self, tree_idx: usize, key: &[u8]) -> Result<Option<Value>> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		let val = tx.get(&tree, key)?;
		match val {
			None => Ok(None),
			Some(v) => Ok(Some(v.to_vec())),
		}
	}

	fn approximate_len(&self, tree_idx: usize) -> Result<usize> {
		let tree = self.get_tree(tree_idx)?;
		Ok(tree.approximate_len())
	}
	fn is_empty(&self, tree_idx: usize) -> Result<bool> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		Ok(tx.is_empty(&tree)?)
	}

	fn insert(&self, tree_idx: usize, key: &[u8], value: &[u8]) -> Result<()> {
		let tree = self.get_tree(tree_idx)?;
		let mut tx = self.keyspace.write_tx();
		tx.insert(&tree, key, value);
		tx.commit()?;
		Ok(())
	}

	fn remove(&self, tree_idx: usize, key: &[u8]) -> Result<()> {
		let tree = self.get_tree(tree_idx)?;
		let mut tx = self.keyspace.write_tx();
		tx.remove(&tree, key);
		tx.commit()?;
		Ok(())
	}

	fn clear(&self, tree_idx: usize) -> Result<()> {
		let mut trees = self.trees.write();

		if tree_idx >= trees.len() {
			return Err(Error("invalid tree id".into()));
		}
		let (name, tree) = trees.remove(tree_idx);

		self.keyspace.delete_partition(tree)?;
		let tree = self
			.keyspace
			.open_partition(&name, PartitionCreateOptions::default())?;
		trees.insert(tree_idx, (name, tree));

		Ok(())
	}

	fn iter(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		Ok(Box::new(tx.iter(&tree).map(iterator_remap)))
	}

	fn iter_rev(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		Ok(Box::new(tx.iter(&tree).rev().map(iterator_remap)))
	}

	fn range<'r>(
		&self,
		tree_idx: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
	) -> Result<ValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		Ok(Box::new(
			tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
				.map(iterator_remap),
		))
	}
	fn range_rev<'r>(
		&self,
		tree_idx: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
	) -> Result<ValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let tx = self.keyspace.read_tx();
		Ok(Box::new(
			tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
				.rev()
				.map(iterator_remap),
		))
	}

	// ----

	fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
		let trees = self.trees.read();
		let mut tx = FjallTx {
			trees: &trees[..],
			tx: self.keyspace.write_tx(),
		};

		let res = f.try_on(&mut tx);
		match res {
			TxFnResult::Ok(on_commit) => {
				tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
				Ok(on_commit)
			}
			TxFnResult::Abort => {
				tx.tx.rollback();
				Err(TxError::Abort(()))
			}
			TxFnResult::DbErr => {
				tx.tx.rollback();
				Err(TxError::Db(Error(
					"(this message will be discarded)".into(),
				)))
			}
		}
	}
}

// ----

struct FjallTx<'a> {
	trees: &'a [(String, TransactionalPartitionHandle)],
	tx: WriteTransaction<'a>,
}

impl<'a> FjallTx<'a> {
	fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalPartitionHandle> {
		self.trees.get(i).map(|tup| &tup.1).ok_or_else(|| {
			TxOpError(Error(
				"invalid tree id (it might have been openned after the transaction started)".into(),
			))
		})
	}
}

impl<'a> ITx for FjallTx<'a> {
	fn get(&self, tree_idx: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
		let tree = self.get_tree(tree_idx)?;
		match self.tx.get(tree, key)? {
			Some(v) => Ok(Some(v.to_vec())),
			None => Ok(None),
		}
	}
	fn len(&self, tree_idx: usize) -> TxOpResult<usize> {
		let tree = self.get_tree(tree_idx)?;
		Ok(self.tx.len(tree)? as usize)
	}

	fn insert(&mut self, tree_idx: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
		let tree = self.get_tree(tree_idx)?.clone();
		self.tx.insert(&tree, key, value);
		Ok(())
	}
	fn remove(&mut self, tree_idx: usize, key: &[u8]) -> TxOpResult<()> {
		let tree = self.get_tree(tree_idx)?.clone();
		self.tx.remove(&tree, key);
		Ok(())
	}
	fn clear(&mut self, _tree_idx: usize) -> TxOpResult<()> {
		unimplemented!("LSM tree clearing in cross-partition transaction is not supported")
	}

	fn iter(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?.clone();
		Ok(Box::new(self.tx.iter(&tree).map(iterator_remap_tx)))
	}
	fn iter_rev(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?.clone();
		Ok(Box::new(self.tx.iter(&tree).rev().map(iterator_remap_tx)))
	}

	fn range<'r>(
		&self,
		tree_idx: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
	) -> TxOpResult<TxValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let low = clone_bound(low);
		let high = clone_bound(high);
		Ok(Box::new(
			self.tx
				.range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
				.map(iterator_remap_tx),
		))
	}
	fn range_rev<'r>(
		&self,
		tree_idx: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
	) -> TxOpResult<TxValueIter<'_>> {
		let tree = self.get_tree(tree_idx)?;
		let low = clone_bound(low);
		let high = clone_bound(high);
		Ok(Box::new(
			self.tx
				.range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
				.rev()
				.map(iterator_remap_tx),
		))
	}
}

// -- maps fjall's (k, v) to ours

fn iterator_remap(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> Result<(Value, Value)> {
	r.map(|(k, v)| (k.to_vec(), v.to_vec()))
		.map_err(|e| e.into())
}

fn iterator_remap_tx(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> TxOpResult<(Value, Value)> {
	r.map(|(k, v)| (k.to_vec(), v.to_vec()))
		.map_err(|e| e.into())
}

// -- utils to deal with Garage's tightness on Bound lifetimes

type ByteVecBound = Bound<Vec<u8>>;
type ByteVecRangeBounds = (ByteVecBound, ByteVecBound);

fn clone_bound(bound: Bound<&[u8]>) -> ByteVecBound {
	let value = match bound {
		Bound::Excluded(v) | Bound::Included(v) => v.to_vec(),
		Bound::Unbounded => vec![],
	};

	match bound {
		Bound::Included(_) => Bound::Included(value),
		Bound::Excluded(_) => Bound::Excluded(value),
		Bound::Unbounded => Bound::Unbounded,
	}
}

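// Added note (not from the original diff): in the Unbounded arm the dummy
// empty Vec built above is never used; only the Bound variant is preserved.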
// -- utils to encode table names --

fn encode_name(s: &str) -> Result<String> {
	let base = 'A' as u32;

	let mut ret = String::with_capacity(s.len() + 10);
	for c in s.chars() {
		if c.is_alphanumeric() || c == '_' || c == '-' || c == '#' {
			ret.push(c);
		} else if c <= u8::MAX as char {
			ret.push('$');
			let c_hi = c as u32 / 16;
			let c_lo = c as u32 % 16;
			ret.push(char::from_u32(base + c_hi).unwrap());
			ret.push(char::from_u32(base + c_lo).unwrap());
		} else {
			return Err(Error(
				format!("table name {} could not be safely encoded", s).into(),
			));
		}
	}
	Ok(ret)
}

fn decode_name(s: &str) -> Result<String> {
	use std::convert::TryFrom;

	let errfn = || Error(format!("encoded table name {} is invalid", s).into());
	let c_map = |c: char| {
		let c = c as u32;
		let base = 'A' as u32;
		if (base..base + 16).contains(&c) {
			Some(c - base)
		} else {
			None
		}
	};

	let mut ret = String::with_capacity(s.len());
	let mut it = s.chars();
	while let Some(c) = it.next() {
		if c == '$' {
			let c_hi = it.next().and_then(c_map).ok_or_else(errfn)?;
			let c_lo = it.next().and_then(c_map).ok_or_else(errfn)?;
			let c_dec = char::try_from(c_hi * 16 + c_lo).map_err(|_| errfn())?;
			ret.push(c_dec);
		} else {
			ret.push(c);
		}
	}
	Ok(ret)
}

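// Added worked example (not from the original diff): with base 'A' (65), a
// space (0x20) maps to hi nibble 2 -> 'C' and lo nibble 0 -> 'A', so
// encode_name("test name") yields "test$CAname" and decode_name reverses it.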
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn test_encdec_name() {
		for name in [
			"testname",
			"test_name",
			"test name",
			"test$name",
			"test:name@help.me$get/this**right",
		] {
			let encname = encode_name(name).unwrap();
			assert!(!encname.contains(' '));
			assert!(!encname.contains('.'));
			assert!(!encname.contains('*'));
			assert_eq!(*name, decode_name(&encname).unwrap());
		}
	}
}
@@ -1,8 +1,6 @@
#[macro_use]
extern crate tracing;

#[cfg(feature = "fjall")]
pub mod fjall_adapter;
#[cfg(feature = "lmdb")]
pub mod lmdb_adapter;
#[cfg(feature = "sqlite")]

@@ -20,7 +18,7 @@ use std::cell::Cell;
use std::path::PathBuf;
use std::sync::Arc;

use thiserror::Error;
use err_derive::Error;

pub use open::*;

@@ -44,7 +42,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
// ----

#[derive(Debug, Error)]
#[error("{0}")]
#[error(display = "{}", _0)]
pub struct Error(pub Cow<'static, str>);

impl From<std::io::Error> for Error {

@@ -56,7 +54,7 @@ impl From<std::io::Error> for Error {
pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Error)]
#[error("{0}")]
#[error(display = "{}", _0)]
pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
@@ -106,44 +104,32 @@ impl Db {
			result: Cell::new(None),
		};
		let tx_res = self.0.transaction(&f);
		let fn_res = f.result.into_inner();
		let ret = f
			.result
			.into_inner()
			.expect("Transaction did not store result");

		match (tx_res, fn_res) {
			(Ok(on_commit), Some(Ok(value))) => {
				// Transaction succeeded
				// TxFn stored the value to return to the user in fn_res
				// tx_res contains the on_commit list of callbacks, run them now
		match tx_res {
			Ok(on_commit) => match ret {
				Ok(value) => {
					on_commit.into_iter().for_each(|f| f());
					Ok(value)
				}
			(Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => {
				// Transaction was aborted by user code
				// The abort error value is stored in fn_res
				Err(TxError::Abort(e))
			}
			(Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => {
				// Transaction encountered a DB error in user code
				// The error value encountered is the one in fn_res,
				// tx_res contains only a dummy error message
				Err(TxError::Db(fn_e))
			}
			(Err(TxError::Db(tx_e)), None) => {
				// Transaction encounterred a DB error when initializing the transaction,
				// before user code was called
				Err(TxError::Db(tx_e))
			}
			(Err(TxError::Db(tx_e)), Some(Ok(_))) => {
				// Transaction encounterred a DB error when commiting the transaction,
				// after user code was called
				Err(TxError::Db(tx_e))
			}
			(tx_res, fn_res) => {
				panic!(
					"unexpected error case: tx_res={:?}, fn_res={:?}",
					tx_res.map(|_| "..."),
					fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
				);
			}
				_ => unreachable!(),
			},
			Err(TxError::Abort(())) => match ret {
				Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
				_ => unreachable!(),
			},
			Err(TxError::Db(e2)) => match ret {
				// Ok was stored -> the error occurred when finalizing
				// transaction
				Ok(_) => Err(TxError::Db(e2)),
				// An error was already stored: that's the one we want to
				// return
				Err(TxError::Db(e)) => Err(TxError::Db(e)),
				_ => unreachable!(),
			},
		}
	}

@@ -166,7 +152,7 @@ impl Db {
		let tree_names = other.list_trees()?;
		for name in tree_names {
			let tree = self.open_tree(&name)?;
			if !tree.is_empty()? {
			if tree.len()? > 0 {
				return Err(Error(format!("tree {} already contains data", name).into()));
			}

@@ -208,12 +194,8 @@ impl Tree {
		self.0.get(self.1, key.as_ref())
	}
	#[inline]
	pub fn approximate_len(&self) -> Result<usize> {
		self.0.approximate_len(self.1)
	}
	#[inline]
	pub fn is_empty(&self) -> Result<bool> {
		self.0.is_empty(self.1)
	pub fn len(&self) -> Result<usize> {
		self.0.len(self.1)
	}

	#[inline]

@@ -351,8 +333,7 @@ pub(crate) trait IDb: Send + Sync {
	fn snapshot(&self, path: &PathBuf) -> Result<()>;

	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
	fn approximate_len(&self, tree: usize) -> Result<usize>;
	fn is_empty(&self, tree: usize) -> Result<bool>;
	fn len(&self, tree: usize) -> Result<usize>;

	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
	fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
@ -1,8 +1,8 @@
|
|||
use core::ops::Bound;
|
||||
use core::ptr::NonNull;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
use std::marker::PhantomPinned;
|
||||
use std::path::PathBuf;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
|
@ -11,55 +11,12 @@ use heed::types::ByteSlice;
|
|||
use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};
|
||||
|
||||
use crate::{
|
||||
open::{Engine, OpenOpt},
|
||||
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
|
||||
TxResult, TxValueIter, Value, ValueIter,
|
||||
};
|
||||
|
||||
pub use heed;
|
||||
|
||||
// ---- top-level open function
|
||||
|
||||
pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
|
||||
info!("Opening LMDB database at: {}", path.display());
|
||||
if let Err(e) = std::fs::create_dir_all(&path) {
|
||||
return Err(Error(
|
||||
format!("Unable to create LMDB data directory: {}", e).into(),
|
||||
));
|
||||
}
|
||||
|
||||
let map_size = match opt.lmdb_map_size {
|
||||
None => recommended_map_size(),
|
||||
Some(v) => v - (v % 4096),
|
||||
};
|
||||
|
||||
let mut env_builder = heed::EnvOpenOptions::new();
|
||||
env_builder.max_dbs(100);
|
||||
env_builder.map_size(map_size);
|
||||
env_builder.max_readers(2048);
|
||||
unsafe {
|
||||
env_builder.flag(heed::flags::Flags::MdbNoRdAhead);
|
||||
env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
|
||||
if !opt.fsync {
|
||||
env_builder.flag(heed::flags::Flags::MdbNoSync);
|
||||
}
|
||||
}
|
||||
match env_builder.open(&path) {
|
||||
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
|
||||
return Err(Error(
|
||||
"OutOfMemory error while trying to open LMDB database. This can happen \
|
||||
if your operating system is not allowing you to use sufficient virtual \
|
||||
memory address space. Please check that no limit is set (ulimit -v). \
|
||||
You may also try to set a smaller `lmdb_map_size` configuration parameter. \
|
||||
On 32-bit machines, you should probably switch to another database engine."
|
||||
.into(),
|
||||
))
|
||||
}
|
||||
Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
|
||||
Ok(db) => Ok(LmdbDb::init(db)),
|
||||
}
|
||||
}
|
||||
|
||||
// -- err
|
||||
|
||||
impl From<heed::Error> for Error {
@@ -147,9 +104,10 @@ impl IDb for LmdbDb {
Ok(ret2)
}

fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?;
let path = Engine::Lmdb.db_path(base_path);
fn snapshot(&self, to: &PathBuf) -> Result<()> {
std::fs::create_dir_all(to)?;
let mut path = to.clone();
path.push("data.mdb");
self.db
.copy_to_path(path, heed::CompactionOption::Enabled)?;
Ok(())

@@ -168,16 +126,11 @@ impl IDb for LmdbDb {
}
}

fn approximate_len(&self, tree: usize) -> Result<usize> {
fn len(&self, tree: usize) -> Result<usize> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
Ok(tree.len(&tx)?.try_into().unwrap())
}
fn is_empty(&self, tree: usize) -> Result<bool> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
Ok(tree.is_empty(&tx)?)
}

fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;

@@ -206,15 +159,13 @@ impl IDb for LmdbDb {
fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere,
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?)) }
TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?))
}

fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere,
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?)) }
TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?))
}

fn range<'r>(

@@ -225,8 +176,7 @@ impl IDb for LmdbDb {
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere,
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?)) }
TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?))
}
fn range_rev<'r>(
&self,

@@ -236,8 +186,7 @@ impl IDb for LmdbDb {
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere,
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?)) }
TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?))
}

// ----
@@ -367,41 +316,28 @@ where
{
tx: RoTxn<'a>,
iter: Option<I>,
_pin: PhantomPinned,
}

impl<'a, I> TxAndIterator<'a, I>
where
I: Iterator<Item = IteratorItem<'a>> + 'a,
{
fn iter(self: Pin<&mut Self>) -> &mut Option<I> {
// Safety: iter is not structural
unsafe { &mut self.get_unchecked_mut().iter }
}

/// Safety: iterfun must not store its argument anywhere but in its result.
unsafe fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
where
F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
{
let res = TxAndIterator {
tx,
iter: None,
_pin: PhantomPinned,
};
let res = TxAndIterator { tx, iter: None };
let mut boxed = Box::pin(res);

let tx_lifetime_overextended: &'a RoTxn<'a> = {
let tx = &boxed.tx;
// Safety: Artificially extending the lifetime because
// this reference will only be stored and accessed from the
// returned ValueIter which guarantees that it is destroyed
// before the tx it is pointing to.
unsafe { &*&raw const *tx }
};
let iter = iterfun(&tx_lifetime_overextended)?;
// This unsafe allows us to bypass lifetime checks
let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
let iter = iterfun(tx)?;

*boxed.as_mut().iter() = Some(iter);
let mut_ref = Pin::as_mut(&mut boxed);
// This unsafe allows us to write in a field of the pinned struct
unsafe {
Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
}

Ok(Box::new(TxAndIteratorPin(boxed)))
}
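Both sides of this hunk implement the same self-referential pattern: an iterator that borrows from a transaction stored in the same heap allocation, kept immovable with `Box::pin` and `PhantomPinned` so the borrowed address stays stable. A stripped-down sketch of the idea, using a plain `String` as a stand-in for the transaction (hypothetical names, not the Garage types):

```rust
use std::marker::PhantomPinned;
use std::pin::Pin;

// `owner` plays the role of RoTxn; `view` the role of the iterator borrowing it.
struct SelfRef {
    owner: String,
    view: Option<*const str>, // set after pinning, points into `owner`
    _pin: PhantomPinned,      // opt out of Unpin so the value can never be moved out
}

fn make(owner: String) -> Pin<Box<SelfRef>> {
    let mut boxed = Box::pin(SelfRef { owner, view: None, _pin: PhantomPinned });
    // The address of `owner` is now stable: the pinned box will never move it.
    let ptr: *const str = boxed.owner.as_str();
    unsafe {
        // Writing a non-structural field of a pinned struct is allowed.
        Pin::get_unchecked_mut(boxed.as_mut()).view = Some(ptr);
    }
    boxed
}

fn main() {
    let s = make("transaction".to_string());
    // Safety (sketch): `view` points into `owner`, which lives in the same
    // pinned allocation and outlives every use of `view`.
    let v = unsafe { &*s.view.unwrap() };
    assert_eq!(v, "transaction");
}
```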
@@ -412,10 +348,8 @@ where
I: Iterator<Item = IteratorItem<'a>> + 'a,
{
fn drop(&mut self) {
// Safety: `new_unchecked` is okay because we know this value is never
// used again after being dropped.
let this = unsafe { Pin::new_unchecked(self) };
drop(this.iter().take());
// ensure the iterator is dropped before the RoTxn it references
drop(self.iter.take());
}
}

@@ -431,12 +365,13 @@ where

fn next(&mut self) -> Option<Self::Item> {
let mut_ref = Pin::as_mut(&mut self.0);
let next = mut_ref.iter().as_mut()?.next()?;
let res = match next {
Err(e) => Err(e.into()),
Ok((k, v)) => Ok((k.to_vec(), v.to_vec())),
};
Some(res)
// This unsafe allows us to mutably access the iterator field
let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
match next {
None => None,
Some(Err(e)) => Some(Err(e.into())),
Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
}
}
}

@@ -11,7 +11,6 @@ use crate::{Db, Error, Result};
pub enum Engine {
Lmdb,
Sqlite,
Fjall,
}

impl Engine {
@@ -20,26 +19,8 @@ impl Engine {
match self {
Self::Lmdb => "lmdb",
Self::Sqlite => "sqlite",
Self::Fjall => "fjall",
}
}

/// Return engine-specific DB path from base path
pub fn db_path(&self, base_path: &PathBuf) -> PathBuf {
let mut ret = base_path.clone();
match self {
Self::Lmdb => {
ret.push("db.lmdb");
}
Self::Sqlite => {
ret.push("db.sqlite");
}
Self::Fjall => {
ret.push("db.fjall");
}
}
ret
}
}
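The new `db_path` helper above derives the on-disk location from the metadata directory, with one fixed file or subdirectory name per engine. A small usage sketch, assuming a hypothetical `/var/lib/garage/meta` base path:

```rust
use std::path::PathBuf;

// Mirror of the mapping in the hunk above (sketch, not the real type).
enum Engine { Lmdb, Sqlite, Fjall }

fn db_path(engine: &Engine, base: &PathBuf) -> PathBuf {
    let mut ret = base.clone();
    ret.push(match engine {
        Engine::Lmdb => "db.lmdb",
        Engine::Sqlite => "db.sqlite",
        Engine::Fjall => "db.fjall",
    });
    ret
}

fn main() {
    let base = PathBuf::from("/var/lib/garage/meta"); // assumed example path
    assert_eq!(
        db_path(&Engine::Sqlite, &base),
        PathBuf::from("/var/lib/garage/meta/db.sqlite")
    );
}
```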

impl std::fmt::Display for Engine {

@@ -55,11 +36,10 @@ impl std::str::FromStr for Engine {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"fjall" => Ok(Self::Fjall),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
kind => Err(Error(
format!(
"Invalid DB engine: {} (options are: lmdb, sqlite, fjall)",
"Invalid DB engine: {} (options are: lmdb, sqlite)",
kind
)
.into(),

@@ -71,7 +51,6 @@ impl std::str::FromStr for Engine {
pub struct OpenOpt {
pub fsync: bool,
pub lmdb_map_size: Option<usize>,
pub fjall_block_cache_size: Option<usize>,
}

impl Default for OpenOpt {

@@ -79,7 +58,6 @@ impl Default for OpenOpt {
Self {
fsync: false,
lmdb_map_size: None,
fjall_block_cache_size: None,
}
}
}

@@ -88,15 +66,53 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
match engine {
// ---- Sqlite DB ----
#[cfg(feature = "sqlite")]
Engine::Sqlite => crate::sqlite_adapter::open_db(path, opt),
Engine::Sqlite => {
info!("Opening Sqlite database at: {}", path.display());
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
}

// ---- LMDB DB ----
#[cfg(feature = "lmdb")]
Engine::Lmdb => crate::lmdb_adapter::open_db(path, opt),
Engine::Lmdb => {
info!("Opening LMDB database at: {}", path.display());
if let Err(e) = std::fs::create_dir_all(&path) {
return Err(Error(
format!("Unable to create LMDB data directory: {}", e).into(),
));
}

// ---- Fjall DB ----
#[cfg(feature = "fjall")]
Engine::Fjall => crate::fjall_adapter::open_db(path, opt),
let map_size = match opt.lmdb_map_size {
None => crate::lmdb_adapter::recommended_map_size(),
Some(v) => v - (v % 4096),
};

let mut env_builder = heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);
}
}
match env_builder.open(&path) {
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
return Err(Error(
"OutOfMemory error while trying to open LMDB database. This can happen \
if your operating system is not allowing you to use sufficient virtual \
memory address space. Please check that no limit is set (ulimit -v). \
You may also try to set a smaller `lmdb_map_size` configuration parameter. \
On 32-bit machines, you should probably switch to another database engine."
.into(),
))
}
Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
Ok(db) => Ok(crate::lmdb_adapter::LmdbDb::init(db)),
}
}

// Pattern is unreachable when all supported DB engines are compiled into binary. The allow
// attribute is added so that we won't have to change this match in case stop building
@@ -11,23 +11,12 @@ use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{params, Rows, Statement, Transaction};

use crate::{
open::{Engine, OpenOpt},
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};

pub use rusqlite;

// ---- top-level open function

pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
info!("Opening Sqlite database at: {}", path.display());
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
Ok(SqliteDb::new(manager, opt.fsync)?)
}

// ----

type Connection = r2d2::PooledConnection<SqliteConnectionManager>;

// --- err

@@ -150,18 +139,17 @@ impl IDb for SqliteDb {
Ok(trees)
}

fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?;
let path = Engine::Sqlite
.db_path(&base_path)
.into_os_string()
.into_string()
.map_err(|_| Error("invalid sqlite path string".into()))?;

info!("Start sqlite VACUUM INTO `{}`", path);
self.db.get()?.execute("VACUUM INTO ?1", params![path])?;
info!("Finished sqlite VACUUM INTO `{}`", path);

fn snapshot(&self, to: &PathBuf) -> Result<()> {
fn progress(p: rusqlite::backup::Progress) {
let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
info!("Sqlite snapshot progress: {}%", percent);
}
std::fs::create_dir_all(to)?;
let mut path = to.clone();
path.push("db.sqlite");
self.db
.get()?
.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
Ok(())
}
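The two sides of this hunk snapshot a SQLite database in different ways: `VACUUM INTO` writes a compacted, consistent copy in a single statement, while the older code drives rusqlite's online backup API page by page with a progress callback. A minimal sketch of the `VACUUM INTO` approach, assuming an in-memory source database and with error handling reduced to `unwrap`:

```rust
fn main() {
    let db = rusqlite::Connection::open_in_memory().unwrap();
    db.execute_batch("CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT);").unwrap();
    db.execute("INSERT INTO kv VALUES (?1, ?2)", rusqlite::params!["a", "1"]).unwrap();

    // VACUUM INTO produces a compacted snapshot at the given path,
    // bound here as a statement parameter exactly as in the hunk above.
    let dest = std::env::temp_dir().join("snapshot.sqlite");
    db.execute("VACUUM INTO ?1", rusqlite::params![dest.to_str().unwrap()]).unwrap();
}
```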
@@ -172,7 +160,7 @@ impl IDb for SqliteDb {
self.internal_get(&self.db.get()?, &tree, key)
}

fn approximate_len(&self, tree: usize) -> Result<usize> {
fn len(&self, tree: usize) -> Result<usize> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;

@@ -184,10 +172,6 @@ impl IDb for SqliteDb {
}
}

fn is_empty(&self, tree: usize) -> Result<bool> {
Ok(self.approximate_len(tree)? == 0)
}

fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;

@@ -1,7 +1,7 @@
use crate::*;

fn test_suite(db: Db) {
let tree = db.open_tree("tree:this_is_a_tree").unwrap();
let tree = db.open_tree("tree").unwrap();

let ka: &[u8] = &b"test"[..];
let kb: &[u8] = &b"zwello"[..];

@@ -14,7 +14,7 @@ fn test_suite(db: Db) {

assert!(tree.insert(ka, va).is_ok());
assert_eq!(tree.get(ka).unwrap().unwrap(), va);
assert_eq!(tree.iter().unwrap().count(), 1);
assert_eq!(tree.len().unwrap(), 1);

// ---- test transaction logic ----

@@ -148,15 +148,3 @@ fn test_sqlite_db() {
let db = SqliteDb::new(manager, false).unwrap();
test_suite(db);
}

#[test]
#[cfg(feature = "fjall")]
fn test_fjall_db() {
use crate::fjall_adapter::{fjall, FjallDb};

let path = mktemp::Temp::new_dir().unwrap();
let config = fjall::Config::new(path).temporary(true);
let keyspace = config.open_transactional().unwrap();
let db = FjallDb::init(keyspace);
test_suite(db);
}

@@ -1,6 +1,6 @@
[package]
name = "garage"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -57,7 +57,6 @@ opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
opentelemetry-otlp = { workspace = true, optional = true }
syslog-tracing = { workspace = true, optional = true }
tracing-journald = { workspace = true, optional = true }

[dev-dependencies]
garage_api_common.workspace = true

@@ -91,7 +90,6 @@ k2v = [ "garage_util/k2v", "garage_api_k2v" ]
# Database engines
lmdb = [ "garage_model/lmdb" ]
sqlite = [ "garage_model/sqlite" ]
fjall = [ "garage_model/fjall" ]

# Automatic registration and discovery via Consul API
consul-discovery = [ "garage_rpc/consul-discovery" ]

@@ -103,8 +101,6 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
telemetry-otlp = [ "opentelemetry-otlp" ]
# Logging to syslog
syslog = [ "syslog-tracing" ]
# Logging to journald
journald = [ "tracing-journald" ]

# NOTE: bundled-libs and system-libs should be treated as mutually exclusive;
# exactly one of them should be enabled.

@@ -101,7 +101,6 @@ impl AdminRpcHandler {
let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0;
let mut br_dels = 0;

for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;

@@ -132,19 +131,12 @@ impl AdminRpcHandler {
ver_dels += 1;
}
}
if !br.deleted.get() {
let mut br = br;
br.deleted.set();
self.garage.block_ref_table.insert(&br).await?;
br_dels += 1;
}
}
}

Ok(AdminRpc::Ok(format!(
"Purged {} blocks: marked {} block refs, {} versions, {} objects and {} multipart uploads as deleted",
"Purged {} blocks, {} versions, {} objects, {} multipart uploads",
blocks.len(),
br_dels,
ver_dels,
obj_dels,
mpu_dels,

@@ -390,9 +390,15 @@ impl AdminRpcHandler {
}

let website = if query.allow {
let (redirect_all, routing_rules) = match bucket_state.website_config.get() {
Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
None => (None, Vec::new()),
};
Some(WebsiteConfig {
index_document: query.index_document.clone(),
error_document: query.error_document.clone(),
redirect_all,
routing_rules,
})
} else {
None

@@ -219,7 +219,7 @@ impl AdminRpcHandler {

// Gather block manager statistics
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
let rc_len = self.garage.block_manager.rc_approximate_len()?.to_string();
let rc_len = self.garage.block_manager.rc_len()?.to_string();

writeln!(
&mut ret,

@@ -230,13 +230,13 @@ impl AdminRpcHandler {
writeln!(
&mut ret,
"  resync queue length: {}",
self.garage.block_manager.resync.queue_approximate_len()?
self.garage.block_manager.resync.queue_len()?
)
.unwrap();
writeln!(
&mut ret,
"  blocks with resync errors: {}",
self.garage.block_manager.resync.errors_approximate_len()?
self.garage.block_manager.resync.errors_len()?
)
.unwrap();

@@ -346,21 +346,16 @@ impl AdminRpcHandler {
F: TableSchema + 'static,
R: TableReplication + 'static,
{
let data_len = t
.data
.store
.approximate_len()
.map_err(GarageError::from)?
.to_string();
let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string();
let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();

Ok(format!(
"  {}\t{}\t{}\t{}\t{}",
F::TABLE_NAME,
data_len,
mkl_len,
t.merkle_updater.todo_approximate_len()?,
t.data.gc_todo_approximate_len()?
t.merkle_updater.todo_len()?,
t.data.gc_todo_len()?
))
}

@@ -466,10 +466,6 @@ pub enum RepairWhat {
/// Repair (resync/rebalance) the set of stored blocks in the cluster
#[structopt(name = "blocks", version = garage_version())]
Blocks,
/// Clear the block resync queue. The list of blocks in errored state
/// is cleared as well. You MUST run `garage repair blocks` after invoking this.
#[structopt(name = "clear-resync-queue", version = garage_version())]
ClearResyncQueue,
/// Repropagate object deletions to the version table
#[structopt(name = "versions", version = garage_version())]
Versions,

@@ -208,43 +208,6 @@ fn init_logging(opt: &Opt) {
}
}

if std::env::var("GARAGE_LOG_TO_JOURNALD")
.map(|x| x == "1" || x == "true")
.unwrap_or(false)
{
#[cfg(feature = "journald")]
{
use tracing_journald::{Priority, PriorityMappings};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

let registry = tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_writer(std::io::sink))
.with(env_filter);
match tracing_journald::layer() {
Ok(layer) => {
registry
.with(layer.with_priority_mappings(PriorityMappings {
info: Priority::Informational,
debug: Priority::Debug,
..PriorityMappings::new()
}))
.init();
}
Err(e) => {
eprintln!("Couldn't connect to journald: {}.", e);
std::process::exit(1);
}
}
return;
}
#[cfg(not(feature = "journald"))]
{
eprintln!("Journald support is not enabled in this build.");
std::process::exit(1);
}
}

tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(env_filter)

@@ -92,11 +92,6 @@ pub async fn launch_online_repair(
info!("Repairing bucket aliases (foreground)");
garage.locked_helper().await.repair_aliases().await?;
}
RepairWhat::ClearResyncQueue => {
let garage = garage.clone();
tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
.await??
}
}
Ok(())
}

@@ -183,21 +183,10 @@ fn watch_shutdown_signal() -> watch::Receiver<bool> {
let mut sigterm =
signal(SignalKind::terminate()).expect("Failed to install SIGTERM handler");
let mut sighup = signal(SignalKind::hangup()).expect("Failed to install SIGHUP handler");
loop {
tokio::select! {
_ = sigint.recv() => {
info!("Received SIGINT, shutting down.");
break
}
_ = sigterm.recv() => {
info!("Received SIGTERM, shutting down.");
break
}
_ = sighup.recv() => {
info!("Received SIGHUP, reload not supported.");
continue
}
}
_ = sigint.recv() => info!("Received SIGINT, shutting down."),
_ = sigterm.recv() => info!("Received SIGTERM, shutting down."),
_ = sighup.recv() => info!("Received SIGHUP, shutting down."),
}
send_cancel.send(true).unwrap();
});
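Both versions of `watch_shutdown_signal` multiplex Unix signals with `tokio::select!`; the longer side wraps the select in a loop so that SIGHUP can be acknowledged without triggering shutdown. A condensed sketch of that loop shape, assuming tokio with the `signal`, `macros`, and `rt-multi-thread` features enabled:

```rust
use tokio::signal::unix::{signal, SignalKind};

#[tokio::main]
async fn main() {
    let mut sigint = signal(SignalKind::interrupt()).expect("SIGINT handler");
    let mut sighup = signal(SignalKind::hangup()).expect("SIGHUP handler");
    loop {
        tokio::select! {
            _ = sigint.recv() => {
                println!("Received SIGINT, shutting down.");
                break; // leave the loop and begin shutdown
            }
            _ = sighup.recv() => {
                println!("Received SIGHUP, reload not supported.");
                continue; // keep waiting for a real shutdown signal
            }
        }
    }
}
```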
@@ -198,7 +198,6 @@ async fn test_precondition() {
);
}
let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
{
let err = ctx

@@ -213,18 +212,6 @@ async fn test_precondition() {
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);

let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_modified_since(same_date)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);

let o = ctx
.client
.get_object()

@@ -249,17 +236,6 @@ async fn test_precondition() {
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
);

let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_unmodified_since(same_date)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);

let o = ctx
.client
.get_object()

@@ -5,7 +5,10 @@ use crate::json_body;
use assert_json_diff::assert_json_eq;
use aws_sdk_s3::{
primitives::ByteStream,
types::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
types::{
Condition, CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, Protocol, Redirect,
RoutingRule, WebsiteConfiguration,
},
};
use http::{Request, StatusCode};
use http_body_util::BodyExt;

@@ -534,6 +537,491 @@ async fn test_website_check_domain() {
);
}

#[tokio::test]
async fn test_website_redirect_full_bucket() {
const BCKT_NAME: &str = "my-redirect-full";
let ctx = common::context();
let bucket = ctx.create_bucket(BCKT_NAME);

let conf = WebsiteConfiguration::builder()
.routing_rules(
RoutingRule::builder()
.condition(Condition::builder().key_prefix_equals("").build())
.redirect(
Redirect::builder()
.protocol(Protocol::Https)
.host_name("other.tld")
.replace_key_prefix_with("")
.build(),
)
.build(),
)
.build();

ctx.client
.put_bucket_website()
.bucket(&bucket)
.website_configuration(conf)
.send()
.await
.unwrap();

{
let req = Request::builder()
.method("GET")
.uri(format!("http://127.0.0.1:{}/my-path", ctx.garage.web_port))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap();

let client = Client::builder(TokioExecutor::new()).build_http();
let resp = client.request(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(hyper::header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"https://other.tld/my-path"
);
}

{
let req = Request::builder()
.method("GET")
.uri(format!("http://127.0.0.1:{}/my-path/", ctx.garage.web_port))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap();

let client = Client::builder(TokioExecutor::new()).build_http();
let resp = client.request(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(hyper::header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"https://other.tld/my-path/"
);
}

{
let req = Request::builder()
.method("GET")
.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap();

let client = Client::builder(TokioExecutor::new()).build_http();
let resp = client.request(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(hyper::header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"https://other.tld/"
);
}
}

#[tokio::test]
async fn test_website_redirect() {
const BCKT_NAME: &str = "my-redirect";
let ctx = common::context();
let bucket = ctx.create_bucket(BCKT_NAME);

ctx.client
.put_object()
.bucket(&bucket)
.key("index.html")
.body(ByteStream::from_static(b"index"))
.send()
.await
.unwrap();
ctx.client
.put_object()
.bucket(&bucket)
.key("404.html")
.body(ByteStream::from_static(b"main 404"))
.send()
.await
.unwrap();
ctx.client
.put_object()
.bucket(&bucket)
.key("static-file")
.body(ByteStream::from_static(b"static file"))
.send()
.await
.unwrap();

let mut conf = WebsiteConfiguration::builder()
.index_document(
IndexDocument::builder()
.suffix("home.html")
.build()
.unwrap(),
)
.error_document(ErrorDocument::builder().key("404.html").build().unwrap());

for (prefix, condition) in [("unconditional", false), ("conditional", true)] {
let code = condition.then(|| "404".to_string());
conf = conf
// simple redirect
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-prefix/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("302")
.replace_key_prefix_with("other-prefix/")
.build(),
)
.build(),
)
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-prefix-307/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("307")
.replace_key_prefix_with("other-prefix/")
.build(),
)
.build(),
)
// simple redirect
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-fixed/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("302")
.replace_key_with("fixed_key")
.build(),
)
.build(),
)
// stream other file
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-fixed/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("200")
.replace_key_with("static-file")
.build(),
)
.build(),
)
// stream other file as error
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-404/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("404")
.replace_key_with("static-file")
.build(),
)
.build(),
)
// fail to stream other file
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-missing/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("200")
.replace_key_with("missing-file")
.build(),
)
.build(),
);
}
let conf = conf.build();

ctx.client
.put_bucket_website()
.bucket(&bucket)
.website_configuration(conf.clone())
.send()
.await
.unwrap();

let stored_cfg = ctx
.client
.get_bucket_website()
.bucket(&bucket)
.send()
.await
.unwrap();
assert_eq!(stored_cfg.index_document, conf.index_document);
assert_eq!(stored_cfg.error_document, conf.error_document);
assert_eq!(stored_cfg.routing_rules, conf.routing_rules);

let req = |path| {
Request::builder()
.method("GET")
.uri(format!(
"http://127.0.0.1:{}/{}/path",
ctx.garage.web_port, path
))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap()
};

test_redirect_helper("unconditional", true, &req).await;
test_redirect_helper("conditional", true, &req).await;
for prefix in ["unconditional", "conditional"] {
for rule_path in [
"redirect-prefix",
"redirect-prefix-307",
"redirect-fixed",
"stream-fixed",
"stream-404",
"stream-missing",
] {
ctx.client
.put_object()
.bucket(&bucket)
.key(format!("{prefix}/{rule_path}/path"))
.body(ByteStream::from_static(b"i exist"))
.send()
.await
.unwrap();
}
}
test_redirect_helper("unconditional", true, &req).await;
test_redirect_helper("conditional", false, &req).await;
}

async fn test_redirect_helper(
prefix: &str,
should_see_redirect: bool,
req: impl Fn(String) -> Request<http_body_util::Full<Bytes>>,
) {
use http::header;
let client = Client::builder(TokioExecutor::new()).build_http();
let expected_body = b"i exist".as_ref();

let resp = client
.request(req(format!("{prefix}/redirect-prefix")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/other-prefix/path"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}

let resp = client
.request(req(format!("{prefix}/redirect-prefix-307")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::TEMPORARY_REDIRECT);
assert_eq!(
resp.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/other-prefix/path"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}

let resp = client
.request(req(format!("{prefix}/redirect-fixed")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/fixed_key"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/stream-fixed")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
b"static file".as_ref(),
);
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/stream-404")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
b"static file".as_ref(),
);
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/stream-404")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
b"static file".as_ref(),
);
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
}

#[tokio::test]
async fn test_website_invalid_redirect() {
const BCKT_NAME: &str = "my-invalid-redirect";
let ctx = common::context();
let bucket = ctx.create_bucket(BCKT_NAME);

let conf = WebsiteConfiguration::builder()
.routing_rules(
RoutingRule::builder()
.condition(Condition::builder().key_prefix_equals("").build())
.redirect(
Redirect::builder()
.protocol(Protocol::Https)
.host_name("other.tld")
.replace_key_prefix_with("")
// we don't allow 200 with hostname
.http_redirect_code("200")
.build(),
)
.build(),
)
.build();

ctx.client
.put_bucket_website()
.bucket(&bucket)
.website_configuration(conf)
.send()
.await
.unwrap_err();
}

#[tokio::test]
async fn test_website_puny() {
const BCKT_NAME: &str = "xn--pda.eu";

@@ -606,45 +1094,3 @@ async fn test_website_puny() {
);
}
}

#[tokio::test]
async fn test_website_object_not_found() {
const BCKT_NAME: &str = "not-found";
let ctx = common::context();
let _bucket = ctx.create_bucket(BCKT_NAME);

let client = Client::builder(TokioExecutor::new()).build_http();

let req = |suffix| {
Request::builder()
.method("GET")
.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
.header("Host", format!("{}{}", BCKT_NAME, suffix))
.body(Body::new(Bytes::new()))
.unwrap()
};

ctx.garage
.command()
.args(["bucket", "website", "--allow", BCKT_NAME])
.quiet()
.expect_success_status("Could not allow website on bucket");

let resp = client.request(req("")).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
// the errors we return by default are *not* xml
assert_eq!(
resp.headers().get(http::header::CONTENT_TYPE).unwrap(),
"text/html; charset=utf-8"
);
let result = String::from_utf8(
resp.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.to_vec(),
)
.unwrap();
assert!(result.contains("not found"));
}
@@ -72,16 +72,6 @@ impl K2vClient {
.enable_http2()
.build();
let client = HttpClient::builder(TokioExecutor::new()).build(connector);
Self::new_with_client(config, client)
}

/// Create a new K2V client with an external client.
/// Useful for example if you plan on creating many clients but you want to mutualize the
/// underlying thread pools & co.
pub fn new_with_client(
config: K2vClientConfig,
client: HttpClient<HttpsConnector<HttpConnector>, Body>,
) -> Result<Self, Error> {
let user_agent: std::borrow::Cow<str> = match &config.user_agent {
Some(ua) => ua.into(),
None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(),

@@ -1,6 +1,6 @@
[package]
name = "garage_model"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -24,7 +24,7 @@ garage_net.workspace = true
async-trait.workspace = true
blake2.workspace = true
chrono.workspace = true
thiserror.workspace = true
err-derive.workspace = true
hex.workspace = true
http.workspace = true
base64.workspace = true

@@ -44,4 +44,3 @@ default = [ "lmdb", "sqlite" ]
k2v = [ "garage_util/k2v" ]
lmdb = [ "garage_db/lmdb" ]
sqlite = [ "garage_db/sqlite" ]
fjall = [ "garage_db/fjall" ]

@@ -119,7 +119,122 @@ mod v08 {
impl garage_util::migrate::InitialFormat for Bucket {}
}

pub use v08::*;
mod v2 {
use crate::permission::BucketKeyPerm;
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};

use super::v08;

pub use v08::{BucketQuotas, CorsRule, LifecycleExpiration, LifecycleFilter, LifecycleRule};

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket {
/// ID of the bucket
pub id: Uuid,
/// State, and configuration if not deleted, of the bucket
pub state: crdt::Deletable<BucketParams>,
}

/// Configuration for a bucket
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams {
/// Bucket's creation date
pub creation_date: u64,
/// Map of key with access to the bucket, and what kind of access they give
pub authorized_keys: crdt::Map<String, BucketKeyPerm>,

/// Map of aliases that are or have been given to this bucket
/// in the global namespace
/// (not authoritative: this is just used as an indication to
/// map back to aliases when doing ListBuckets)
pub aliases: crdt::LwwMap<String, bool>,
/// Map of aliases that are or have been given to this bucket
/// in namespaces local to keys
/// key = (access key id, alias name)
pub local_aliases: crdt::LwwMap<(String, String), bool>,

/// Whether this bucket is allowed for website access
/// (under all of its global alias names),
/// and if so, the website configuration XML document
pub website_config: crdt::Lww<Option<WebsiteConfig>>,
/// CORS rules
pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
/// Lifecycle configuration
pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>,
/// Bucket quotas
pub quotas: crdt::Lww<BucketQuotas>,
}

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfig {
pub index_document: String,
pub error_document: Option<String>,
// this field is currently unused, but present so adding it in the future doesn't
// need a new migration
pub redirect_all: Option<RedirectAll>,
pub routing_rules: Vec<RoutingRule>,
}

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RedirectAll {
pub hostname: String,
pub protocol: String,
}

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RoutingRule {
pub condition: Option<RedirectCondition>,
pub redirect: Redirect,
}

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RedirectCondition {
pub http_error_code: Option<u16>,
pub prefix: Option<String>,
}

#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Redirect {
pub hostname: Option<String>,
pub http_redirect_code: u16,
pub protocol: Option<String>,
pub replace_key_prefix: Option<String>,
pub replace_key: Option<String>,
}

impl garage_util::migrate::Migrate for Bucket {
const VERSION_MARKER: &'static [u8] = b"G2bkt";

type Previous = v08::Bucket;

fn migrate(old: v08::Bucket) -> Bucket {
Bucket {
id: old.id,
state: old.state.map(|x| BucketParams {
creation_date: x.creation_date,
authorized_keys: x.authorized_keys,
aliases: x.aliases,
local_aliases: x.local_aliases,
website_config: x.website_config.map(|wc_opt| {
wc_opt.map(|wc| WebsiteConfig {
index_document: wc.index_document,
error_document: wc.error_document,
redirect_all: None,
routing_rules: vec![],
})
}),
cors_config: x.cors_config,
lifecycle_config: x.lifecycle_config,
quotas: x.quotas,
}),
}
}
}
}

pub use v2::*;
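The `v2` module above follows Garage's usual schema-evolution pattern: each on-disk format lives in its own module, and `Migrate` links a version to its `Previous` one through a pure conversion function, with new fields given defaults. A reduced sketch of the shape of that pattern (hypothetical `Thing` type and plain function instead of the real trait):

```rust
// Sketch of the versioned-schema pattern used above (names are illustrative).
mod v1 {
    pub struct Thing { pub name: String }
}

mod v2 {
    pub struct Thing { pub name: String, pub tags: Vec<String> }

    impl Thing {
        // Pure, total conversion from the previous on-disk format.
        pub fn migrate(old: super::v1::Thing) -> Thing {
            Thing { name: old.name, tags: vec![] } // new field gets a default
        }
    }
}

fn main() {
    let old = v1::Thing { name: "bucket".into() };
    let new = v2::Thing::migrate(old);
    assert!(new.tags.is_empty());
}
```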

impl AutoCrdt for BucketQuotas {
const WARN_IF_DIFFERENT: bool = true;

@@ -116,17 +116,21 @@ impl Garage {
info!("Opening database...");
let db_engine = db::Engine::from_str(&config.db_engine)
.ok_or_message("Invalid `db_engine` value in configuration file")?;
let db_path = db_engine.db_path(&config.metadata_dir);
let mut db_path = config.metadata_dir.clone();
match db_engine {
db::Engine::Sqlite => {
db_path.push("db.sqlite");
}
db::Engine::Lmdb => {
db_path.push("db.lmdb");
}
}
let db_opt = db::OpenOpt {
fsync: config.metadata_fsync,
lmdb_map_size: match config.lmdb_map_size {
v if v == usize::default() => None,
v => Some(v),
},
fjall_block_cache_size: match config.fjall_block_cache_size {
v if v == usize::default() => None,
v => Some(v),
},
};
let db = db::open_db(&db_path, db_engine, &db_opt)
.ok_or_message("Unable to open metadata db")?;

@@ -315,15 +319,15 @@ impl Garage {
Ok(())
}

pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> {
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
helper::bucket::BucketHelper(self)
}

pub fn key_helper(&self) -> helper::key::KeyHelper<'_> {
pub fn key_helper(&self) -> helper::key::KeyHelper {
helper::key::KeyHelper(self)
}

pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> {
pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
let lock = self.bucket_lock.lock().await;
helper::locked::LockedHelper(self, Some(lock))
}

@@ -1,24 +1,24 @@
use err_derive::Error;
use serde::{Deserialize, Serialize};
use thiserror::Error;

use garage_util::error::Error as GarageError;

#[derive(Debug, Error, Serialize, Deserialize)]
pub enum Error {
#[error("Internal error: {0}")]
Internal(#[from] GarageError),
#[error(display = "Internal error: {}", _0)]
Internal(#[error(source)] GarageError),

#[error("Bad request: {0}")]
#[error(display = "Bad request: {}", _0)]
BadRequest(String),

/// Bucket name is not valid according to AWS S3 specs
#[error("Invalid bucket name: {0}")]
#[error(display = "Invalid bucket name: {}", _0)]
InvalidBucketName(String),

#[error("Access key not found: {0}")]
#[error(display = "Access key not found: {}", _0)]
NoSuchAccessKey(String),

#[error("Bucket not found: {0}")]
#[error(display = "Bucket not found: {}", _0)]
NoSuchBucket(String),
}

@@ -121,13 +121,13 @@ impl Worker for LifecycleWorker {
mpu_aborted,
..
} => {
let n_objects = self.garage.object_table.data.store.approximate_len().ok();
let n_objects = self.garage.object_table.data.store.len().ok();
let progress = match n_objects {
Some(total) if total > 0 => format!(
None => "...".to_string(),
Some(total) => format!(
"~{:.2}%",
100. * std::cmp::min(*counter, total) as f32 / total as f32
),
_ => "...".to_string(),
};
WorkerStatus {
progress: Some(progress),
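The newer side of the lifecycle-worker hunk adds a `total > 0` guard so the percentage computation can never divide by zero when the object table is empty. A one-function sketch of the guarded computation:

```rust
// Progress string as computed above: guard against total == 0.
fn progress(counter: u64, n_objects: Option<u64>) -> String {
    match n_objects {
        Some(total) if total > 0 => {
            format!("~{:.2}%", 100.0 * counter.min(total) as f32 / total as f32)
        }
        _ => "...".to_string(),
    }
}

fn main() {
    assert_eq!(progress(5, Some(0)), "...");        // no division by zero
    assert_eq!(progress(50, Some(200)), "~25.00%");
    assert_eq!(progress(7, None), "...");
}
```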

@@ -1,6 +1,6 @@
[package]
name = "garage_net"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -30,7 +30,7 @@ rand.workspace = true

log.workspace = true
arc-swap.workspace = true
thiserror.workspace = true
err-derive.workspace = true
bytes.workspace = true
cfg-if.workspace = true

@@ -159,7 +159,7 @@ where
pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;

pub(crate) trait GenericEndpoint {
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>>;
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>>;
fn drop_handler(&self);
fn clone_endpoint(&self) -> DynEndpoint;
}

@@ -175,7 +175,7 @@ where
M: Message,
H: StreamingEndpointHandler<M> + 'static,
{
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<'_, Result<RespEnc, Error>> {
fn handle(&self, req_enc: ReqEnc, from: NodeID) -> BoxFuture<Result<RespEnc, Error>> {
async move {
match self.0.handler.load_full() {
None => Err(Error::NoHandler),

@@ -1,49 +1,49 @@
use std::io;

use err_derive::Error;
use log::error;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
#[error("IO error: {0}")]
Io(#[from] io::Error),
#[error(display = "IO error: {}", _0)]
Io(#[error(source)] io::Error),

#[error("Messagepack encode error: {0}")]
RMPEncode(#[from] rmp_serde::encode::Error),
#[error("Messagepack decode error: {0}")]
RMPDecode(#[from] rmp_serde::decode::Error),
#[error(display = "Messagepack encode error: {}", _0)]
RMPEncode(#[error(source)] rmp_serde::encode::Error),
#[error(display = "Messagepack decode error: {}", _0)]
RMPDecode(#[error(source)] rmp_serde::decode::Error),

#[error("Tokio join error: {0}")]
TokioJoin(#[from] tokio::task::JoinError),
#[error(display = "Tokio join error: {}", _0)]
TokioJoin(#[error(source)] tokio::task::JoinError),

#[error("oneshot receive error: {0}")]
OneshotRecv(#[from] tokio::sync::oneshot::error::RecvError),
#[error(display = "oneshot receive error: {}", _0)]
OneshotRecv(#[error(source)] tokio::sync::oneshot::error::RecvError),

#[error("Handshake error: {0}")]
Handshake(#[from] kuska_handshake::async_std::Error),
#[error(display = "Handshake error: {}", _0)]
Handshake(#[error(source)] kuska_handshake::async_std::Error),

#[error("UTF8 error: {0}")]
UTF8(#[from] std::string::FromUtf8Error),
#[error(display = "UTF8 error: {}", _0)]
UTF8(#[error(source)] std::string::FromUtf8Error),

#[error("Framing protocol error")]
#[error(display = "Framing protocol error")]
Framing,

#[error("Remote error ({0:?}): {1}")]
#[error(display = "Remote error ({:?}): {}", _0, _1)]
Remote(io::ErrorKind, String),

#[error("Request ID collision")]
#[error(display = "Request ID collision")]
IdCollision,

#[error("{0}")]
#[error(display = "{}", _0)]
Message(String),

#[error("No handler / shutting down")]
#[error(display = "No handler / shutting down")]
NoHandler,

#[error("Connection closed")]
#[error(display = "Connection closed")]
ConnectionClosed,

#[error("Version mismatch: {0}")]
#[error(display = "Version mismatch: {}", _0)]
VersionMismatch(String),
}
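This hunk, like the similar ones in the other crates, is a mechanical migration between two error-derive crates: `thiserror` puts the format string directly in `#[error("...")]` and marks conversion sources with `#[from]`, while `err-derive` uses `#[error(display = "...", _0)]` and `#[error(source)]`. A side-by-side sketch of one variant in each style, assuming both crates are available as dependencies:

```rust
// thiserror style (the newer side of the hunk): #[from] also derives From.
#[derive(Debug, thiserror::Error)]
enum NewError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
}

// err-derive style (the older side): display string and source as attributes.
#[derive(Debug, err_derive::Error)]
enum OldError {
    #[error(display = "IO error: {}", _0)]
    Io(#[error(source)] std::io::Error),
}

fn main() {
    let e = NewError::from(std::io::Error::new(std::io::ErrorKind::Other, "boom"));
    println!("{}", e); // prints "IO error: boom"
}
```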
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_rpc"
|
||||
version = "1.3.1"
|
||||
version = "1.1.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
@ -33,7 +33,7 @@ async-trait.workspace = true
|
|||
serde.workspace = true
|
||||
serde_bytes.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror = { workspace = true, optional = true }
|
||||
err-derive = { workspace = true, optional = true }
|
||||
|
||||
# newer version requires rust edition 2021
|
||||
kube = { workspace = true, optional = true }
|
||||
|
|
@ -49,5 +49,5 @@ opentelemetry.workspace = true
|
|||
|
||||
[features]
|
||||
kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ]
|
||||
consul-discovery = [ "reqwest", "thiserror" ]
|
||||
consul-discovery = [ "reqwest", "err-derive" ]
|
||||
system-libs = [ "sodiumoxide/use-pkg-config" ]
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ use std::fs::File;
|
|||
use std::io::Read;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
|
||||
use err_derive::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
use garage_net::NodeID;
|
||||
|
||||
|
|
@ -219,12 +219,12 @@ impl ConsulDiscovery {
|
|||
/// Regroup all Consul discovery errors
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ConsulError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
#[error("HTTP error: {0}")]
|
||||
Reqwest(#[from] reqwest::Error),
|
||||
#[error("Invalid Consul TLS configuration")]
|
||||
#[error(display = "IO error: {}", _0)]
|
||||
Io(#[error(source)] std::io::Error),
|
||||
#[error(display = "HTTP error: {}", _0)]
|
||||
Reqwest(#[error(source)] reqwest::Error),
|
||||
#[error(display = "Invalid Consul TLS configuration")]
|
||||
InvalidTLSConfig,
|
||||
#[error("Token error: {0}")]
|
||||
Token(#[from] reqwest::header::InvalidHeaderValue),
|
||||
#[error(display = "Token error: {}", _0)]
|
||||
Token(#[error(source)] reqwest::header::InvalidHeaderValue),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -229,11 +229,13 @@ impl LayoutManager {
|
|||
}
|
||||
|
||||
/// Save cluster layout data to disk
|
||||
async fn save_cluster_layout(&self) {
|
||||
async fn save_cluster_layout(&self) -> Result<(), Error> {
|
||||
let layout = self.layout.read().unwrap().inner().clone();
|
||||
if let Err(e) = self.persist_cluster_layout.save_async(&layout).await {
|
||||
error!("Failed to save cluster_layout: {}", e);
|
||||
}
|
||||
self.persist_cluster_layout
|
||||
.save_async(&layout)
|
||||
.await
|
||||
.expect("Cannot save current cluster layout");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn broadcast_update(self: &Arc<Self>, rpc: SystemRpc) {
|
||||
|
|
@ -311,7 +313,7 @@ impl LayoutManager {
|
|||
|
||||
self.change_notify.notify_waiters();
|
||||
self.broadcast_update(SystemRpc::AdvertiseClusterLayout(new_layout));
|
||||
self.save_cluster_layout().await;
|
||||
self.save_cluster_layout().await?;
|
||||
}
|
||||
|
||||
Ok(SystemRpc::Ok)
|
||||
|
|
@ -326,7 +328,7 @@ impl LayoutManager {
|
|||
if let Some(new_trackers) = self.merge_layout_trackers(trackers) {
|
||||
self.change_notify.notify_waiters();
|
||||
self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(new_trackers));
|
||||
self.save_cluster_layout().await;
|
||||
self.save_cluster_layout().await?;
|
||||
}
|
||||
|
||||
Ok(SystemRpc::Ok)
|
||||
|
|
|
|||
|
|
@ -507,7 +507,7 @@ impl LayoutVersion {
|
|||
g.compute_maximal_flow()?;
|
||||
if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 {
|
||||
return Err(Error::Message(
|
||||
"The storage capacity of the cluster is too small. It is \
|
||||
"The storage capacity of he cluster is to small. It is \
|
||||
impossible to store partitions of size 1."
|
||||
.into(),
|
||||
));
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "garage_table"
|
||||
version = "1.3.1"
|
||||
version = "1.1.0"
|
||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3.0"
|
||||
|
|
|
|||
|
|
@ -367,7 +367,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn gc_todo_approximate_len(&self) -> Result<usize, Error> {
|
||||
Ok(self.gc_todo.approximate_len()?)
|
||||
pub fn gc_todo_len(&self) -> Result<usize, Error> {
|
||||
Ok(self.gc_todo.len()?)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -313,7 +313,7 @@ impl<F: TableSchema, R: TableReplication> Worker for GcWorker<F, R> {
|
|||
|
||||
fn status(&self) -> WorkerStatus {
|
||||
WorkerStatus {
|
||||
queue_length: Some(self.gc.data.gc_todo_approximate_len().unwrap_or(0) as u64),
|
||||
queue_length: Some(self.gc.data.gc_todo_len().unwrap_or(0) as u64),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -287,12 +287,12 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
        MerkleNode::decode_opt(&ent)
    }

    pub fn merkle_tree_approximate_len(&self) -> Result<usize, Error> {
        Ok(self.data.merkle_tree.approximate_len()?)
    pub fn merkle_tree_len(&self) -> Result<usize, Error> {
        Ok(self.data.merkle_tree.len()?)
    }

    pub fn todo_approximate_len(&self) -> Result<usize, Error> {
        Ok(self.data.merkle_todo.approximate_len()?)
    pub fn todo_len(&self) -> Result<usize, Error> {
        Ok(self.data.merkle_todo.len()?)
    }
}

@@ -306,7 +306,7 @@ impl<F: TableSchema, R: TableReplication> Worker for MerkleWorker<F, R> {

    fn status(&self) -> WorkerStatus {
        WorkerStatus {
            queue_length: Some(self.0.todo_approximate_len().unwrap_or(0) as u64),
            queue_length: Some(self.0.todo_len().unwrap_or(0) as u64),
            ..Default::default()
        }
    }

@@ -34,7 +34,7 @@ impl TableMetrics {
            .u64_value_observer(
                "table.size",
                move |observer| {
                    if let Ok(value) = store.approximate_len() {
                    if let Ok(value) = store.len() {
                        observer.observe(
                            value as u64,
                            &[KeyValue::new("table_name", table_name)],

@@ -48,7 +48,7 @@ impl TableMetrics {
            .u64_value_observer(
                "table.merkle_tree_size",
                move |observer| {
                    if let Ok(value) = merkle_tree.approximate_len() {
                    if let Ok(value) = merkle_tree.len() {
                        observer.observe(
                            value as u64,
                            &[KeyValue::new("table_name", table_name)],

@@ -62,7 +62,7 @@ impl TableMetrics {
            .u64_value_observer(
                "table.merkle_updater_todo_queue_length",
                move |observer| {
                    if let Ok(v) = merkle_todo.approximate_len() {
                    if let Ok(v) = merkle_todo.len() {
                        observer.observe(
                            v as u64,
                            &[KeyValue::new("table_name", table_name)],

@@ -76,7 +76,7 @@ impl TableMetrics {
            .u64_value_observer(
                "table.gc_todo_queue_length",
                move |observer| {
                    if let Ok(value) = gc_todo.approximate_len() {
                    if let Ok(value) = gc_todo.len() {
                        observer.observe(
                            value as u64,
                            &[KeyValue::new("table_name", table_name)],

@@ -27,7 +27,7 @@ impl<F: TableSchema, R: TableReplication> Worker for InsertQueueWorker<F, R> {

    fn status(&self) -> WorkerStatus {
        WorkerStatus {
            queue_length: Some(self.0.data.insert_queue.approximate_len().unwrap_or(0) as u64),
            queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64),
            ..Default::default()
        }
    }
@@ -43,13 +43,10 @@ impl TableReplication for TableFullReplication {
    }
    fn write_quorum(&self) -> usize {
        let nmembers = self.system.cluster_layout().current().all_nodes().len();

        let max_faults = if nmembers > 1 { 1 } else { 0 };

        if nmembers > max_faults {
            nmembers - max_faults
        } else {
        if nmembers < 3 {
            1
        } else {
            nmembers.div_euclid(2) + 1
        }
    }
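The `write_quorum` hunk above trades an all-but-one quorum for a simple-majority quorum. The two rules agree for small clusters but diverge as the cluster grows (for example, with five members: 4 versus 3). A self-contained sketch of both, simplified from the code above (the fallback branch of the first rule is an assumption, it is unreachable for any real member count):

// All-but-one rule: require every member but one once there is more
// than one member, i.e. tolerate at most a single faulty node.
fn quorum_all_but_one(nmembers: usize) -> usize {
    let max_faults = if nmembers > 1 { 1 } else { 0 };
    if nmembers > max_faults {
        nmembers - max_faults
    } else {
        1
    }
}

// Simple-majority rule, with a floor of 1 for tiny clusters.
fn quorum_majority(nmembers: usize) -> usize {
    if nmembers < 3 {
        1
    } else {
        nmembers.div_euclid(2) + 1
    }
}

fn main() {
    for n in 1..=7 {
        println!(
            "n={n}: all-but-one={}, majority={}",
            quorum_all_but_one(n),
            quorum_majority(n)
        );
    }
}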
@@ -1,6 +1,6 @@
[package]
name = "garage_util"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -21,7 +21,7 @@ arc-swap.workspace = true
async-trait.workspace = true
blake2.workspace = true
bytesize.workspace = true
thiserror.workspace = true
err-derive.workspace = true
hexdump.workspace = true
xxhash-rust.workspace = true
hex.workspace = true
@@ -115,7 +115,6 @@ impl WorkerProcessor {
                trace!("{} (TID {}): {:?}", worker.worker.name(), worker.task_id, worker.state);

                // Save worker info
                {
                    let mut wi = self.worker_info.lock().unwrap();
                    match wi.get_mut(&worker.task_id) {
                        Some(i) => {

@@ -138,16 +137,10 @@ impl WorkerProcessor {
                        });
                    }
                }
            }

            if worker.state == WorkerState::Done {
                info!("Worker {} (TID {}) exited", worker.worker.name(), worker.task_id);
            } else {
                // Yield to the Tokio scheduler between consecutive Busy steps so
                // that a worker which never suspends on its own cannot starve other tasks.
                if worker.state == WorkerState::Busy {
                    tokio::task::yield_now().await;
                }
                workers.push(async move {
                    worker.step().await;
                    worker
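The comment and `yield_now` call above address cooperative scheduling: a future that is always `Busy` and never awaits anything else can monopolize a Tokio worker thread. A small self-contained demonstration of the same technique (assumes the `tokio` crate with the `rt` and `macros` features):

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // A task that can only make progress when the busy loop below yields.
    let other = tokio::spawn(async {
        println!("other task got scheduled");
    });

    for step in 0..3 {
        // ... one "Busy" step of synchronous work would run here ...
        println!("busy step {step}");
        tokio::task::yield_now().await; // hand control back to the scheduler
    }

    other.await.unwrap();
}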
@@ -45,11 +45,6 @@ pub struct Config {
    )]
    pub block_size: usize,

    /// Maximum number of parallel block writes per PUT request
    /// Higher values improve throughput but increase memory usage
    /// Default: 3, Recommended: 10-30 for NVMe, 3-10 for HDD
    #[serde(default = "default_block_max_concurrent_writes_per_request")]
    pub block_max_concurrent_writes_per_request: usize,
    /// Number of replicas. Can be any positive integer, but uneven numbers are more favorable.
    /// - 1 for single-node clusters, or to disable replication
    /// - 3 is the recommended and supported setting.

@@ -80,10 +75,6 @@ pub struct Config {
    )]
    pub block_ram_buffer_max: usize,

    /// Maximum number of concurrent reads of block files on disk
    #[serde(default = "default_block_max_concurrent_reads")]
    pub block_max_concurrent_reads: usize,

    /// Skip the permission check of secret files. Useful when
    /// POSIX ACLs (or more complex chmods) are used.
    #[serde(default)]

@@ -131,10 +122,6 @@ pub struct Config {
    #[serde(deserialize_with = "deserialize_capacity", default)]
    pub lmdb_map_size: usize,

    /// Fjall block cache size
    #[serde(deserialize_with = "deserialize_capacity", default)]
    pub fjall_block_cache_size: usize,

    // -- APIs
    /// Configuration for S3 api
    pub s3_api: S3ApiConfig,

@@ -272,9 +259,6 @@ pub struct KubernetesDiscoveryConfig {
    pub skip_crd: bool,
}

pub fn default_block_max_concurrent_writes_per_request() -> usize {
    3
}
/// Read and parse configuration
pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
    let config = std::fs::read_to_string(config_file)?;

@@ -292,9 +276,6 @@ fn default_block_size() -> usize {
fn default_block_ram_buffer_max() -> usize {
    256 * 1024 * 1024
}
fn default_block_max_concurrent_reads() -> usize {
    16
}

fn default_consistency_mode() -> String {
    "consistent".into()
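The options touched above all follow the same serde pattern: a `default = "..."` attribute naming a free function that supplies the value when the key is missing from the TOML file. A minimal sketch of that pattern, reusing one field name from the hunks and assuming `serde` (with the derive feature) and `toml` as dependencies:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Config {
    /// Falls back to default_block_max_concurrent_reads() when absent.
    #[serde(default = "default_block_max_concurrent_reads")]
    block_max_concurrent_reads: usize,
}

fn default_block_max_concurrent_reads() -> usize {
    16
}

fn main() {
    // Key absent: the default function is called.
    let cfg: Config = toml::from_str("").unwrap();
    assert_eq!(cfg.block_max_concurrent_reads, 16);

    // Key present: the file value wins.
    let cfg: Config = toml::from_str("block_max_concurrent_reads = 4").unwrap();
    assert_eq!(cfg.block_max_concurrent_reads, 4);
}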
@@ -9,6 +9,16 @@ pub enum Deletable<T> {
    Deleted,
}

impl<T> Deletable<T> {
    /// Map value, used for migrations
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Deletable<U> {
        match self {
            Self::Present(x) => Deletable::<U>::Present(f(x)),
            Self::Deleted => Deletable::<U>::Deleted,
        }
    }
}

impl<T: Crdt> Deletable<T> {
    /// Create a new deletable object that isn't deleted
    pub fn present(v: T) -> Self {

@@ -43,6 +43,16 @@ pub struct Lww<T> {
    v: T,
}

impl<T> Lww<T> {
    /// Map value, used for migrations
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Lww<U> {
        Lww::<U> {
            ts: self.ts,
            v: f(self.v),
        }
    }
}

impl<T> Lww<T>
where
    T: Crdt,
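Both `map` helpers above serve schema migrations: they convert the inner payload type while carrying the CRDT bookkeeping over unchanged (the last-write-wins timestamp of an `Lww`, the deleted flag of a `Deletable`). A usage sketch with simplified stand-ins; `OldInfo` and `NewInfo` are hypothetical schema versions, and this `Lww` mirrors only the fields shown above:

#[derive(Debug, PartialEq)]
struct Lww<T> {
    ts: u64, // last-write-wins timestamp
    v: T,
}

impl<T> Lww<T> {
    /// Map value, used for migrations
    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Lww<U> {
        Lww { ts: self.ts, v: f(self.v) }
    }
}

#[derive(Debug, PartialEq)]
struct OldInfo { quota: u32 }

#[derive(Debug, PartialEq)]
struct NewInfo { quota: u64 }

fn main() {
    let old = Lww { ts: 42, v: OldInfo { quota: 10 } };
    // Widen the payload type; the timestamp survives the migration intact.
    let new: Lww<NewInfo> = old.map(|o| NewInfo { quota: o.quota as u64 });
    assert_eq!(new, Lww { ts: 42, v: NewInfo { quota: 10 } });
}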
@@ -2,7 +2,7 @@
use std::fmt;
use std::io;

use thiserror::Error;
use err_derive::Error;

use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};

@@ -12,61 +12,68 @@ use crate::encode::debug_serialize;
/// Regroup all Garage errors
#[derive(Debug, Error)]
pub enum Error {
    #[error("IO error: {0}")]
    Io(#[from] io::Error),
    #[error(display = "IO error: {}", _0)]
    Io(#[error(source)] io::Error),

    #[error("Hyper error: {0}")]
    Hyper(#[from] hyper::Error),
    #[error(display = "Hyper error: {}", _0)]
    Hyper(#[error(source)] hyper::Error),

    #[error("HTTP error: {0}")]
    Http(#[from] http::Error),
    #[error(display = "HTTP error: {}", _0)]
    Http(#[error(source)] http::Error),

    #[error("Invalid HTTP header value: {0}")]
    HttpHeader(#[from] http::header::ToStrError),
    #[error(display = "Invalid HTTP header value: {}", _0)]
    HttpHeader(#[error(source)] http::header::ToStrError),

    #[error("Network error: {0}")]
    Net(#[from] garage_net::error::Error),
    #[error(display = "Network error: {}", _0)]
    Net(#[error(source)] garage_net::error::Error),

    #[error("DB error: {0}")]
    Db(#[from] garage_db::Error),
    #[error(display = "DB error: {}", _0)]
    Db(#[error(source)] garage_db::Error),

    #[error("Messagepack encode error: {0}")]
    RmpEncode(#[from] rmp_serde::encode::Error),
    #[error("Messagepack decode error: {0}")]
    RmpDecode(#[from] rmp_serde::decode::Error),
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::error::Error),
    #[error("TOML decode error: {0}")]
    TomlDecode(#[from] toml::de::Error),
    #[error(display = "Messagepack encode error: {}", _0)]
    RmpEncode(#[error(source)] rmp_serde::encode::Error),
    #[error(display = "Messagepack decode error: {}", _0)]
    RmpDecode(#[error(source)] rmp_serde::decode::Error),
    #[error(display = "JSON error: {}", _0)]
    Json(#[error(source)] serde_json::error::Error),
    #[error(display = "TOML decode error: {}", _0)]
    TomlDecode(#[error(source)] toml::de::Error),

    #[error("Tokio join error: {0}")]
    TokioJoin(#[from] tokio::task::JoinError),
    #[error(display = "Tokio join error: {}", _0)]
    TokioJoin(#[error(source)] tokio::task::JoinError),

    #[error("Tokio semaphore acquire error: {0}")]
    TokioSemAcquire(#[from] tokio::sync::AcquireError),
    #[error(display = "Tokio semaphore acquire error: {}", _0)]
    TokioSemAcquire(#[error(source)] tokio::sync::AcquireError),

    #[error("Tokio broadcast receive error: {0}")]
    TokioBcastRecv(#[from] tokio::sync::broadcast::error::RecvError),
    #[error(display = "Tokio broadcast receive error: {}", _0)]
    TokioBcastRecv(#[error(source)] tokio::sync::broadcast::error::RecvError),

    #[error("Remote error: {0}")]
    #[error(display = "Remote error: {}", _0)]
    RemoteError(String),

    #[error("Timeout")]
    #[error(display = "Timeout")]
    Timeout,

    #[error("Could not reach quorum of {0} (sets={1:?}). {2} of {3} request succeeded, others returned errors: {4:?}")]
    #[error(
        display = "Could not reach quorum of {} (sets={:?}). {} of {} request succeeded, others returned errors: {:?}",
        _0,
        _1,
        _2,
        _3,
        _4
    )]
    Quorum(usize, Option<usize>, usize, usize, Vec<String>),

    #[error("Unexpected RPC message: {0}")]
    #[error(display = "Unexpected RPC message: {}", _0)]
    UnexpectedRpcMessage(String),

    #[error("Corrupt data: does not match hash {0:?}")]
    #[error(display = "Corrupt data: does not match hash {:?}", _0)]
    CorruptData(Hash),

    #[error("Missing block {0:?}: no node returned a valid block")]
    #[error(display = "Missing block {:?}: no node returned a valid block", _0)]
    MissingBlock(Hash),

    #[error("{0}")]
    #[error(display = "{}", _0)]
    Message(String),
}
@@ -1,6 +1,6 @@
[package]
name = "garage_web"
version = "1.3.1"
version = "1.1.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018"
license = "AGPL-3.0"

@@ -20,7 +20,7 @@ garage_model.workspace = true
garage_util.workspace = true
garage_table.workspace = true

thiserror.workspace = true
err-derive.workspace = true
tracing.workspace = true
percent-encoding.workspace = true

@@ -1,6 +1,6 @@
use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use thiserror::Error;

use garage_api_common::generic_server::ApiError;
@@ -8,15 +8,15 @@ use garage_api_common::generic_server::ApiError;
#[derive(Debug, Error)]
pub enum Error {
    /// An error received from the API crate
    #[error("API error: {0}")]
    #[error(display = "API error: {}", _0)]
    ApiError(garage_api_s3::error::Error),

    /// The file does not exist
    #[error("Not found")]
    #[error(display = "Not found")]
    NotFound,

    /// The client sent a request without host, or with unsupported method
    #[error("Bad request: {0}")]
    #[error(display = "Bad request: {}", _0)]
    BadRequest(String),
}
@@ -25,12 +25,14 @@ use garage_api_common::cors::{
};
use garage_api_common::generic_server::{server_loop, UnixListenerOn};
use garage_api_common::helpers::*;
use garage_api_s3::api_server::ResBody;
use garage_api_s3::error::{
    CommonErrorDerivative, Error as ApiError, OkOrBadRequest, OkOrInternalError,
};
use garage_api_s3::get::{handle_get_without_ctx, handle_head_without_ctx};
use garage_api_s3::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;

use garage_model::bucket_table::{self, RoutingRule};
use garage_model::garage::Garage;

use garage_table::*;

@@ -260,45 +262,71 @@ impl WebServer {
        // Get path
        let path = req.uri().path().to_string();
        let index = &website_config.index_document;
        let (key, may_redirect) = path_to_keys(&path, index)?;
        let routing_result = path_to_keys(&path, index, &website_config.routing_rules)?;

        debug!(
            "Selected bucket: \"{}\" {:?}, target key: \"{}\", may redirect to: {:?}",
            bucket_name, bucket_id, key, may_redirect
            "Selected bucket: \"{}\" {:?}, routing to {:?}",
            bucket_name, bucket_id, routing_result,
        );

        let ret_doc = match *req.method() {
            Method::OPTIONS => handle_options_for_bucket(req, &bucket_params)
        let ret_doc = match (req.method(), routing_result.main_target()) {
            (&Method::OPTIONS, _) => handle_options_for_bucket(req, &bucket_params)
                .map_err(ApiError::from)
                .map(|res| res.map(|_empty_body: EmptyBody| empty_body())),
            Method::HEAD => {
                handle_head_without_ctx(self.garage.clone(), req, bucket_id, &key, None).await
            (_, Err((url, code))) => Ok(Response::builder()
                .status(code)
                .header("Location", url)
                .body(empty_body())
                .unwrap()),
            (_, Ok((key, code))) => {
                handle_inner(self.garage.clone(), req, bucket_id, key, code).await
            }
            Method::GET => {
                handle_get_without_ctx(
        };

        // Try handling errors if bucket configuration provided fallbacks
        let ret_doc_with_redir = match (&ret_doc, &routing_result) {
            (
                Err(ApiError::NoSuchKey),
                RoutingResult::LoadOrRedirect {
                    redirect_if_exists,
                    redirect_url,
                    redirect_code,
                    ..
                },
            ) => {
                let redirect = if let Some(redirect_key) = redirect_if_exists {
                    self.check_key_exists(bucket_id, redirect_key.as_str())
                        .await?
                } else {
                    true
                };
                if redirect {
                    Ok(Response::builder()
                        .status(redirect_code)
                        .header("Location", redirect_url)
                        .body(empty_body())
                        .unwrap())
                } else {
                    ret_doc
                }
            }
            (
                Err(ApiError::NoSuchKey),
                RoutingResult::LoadOrAlternativeError {
                    redirect_key,
                    redirect_code,
                    ..
                },
            ) => {
                handle_inner(
                    self.garage.clone(),
                    req,
                    bucket_id,
                    &key,
                    None,
                    Default::default(),
                    redirect_key,
                    *redirect_code,
                )
                .await
            }
            _ => Err(ApiError::bad_request("HTTP method not supported")),
        };

        // Try implicit redirect on error
        let ret_doc_with_redir = match (&ret_doc, may_redirect) {
            (Err(ApiError::NoSuchKey), ImplicitRedirect::To { key, url })
                if self.check_key_exists(bucket_id, key.as_str()).await? =>
            {
                Ok(Response::builder()
                    .status(StatusCode::FOUND)
                    .header(LOCATION, url)
                    .body(empty_body())
                    .unwrap())
            }
            (Ok(ret), _) if ret.headers().contains_key(X_AMZ_WEBSITE_REDIRECT_LOCATION) => {
                let redirect_location = ret.headers().get(X_AMZ_WEBSITE_REDIRECT_LOCATION).unwrap();
                Ok(Response::builder()
@@ -332,17 +360,17 @@ impl WebServer {
        // We want to return the error document
        // Create a fake HTTP request with path = the error document
        let req2 = Request::builder()
            .method("GET")
            .uri(format!("http://{}/{}", host, &error_document))
            .body(())
            .unwrap();

        match handle_get_without_ctx(
        match handle_inner(
            self.garage.clone(),
            &req2,
            bucket_id,
            &error_document,
            None,
            Default::default(),
            error.http_status_code(),
        )
        .await
        {

@@ -357,8 +385,6 @@ impl WebServer {
            error
        );

        *error_doc.status_mut() = error.http_status_code();

        // Preserve error message in a special header
        for error_line in error.to_string().split('\n') {
            if let Ok(v) = HeaderValue::from_bytes(error_line.as_bytes()) {
@@ -389,6 +415,52 @@ impl WebServer {
    }
}

async fn handle_inner(
    garage: Arc<Garage>,
    req: &Request<()>,
    bucket_id: Uuid,
    key: &str,
    status_code: StatusCode,
) -> Result<Response<ResBody>, ApiError> {
    if status_code != StatusCode::OK {
        // If we are returning an error document, discard all headers from
        // the original request that would have influenced the result:
        // - Range header, we don't want to return a subrange of the error document
        // - Caching directives such as If-None-Match, etc, which are not relevant
        let cleaned_req = Request::builder().uri(req.uri()).body(()).unwrap();

        let mut ret = match req.method() {
            &Method::HEAD => {
                handle_head_without_ctx(garage, &cleaned_req, bucket_id, key, None).await?
            }
            &Method::GET => {
                handle_get_without_ctx(
                    garage,
                    &cleaned_req,
                    bucket_id,
                    key,
                    None,
                    Default::default(),
                )
                .await?
            }
            _ => return Err(ApiError::bad_request("HTTP method not supported")),
        };

        *ret.status_mut() = status_code;

        Ok(ret)
    } else {
        match req.method() {
            &Method::HEAD => handle_head_without_ctx(garage, req, bucket_id, key, None).await,
            &Method::GET => {
                handle_get_without_ctx(garage, req, bucket_id, key, None, Default::default()).await
            }
            _ => Err(ApiError::bad_request("HTTP method not supported")),
        }
    }
}

fn error_to_res(e: Error) -> Response<BoxBody<Error>> {
    // If we are here, it is either that:
    // - there was an error before trying to get the requested URL
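One detail of `handle_inner` worth noting: when serving an error document, it rebuilds the request from the URI alone, so that headers such as `Range` or `If-None-Match` cannot shape the error response. The same trick in isolation, using only the `http` crate:

use http::Request;

// Rebuild a request keeping only the URI, dropping all headers.
fn cleaned(req: &Request<()>) -> Request<()> {
    Request::builder().uri(req.uri()).body(()).unwrap()
}

fn main() {
    let req = Request::builder()
        .uri("http://bucket.example/missing-page")
        .header("Range", "bytes=0-99")
        .header("If-None-Match", "\"abc\"")
        .body(())
        .unwrap();

    let clean = cleaned(&req);
    assert!(clean.headers().get("Range").is_none());
    assert!(clean.headers().get("If-None-Match").is_none());
}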
@@ -397,37 +469,52 @@ fn error_to_res(e: Error) -> Response<BoxBody<Error>> {
    // was a HEAD request or we couldn't get the error document)
    // We do NOT enter this code path when returning the bucket's
    // error document (this is handled in serve_file)
    let mut body_str = format!(
        r"<title>{http_code} {code_text}</title>
<h1>{http_code} {code_text}</h1>",
        http_code = e.http_status_code().as_u16(),
        code_text = e.http_status_code().canonical_reason().unwrap_or("Unknown"),
    );
    if let Error::ApiError(ref err) = e {
        body_str.push_str(&format!(
            r"
<ul>
<li>Code: {s3_code}</li>
<li>Message: {s3_message}.</li>
</ul>",
            s3_code = err.aws_code(),
            s3_message = err,
        ));
    }
    let mut http_error = Response::new(string_body(body_str));
    let body = string_body(format!("{}\n", e));
    let mut http_error = Response::new(body);
    *http_error.status_mut() = e.http_status_code();
    e.add_headers(http_error.headers_mut());
    http_error.headers_mut().insert(
        http::header::CONTENT_TYPE,
        "text/html; charset=utf-8".parse().unwrap(),
    );
    http_error
}

#[derive(Debug, PartialEq)]
enum ImplicitRedirect {
    No,
    To { key: String, url: String },
enum RoutingResult {
    // Load a key and use `code` as status, or fallback to normal 404 handler if not found
    LoadKey {
        key: String,
        code: StatusCode,
    },
    // Load a key and use `200` as status, or fallback with a redirection using `redirect_code`
    // as status
    LoadOrRedirect {
        key: String,
        redirect_if_exists: Option<String>,
        redirect_url: String,
        redirect_code: StatusCode,
    },
    // Load a key and use `200` as status, or fallback by loading a different key and use
    // `redirect_code` as status
    LoadOrAlternativeError {
        key: String,
        redirect_key: String,
        redirect_code: StatusCode,
    },
    // Send an http redirect with `code` as status
    Redirect {
        url: String,
        code: StatusCode,
    },
}

impl RoutingResult {
    // return Ok((key_to_deref, status_code)) or Err((redirect_target, status_code))
    fn main_target(&self) -> Result<(&str, StatusCode), (&str, StatusCode)> {
        match self {
            RoutingResult::LoadKey { key, code } => Ok((key, *code)),
            RoutingResult::LoadOrRedirect { key, .. } => Ok((key, StatusCode::OK)),
            RoutingResult::LoadOrAlternativeError { key, .. } => Ok((key, StatusCode::OK)),
            RoutingResult::Redirect { url, code } => Err((url, *code)),
        }
    }
}

/// Path to key
@@ -437,33 +524,152 @@ enum ImplicitRedirect {
/// which is also AWS S3 behavior.
///
/// Check: https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html
fn path_to_keys<'a>(path: &'a str, index: &str) -> Result<(String, ImplicitRedirect), Error> {
fn path_to_keys(
    path: &str,
    index: &str,
    routing_rules: &[RoutingRule],
) -> Result<RoutingResult, Error> {
    let path_utf8 = percent_encoding::percent_decode_str(path).decode_utf8()?;

    let base_key = match path_utf8.strip_prefix("/") {
        Some(bk) => bk,
        None => return Err(Error::BadRequest("Path must start with a / (slash)".into())),
    };
    let is_bucket_root = base_key.len() == 0;

    let is_bucket_root = base_key.is_empty();
    let is_trailing_slash = path_utf8.ends_with("/");

    match (is_bucket_root, is_trailing_slash) {
        // It is not possible to store something at the root of the bucket (ie. empty key),
        // the only option is to fetch the index
        (true, _) => Ok((index.to_string(), ImplicitRedirect::No)),
    let key = if is_bucket_root || is_trailing_slash {
        // we can't store anything at the root, so we need to query the index
        // if the key ends with a slash, we always query the index
        format!("{base_key}{index}")
    } else {
        // if the key doesn't end with `/`, leave it unmodified
        base_key.to_string()
    };

        // "If you create a folder structure in your bucket, you must have an index document at each level. In each folder, the index document must have the same name, for example, index.html. When a user specifies a URL that resembles a folder lookup, the presence or absence of a trailing slash determines the behavior of the website. For example, the following URL, with a trailing slash, returns the photos/index.html index document."
        (false, true) => Ok((format!("{base_key}{index}"), ImplicitRedirect::No)),
    let mut routing_rules_iter = routing_rules.iter();
    let key = loop {
        let Some(routing_rule) = routing_rules_iter.next() else {
            break key;
        };

        // "However, if you exclude the trailing slash from the preceding URL, Amazon S3 first looks for an object photos in the bucket. If the photos object is not found, it searches for an index document, photos/index.html. If that document is found, Amazon S3 returns a 302 Found message and points to the photos/ key. For subsequent requests to photos/, Amazon S3 returns photos/index.html. If the index document is not found, Amazon S3 returns an error."
        (false, false) => Ok((
            base_key.to_string(),
            ImplicitRedirect::To {
                key: format!("{base_key}/{index}"),
                url: format!("{path}/"),
            },
        )),
        let Ok(status_code) = StatusCode::from_u16(routing_rule.redirect.http_redirect_code) else {
            continue;
        };
        if let Some(condition) = &routing_rule.condition {
            let suffix = if let Some(prefix) = &condition.prefix {
                let Some(suffix) = base_key.strip_prefix(prefix) else {
                    continue;
                };
                Some(suffix)
            } else {
                None
            };
            let mut target = compute_redirect_target(&routing_rule.redirect, suffix);
            let query_alternative_key =
                status_code == StatusCode::OK || status_code == StatusCode::NOT_FOUND;
            let redirect_on_error =
                condition.http_error_code == Some(StatusCode::NOT_FOUND.as_u16());
            match (query_alternative_key, redirect_on_error) {
                (false, false) => {
                    return Ok(RoutingResult::Redirect {
                        url: target,
                        code: status_code,
                    })
                }
                (true, false) => {
                    // we need to remove the leading /
                    target.remove(0);
                    if status_code == StatusCode::OK {
                        break target;
                    } else {
                        return Ok(RoutingResult::LoadKey {
                            key: target,
                            code: status_code,
                        });
                    }
                }
                (false, true) => {
                    return Ok(RoutingResult::LoadOrRedirect {
                        key,
                        redirect_if_exists: None,
                        redirect_url: target,
                        redirect_code: status_code,
                    });
                }
                (true, true) => {
                    target.remove(0);
                    return Ok(RoutingResult::LoadOrAlternativeError {
                        key,
                        redirect_key: target,
                        redirect_code: status_code,
                    });
                }
            }
        } else {
            let target = compute_redirect_target(&routing_rule.redirect, None);
            return Ok(RoutingResult::Redirect {
                url: target,
                code: status_code,
            });
        }
    };

    if is_bucket_root || is_trailing_slash {
        Ok(RoutingResult::LoadKey {
            key,
            code: StatusCode::OK,
        })
    } else {
        Ok(RoutingResult::LoadOrRedirect {
            redirect_if_exists: Some(format!("{key}/{index}")),
            // we can't use `path` because key might have changed substantially in case of
            // routing rules
            redirect_url: percent_encoding::percent_encode(
                format!("/{key}/").as_bytes(),
                PATH_ENCODING_SET,
            )
            .to_string(),
            key,
            redirect_code: StatusCode::FOUND,
        })
    }
}

// per https://url.spec.whatwg.org/#path-percent-encode-set
const PATH_ENCODING_SET: &percent_encoding::AsciiSet = &percent_encoding::CONTROLS
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'?')
    .add(b'`')
    .add(b'{')
    .add(b'}');

fn compute_redirect_target(redirect: &bucket_table::Redirect, suffix: Option<&str>) -> String {
    let mut res = String::new();
    if let Some(hostname) = &redirect.hostname {
        if let Some(protocol) = &redirect.protocol {
            res.push_str(protocol);
            res.push_str("://");
        } else {
            res.push_str("//");
        }
        res.push_str(hostname);
    }
    res.push('/');
    if let Some(replace_key_prefix) = &redirect.replace_key_prefix {
        res.push_str(replace_key_prefix);
        if let Some(suffix) = suffix {
            res.push_str(suffix)
        }
    } else if let Some(replace_key) = &redirect.replace_key {
        res.push_str(replace_key)
    }
    res
}
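To make the behavior of `compute_redirect_target` concrete, here is a worked example: a rule whose condition prefix `docs/` matched the key `docs/guide.html` (so the remaining suffix is `guide.html`) rewrites it under `manual/` on another host. The function body is copied from above; the `Redirect` struct is a simplified stand-in for `bucket_table::Redirect`, and the rule values are made up for illustration:

struct Redirect {
    hostname: Option<String>,
    protocol: Option<String>,
    replace_key_prefix: Option<String>,
    replace_key: Option<String>,
}

fn compute_redirect_target(redirect: &Redirect, suffix: Option<&str>) -> String {
    let mut res = String::new();
    if let Some(hostname) = &redirect.hostname {
        if let Some(protocol) = &redirect.protocol {
            res.push_str(protocol);
            res.push_str("://");
        } else {
            res.push_str("//"); // protocol-relative URL
        }
        res.push_str(hostname);
    }
    res.push('/');
    if let Some(prefix) = &redirect.replace_key_prefix {
        res.push_str(prefix);
        if let Some(suffix) = suffix {
            res.push_str(suffix); // keep the part after the matched prefix
        }
    } else if let Some(key) = &redirect.replace_key {
        res.push_str(key);
    }
    res
}

fn main() {
    let rule = Redirect {
        hostname: Some("example.org".to_string()),
        protocol: Some("https".to_string()),
        replace_key_prefix: Some("manual/".to_string()),
        replace_key: None,
    };
    assert_eq!(
        compute_redirect_target(&rule, Some("guide.html")),
        "https://example.org/manual/guide.html"
    );

    // Without a hostname the target is a same-host absolute path.
    let local = Redirect {
        hostname: None,
        protocol: None,
        replace_key_prefix: None,
        replace_key: Some("error.html".to_string()),
    };
    assert_eq!(compute_redirect_target(&local, None), "/error.html");
}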
#[cfg(test)]

@@ -473,35 +679,39 @@ mod tests {
    #[test]
    fn path_to_keys_test() -> Result<(), Error> {
        assert_eq!(
            path_to_keys("/file%20.jpg", "index.html")?,
            (
                "file .jpg".to_string(),
                ImplicitRedirect::To {
                    key: "file .jpg/index.html".to_string(),
                    url: "/file%20.jpg/".to_string()
            path_to_keys("/file%20.jpg", "index.html", &[])?,
            RoutingResult::LoadOrRedirect {
                key: "file .jpg".to_string(),
                redirect_url: "/file%20.jpg/".to_string(),
                redirect_if_exists: Some("file .jpg/index.html".to_string()),
                redirect_code: StatusCode::FOUND,
            }
            )
        );
        assert_eq!(
            path_to_keys("/%20t/", "index.html")?,
            (" t/index.html".to_string(), ImplicitRedirect::No)
        );
        assert_eq!(
            path_to_keys("/", "index.html")?,
            ("index.html".to_string(), ImplicitRedirect::No)
        );
        assert_eq!(
            path_to_keys("/hello", "index.html")?,
            (
                "hello".to_string(),
                ImplicitRedirect::To {
                    key: "hello/index.html".to_string(),
                    url: "/hello/".to_string()
            path_to_keys("/%20t/", "index.html", &[])?,
            RoutingResult::LoadKey {
                key: " t/index.html".to_string(),
                code: StatusCode::OK
            }
            )
        );
        assert!(path_to_keys("", "index.html").is_err());
        assert!(path_to_keys("i/am/relative", "index.html").is_err());
        assert_eq!(
            path_to_keys("/", "index.html", &[])?,
            RoutingResult::LoadKey {
                key: "index.html".to_string(),
                code: StatusCode::OK
            }
        );
        assert_eq!(
            path_to_keys("/hello", "index.html", &[])?,
            RoutingResult::LoadOrRedirect {
                key: "hello".to_string(),
                redirect_url: "/hello/".to_string(),
                redirect_if_exists: Some("hello/index.html".to_string()),
                redirect_code: StatusCode::FOUND,
            }
        );
        assert!(path_to_keys("", "index.html", &[]).is_err());
        assert!(path_to_keys("i/am/relative", "index.html", &[]).is_err());
        Ok(())
    }
}