Compare commits

3 commits

Author          SHA1        Message                                                       Date
Alex Auvolat    0a2e809cb9  admin api: avoid overwriting redirect rules in UpdateBucket   2025-02-18 19:39:59 +01:00
trinity-1686a   e0f8a72a81  support redirection on s3 endpoint                            2025-02-18 19:11:55 +01:00
Quentin Dufour  00b8239a2b  decrease write quorum                                         2025-02-18 19:03:04 +01:00

125 changed files with 3364 additions and 5326 deletions

View file

@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - push
@@ -12,32 +9,27 @@ when:
 steps:
   - name: check formatting
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.fmt
+      - nix-shell --attr devShell --run "cargo fmt -- --check"
   - name: build
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
      - nix-build -j4 --attr flakePackages.dev
   - name: unit + func tests (lmdb)
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-lmdb
   - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.tests-sqlite
-  - name: unit + func tests (fjall)
-    image: nixpkgs/nix:nixos-24.05
-    commands:
-      - nix-build -j4 --attr flakePackages.tests-fjall
   - name: integration tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build -j4 --attr flakePackages.dev
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
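Note: both variants of the formatting check above can be reproduced locally from a checkout of the matching branch (a sketch, assuming the Nix tooling the repository already uses):

```bash
# Check formatting through the dev shell (the `+` side of the change):
nix-shell --attr devShell --run "cargo fmt -- --check"

# The same check packaged as a Nix build attribute (the `-` side;
# flakePackages.fmt only exists on that branch):
nix-build -j4 --attr flakePackages.fmt
```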

View file

@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - deployment
@@ -11,7 +8,7 @@ depends_on:
 steps:
   - name: refresh-index
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       AWS_ACCESS_KEY_ID:
         from_secret: garagehq_aws_access_key_id
@@ -22,7 +19,7 @@ steps:
       - nix-shell --attr ci --run "refresh_index"
   - name: multiarch-docker
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       DOCKER_AUTH:
         from_secret: docker_auth

View file

@@ -1,6 +1,3 @@
-labels:
-  nix: "enabled"
-
 when:
   event:
     - deployment
@@ -19,17 +16,17 @@ matrix:
 steps:
   - name: build
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
   - name: check is static binary
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
   - name: integration tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
     when:
@@ -39,7 +36,7 @@ steps:
         ARCH: i386
   - name: upgrade tests
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     commands:
       - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
     when:
@@ -47,7 +44,7 @@ steps:
        ARCH: amd64
   - name: push static binary
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
       AWS_ACCESS_KEY_ID:
@@ -58,7 +55,7 @@ steps:
       - nix-shell --attr ci --run "to_s3"
   - name: docker build and publish
-    image: nixpkgs/nix:nixos-24.05
+    image: nixpkgs/nix:nixos-22.05
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
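Note: the `check is static binary` step calls `./script/not-dynamic.sh`; a rough manual equivalent, assuming `result/bin/garage` is the `nix-build` output, is:

```bash
# ldd fails and prints "not a dynamic executable" for a fully static binary.
ldd result/bin/garage && echo "ERROR: dynamically linked" || echo "OK: static"
```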

Cargo.lock (generated)

File diff suppressed because it is too large.

View file

@@ -24,18 +24,18 @@ default-members = ["src/garage"]
 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.3.1", path = "src/api/common" }
+garage_api_common = { version = "1.0.1", path = "src/api/common" }
-garage_api_admin = { version = "1.3.1", path = "src/api/admin" }
+garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.3.1", path = "src/api/s3" }
+garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.3.1", path = "src/api/k2v" }
+garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
-garage_block = { version = "1.3.1", path = "src/block" }
+garage_block = { version = "1.0.1", path = "src/block" }
-garage_db = { version = "1.3.1", path = "src/db", default-features = false }
+garage_db = { version = "1.0.1", path = "src/db", default-features = false }
-garage_model = { version = "1.3.1", path = "src/model", default-features = false }
+garage_model = { version = "1.0.1", path = "src/model", default-features = false }
-garage_net = { version = "1.3.1", path = "src/net" }
+garage_net = { version = "1.0.1", path = "src/net" }
-garage_rpc = { version = "1.3.1", path = "src/rpc" }
+garage_rpc = { version = "1.0.1", path = "src/rpc" }
-garage_table = { version = "1.3.1", path = "src/table" }
+garage_table = { version = "1.0.1", path = "src/table" }
-garage_util = { version = "1.3.1", path = "src/util" }
+garage_util = { version = "1.0.1", path = "src/util" }
-garage_web = { version = "1.3.1", path = "src/web" }
+garage_web = { version = "1.0.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -52,11 +52,13 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
+err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
 hex = "0.4"
 hexdump = "0.1"
 hmac = "0.12"
+idna = "0.5"
 itertools = "0.12"
 ipnet = "2.9.0"
 lazy_static = "1.4"
@@ -64,7 +66,6 @@ md-5 = "0.10"
 mktemp = "0.5"
 nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
-parking_lot = "0.12"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
@@ -83,14 +84,12 @@ pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
 syslog-tracing = "0.3"
 tracing = "0.1"
-tracing-journald = "0.3.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.37"
+rusqlite = "0.31.0"
 r2d2 = "0.8"
-r2d2_sqlite = "0.31"
+r2d2_sqlite = "0.24"
-fjall = "2.4"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -133,21 +132,24 @@ opentelemetry-contrib = "0.9"
 prometheus = "0.13"

 # used by the k2v-client crate only
-aws-sigv4 = { version = "1.1", default-features = false }
+aws-sigv4 = { version = "1.1" }
-hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
+hyper-rustls = { version = "0.26", features = ["http2"] }
 log = "0.4"
-thiserror = "2.0"
+thiserror = "1.0"

 # ---- used only as build / dev dependencies ----
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"
-aws-smithy-runtime = { version = "1.8", default-features = false, features = ["tls-rustls"] }
-aws-sdk-config = { version = "1.62", default-features = false }
-aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }
+aws-sdk-config = "1.13"
+aws-sdk-s3 = "1.14"

+[profile.dev]
+#lto = "thin" # disabled for now, adds 2-4 min to each CI build
+lto = "off"

 [profile.release]
-lto = "thin"
+lto = true
-codegen-units = 16
+codegen-units = 1
-opt-level = 3
+opt-level = "s"
-strip = "debuginfo"
+strip = true

View file

@@ -687,7 +687,7 @@ paths:
       operationId: "GetBucketInfo"
       summary: "Get a bucket"
       description: |
-        Given a bucket identifier (`id`) or a global alias (`globalAlias`), get its information.
+        Given a bucket identifier (`id`) or a global alias (`alias`), get its information.
         It includes its aliases, its web configuration, keys that have some permissions
         on it, some statistics (number of objects, size), number of dangling multipart uploads,
         and its quotas (if any).
@@ -701,7 +701,7 @@ paths:
           example: "b4018dc61b27ccb5c64ec1b24f53454bbbd180697c758c4d47a22a8921864a87"
           schema:
             type: string
-        - name: globalAlias
+        - name: alias
           in: query
           description: |
             The exact global alias of one of the existing buckets.

View file

@@ -12,7 +12,7 @@ In this section, we cover the following web applications:
 | [Mastodon](#mastodon)   | ✅ | Natively supported |
 | [Matrix](#matrix)       | ✅ | Tested with `synapse-s3-storage-provider` |
 | [ejabberd](#ejabberd)   | ✅ | `mod_s3_upload` |
-| [Pixelfed](#pixelfed)   | ✅ | Natively supported |
+| [Pixelfed](#pixelfed)   | ❓ | Not yet tested |
 | [Pleroma](#pleroma)     | ❓ | Not yet tested |
 | [Lemmy](#lemmy)         | ✅ | Supported with pict-rs |
 | [Funkwhale](#funkwhale) | ❓ | Not yet tested |
@@ -69,7 +69,7 @@ $CONFIG = array(
     'hostname' => '127.0.0.1', // Can also be a domain name, eg. garage.example.com
     'port' => 3900, // Put your reverse proxy port or your S3 API port
     'use_ssl' => false, // Set it to true if you have a TLS enabled reverse proxy
-    'region' => 'garage', // Garage default region is named "garage", edit according to your cluster config
+    'region' => 'garage', // Garage has only one region named "garage"
     'use_path_style' => true // Garage supports only path style, must be set to true
   ],
 ],
@@ -135,7 +135,7 @@ bucket but doesn't also know the secret encryption key.
 *Click on the picture to zoom*

 Add a new external storage. Put what you want in "folder name" (eg. "shared"). Select "Amazon S3". Keep "Access Key" for the Authentication field.
-In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region ("garage" if you use the default, or the one you configured in your `garage.toml`). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.
+In Configuration, put your bucket name (eg. nextcloud), the host (eg. 127.0.0.1), the port (eg. 3900 or 443), the region (garage). Tick the SSL box if you have put an HTTPS proxy in front of garage. You must tick the "Path access" box and you must leave the "Legacy authentication (v2)" box empty. Put your Key ID (eg. GK...) and your Secret Key in the last two input boxes. Finally click on the tick symbol on the right of your screen.

 Now go to your "Files" app and a new "linked folder" has appeared with the name you chose earlier (eg. "shared").
@@ -191,10 +191,10 @@ garage key create peertube-key
 Keep the Key ID and the Secret key in a pad, they will be needed later.

-We need two buckets, one for normal videos (named peertube-videos) and one for webtorrent videos (named peertube-playlists).
+We need two buckets, one for normal videos (named peertube-video) and one for webtorrent videos (named peertube-playlist).
 ```bash
 garage bucket create peertube-videos
-garage bucket create peertube-playlists
+garage bucket create peertube-playlist
 ```

 Now we allow our key to read and write on these buckets:
@@ -238,7 +238,7 @@ object_storage:
   # Put localhost only if you have a garage instance running on that node
   endpoint: 'http://localhost:3900' # or "garage.example.com" if you have TLS on port 443

-  # Garage default region is named "garage", edit according to your config
+  # Garage supports only one region for now, named garage
   region: 'garage'

   credentials:
@@ -253,7 +253,7 @@ object_storage:
   proxify_private_files: false

   streaming_playlists:
-    bucket_name: 'peertube-playlists'
+    bucket_name: 'peertube-playlist'
     # Keep it empty for our example
     prefix: ''
@@ -441,7 +441,7 @@ media_storage_providers:
     store_synchronous: True # do we want to wait that the file has been written before returning?
     config:
       bucket: matrix # the name of our bucket, we chose matrix earlier
-      region_name: garage # "garage" by default, edit according to your cluster config
+      region_name: garage # only "garage" is supported for the region field
       endpoint_url: http://localhost:3900 # the path to the S3 endpoint
       access_key_id: "GKxxx" # your Key ID
       secret_access_key: "xxxx" # your Secret Key
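Note: the PeerTube hunk stops right at "Now we allow our key to read and write on these buckets"; with the bucket names from the `-` side, that permission step would look like this (a sketch following the commands shown above):

```bash
garage bucket allow peertube-videos --read --write --key peertube-key
garage bucket allow peertube-playlists --read --write --key peertube-key
```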

View file

@@ -161,49 +161,3 @@ kopia repository validate-provider
 You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
 Everything should work out-of-the-box.

-## Plakar
-
-Create your key and bucket on the Garage server:
-
-```bash
-garage key create my-plakar-key
-garage bucket create plakar-backups
-garage bucket allow plakar-backups --read --write --key my-plakar-key
-```
-
-On the Plakar server, add your Garage as a storage location:
-
-```bash
-# region: as specified in your garage.toml ("garage" by default)
-plakar store add garageS3 s3://my-garage.tld/plakar-backups \
-    region=garage \
-    access_key=<Key ID from "garage key info my-plakar-key"> \
-    secret_access_key=<Secret key from "garage key info my-plakar-key">
-```
-
-Then create the repository:
-
-```bash
-plakar at @garageS3 create -plaintext # unencrypted
-# or
-plakar at @garageS3 create # encrypted
-```
-
-If you encrypt your backups (the Plakar default), you will need to define a strong passphrase. Do not forget to save your passphrase safely; it will be needed to decrypt your backups.
-
-After the repository has been created, check that everything works as expected (this might give an empty result as no file has been added yet, but no error message):
-
-```bash
-plakar at @garageS3 check
-```
-
-Now that everything is configured, you can use Garage as your backup storage. For instance, sync it with a local backup storage:
-
-```bash
-$ plakar at ~/backups sync to @garageS3
-```
-
-Or list the S3 storage content:
-
-```bash
-$ plakar at @garageS3 ls
-```
-
-More information can be found in the Plakar documentation: https://www.plakar.io/docs/main/quickstart/

View file

@@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.
 ## Comparison of Ansible roles

-| Feature                            | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
+| Feature                            | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
-|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
+|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
-| **Runtime**                        | Systemd | Docker | Systemd |
+| **Runtime**                        | Systemd | Docker |
-| **Target OS**                      | Any Linux | Any Linux | Any Linux |
+| **Target OS**                      | Any Linux | Any Linux |
-| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
+| **Architecture**                   | amd64, arm64, i686 | amd64, arm64 |
-| **Additional software**            | None | Traefik | Nginx and Keepalived (optional) |
+| **Additional software**            | None | Traefik |
-| **Automatic node connection**      | ❌ | ✅ | ✅ |
+| **Automatic node connection**      | ❌ | ✅ |
-| **Layout management**              | ❌ | ✅ | ✅ |
+| **Layout management**              | ❌ | ✅ |
-| **Manage buckets & keys**          | ❌ | ✅ (basic) | ✅ |
+| **Manage buckets & keys**          | ❌ | ✅ (basic) |
-| **Allow custom Garage config**     | ✅ | ❌ | ❌ |
+| **Allow custom Garage config**     | ✅ | ❌ |
-| **Facilitate Garage upgrades**     | ✅ | ❌ | ✅ |
+| **Facilitate Garage upgrades**     | ✅ | ❌ |
-| **Multiple instances on one host** | ✅ | ✅ | ❌ |
+| **Multiple instances on one host** | ✅ | ✅ |

 ## zorun/ansible-role-garage
@@ -49,15 +49,3 @@ structured DNS names, etc).
 As a result, this role makes it easier to start with Garage on Ansible,
 but is less flexible.

-## eddster2309/ansible-role-garage
-
-[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
-
-This role is an opinionated but customisable role using the official Garage
-static binaries and only requires Systemd. As such it should work on any
-Linux-based host. It includes all the necessary configuration to
-automatically set up a clustered Garage deployment. Most Garage
-configuration options are exposed through Ansible variables, so while you
-can't provide a custom config you can get very close. It can optionally
-install an HA nginx deployment with Keepalived.

View file

@@ -15,10 +15,9 @@ Alpine Linux repositories (available since v3.17):
 apk add garage
 ```

-The default configuration file is installed to `/etc/garage/garage.toml`. You can run
-Garage using: `rc-service garage start`.
-
-If you don't specify `rpc_secret`, it will be automatically replaced with a random string on the first start.
+The default configuration file is installed to `/etc/garage.toml`. You can run
+Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it
+will be automatically replaced with a random string on the first start.

 Please note that this package is built without Consul discovery, Kubernetes
 discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once
@@ -27,7 +26,7 @@ it's stable).
 ## Arch Linux

-Garage is available in the official repositories under [extra](https://archlinux.org/packages/extra/x86_64/garage).
+Garage is available in the [AUR](https://aur.archlinux.org/packages/garage).

 ## FreeBSD
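Note: on Alpine, the OpenRC service mentioned above can also be enabled at boot (a sketch, assuming the package installs the service under the name `garage`):

```bash
rc-update add garage default
rc-service garage start
```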

View file

@@ -11,7 +11,7 @@ Firstly clone the repository:
 ```bash
 git clone https://git.deuxfleurs.fr/Deuxfleurs/garage
-cd garage/script/helm
+cd garage/scripts/helm
 ```

 Deploy with default options:
@@ -26,13 +26,6 @@ Or deploy with custom values:
 helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
 ```

-If you want to manage the CustomResourceDefinition used by garage for its `kubernetes_discovery` outside of the helm chart, add `garage.kubernetesSkipCrd: true` to your custom values and use the kustomization before deploying the helm chart:
-
-```bash
-kubectl apply -k ../k8s/crd
-helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml
-```

 After deploying, cluster layout must be configured manually as described in [Creating a cluster layout](@/documentation/quick-start/_index.md#creating-a-cluster-layout). Use the following command to access garage CLI:
@@ -93,62 +86,3 @@ helm delete --namespace garage garage
 ```

 Note that this will leave behind the custom CRD `garagenodes.deuxfleurs.fr`, which must be removed manually if desired.

-## Increase PVC size on running Garage instances
-
-Since the Garage Helm chart creates the data and meta PVCs based on `StatefulSet` templates, increasing the PVC size can be a bit tricky.
-
-### Confirm the `StorageClass` used for Garage supports volume expansion
-
-Confirm the storage class used for garage:
-
-```bash
-kubectl -n garage get pvc
-
-NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     VOLUMEATTRIBUTESCLASS   AGE
-data-garage-0   Bound    pvc-080360c9-8ce3-4acf-8579-1701e57b7f3f   30Gi       RWO            longhorn-local   <unset>                 77d
-data-garage-1   Bound    pvc-ab8ba697-6030-4fc7-ab3c-0d6df9e3dbc0   30Gi       RWO            longhorn-local   <unset>                 5d8h
-data-garage-2   Bound    pvc-3ab37551-0231-4604-986d-136d0fd950ec   30Gi       RWO            longhorn-local   <unset>                 5d5h
-meta-garage-0   Bound    pvc-3b457302-3023-4169-846e-c928c5f2ea65   3Gi        RWO            longhorn-local   <unset>                 77d
-meta-garage-1   Bound    pvc-49ace2b9-5c85-42df-9247-51c4cf64b460   3Gi        RWO            longhorn-local   <unset>                 5d8h
-meta-garage-2   Bound    pvc-99e2e50f-42b4-4128-ae2f-b52629259723   3Gi        RWO            longhorn-local   <unset>                 5d5h
-```
-
-In this case, the storage class is `longhorn-local`. Now, check if `ALLOWVOLUMEEXPANSION` is true for the used `StorageClass`:
-
-```bash
-kubectl get storageclasses.storage.k8s.io longhorn-local
-
-NAME             PROVISIONER          RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
-longhorn-local   driver.longhorn.io   Delete          Immediate           true                   103d
-```
-
-If your `StorageClass` does not support volume expansion, double check if you can enable it. Otherwise, your only real option is to spin up a new Garage cluster with increased size and migrate all data over.
-
-If your `StorageClass` supports expansion, you are free to continue.
-
-### Increase the size of the PVCs
-
-Increase the size of all PVCs to your desired size:
-
-```bash
-kubectl -n garage edit pvc data-garage-0
-kubectl -n garage edit pvc data-garage-1
-kubectl -n garage edit pvc data-garage-2
-kubectl -n garage edit pvc meta-garage-0
-kubectl -n garage edit pvc meta-garage-1
-kubectl -n garage edit pvc meta-garage-2
-```
-
-### Increase the size of the `StatefulSet` PVC template
-
-This is an optional step, but if it is not done, future instances of Garage will be created with the original size from the template.
-
-```bash
-kubectl -n garage delete sts --cascade=orphan garage
-
-statefulset.apps "garage" deleted
-```
-
-This will remove the Garage `StatefulSet` but leave the pods running. It may seem destructive but needs to be done this way since edits to the size of PVC templates are prohibited.
-
-### Redeploy the `StatefulSet`
-
-Now the size of future PVCs can be increased, and the Garage Helm chart can be upgraded. The new `StatefulSet` should take ownership of the orphaned pods again.
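Note: the interactive `kubectl edit` loop in the removed section can also be done non-interactively; a sketch, with `100Gi` as a placeholder target size:

```bash
# Patch each data PVC to the new requested size (repeat for meta-garage-*):
for pvc in data-garage-0 data-garage-1 data-garage-2; do
  kubectl -n garage patch pvc "$pvc" --type merge \
    -p '{"spec":{"resources":{"requests":{"storage":"100Gi"}}}}'
done
```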

View file

@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.3.0`) and not the `latest` tag.
+We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of writing, which is `v1.3.0`, but it's up to you
+For this example, we will use the latest published version at the time of writing, which is `v1.0.1`, but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.3.0
+sudo docker pull dxflrs/garage:v1.0.1
 ```

 ## Deploying and configuring Garage
@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.3.0
+  dxflrs/garage:v1.0.1
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`:
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.3.0
+    image: dxflrs/garage:v1.0.1
     network_mode: "host"
     restart: unless-stopped
     volumes:
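Note: with the `docker-compose.yml` from the hunk above saved in the current directory, the service is brought up as usual:

```bash
sudo docker-compose up -d
sudo docker-compose logs -f garage
```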

View file

@@ -28,7 +28,6 @@ StateDirectory=garage
 DynamicUser=true
 ProtectHome=true
 NoNewPrivileges=true
-LimitNOFILE=42000

 [Install]
 WantedBy=multi-user.target
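Note: for context, a unit file like the one edited here is typically activated as follows (a sketch, assuming it is installed as /etc/systemd/system/garage.service):

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now garage
```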

View file

@@ -71,7 +71,7 @@ The entire procedure would look something like this:
 2. Take each node offline individually to back up its metadata folder, bring them back online once the backup is done.
    You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
-   Do not try to manually copy the metadata folder of a running node.
+   Do not try to make a backup of the metadata folder of a running node.

 **Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
 to take a simultaneous snapshot of the metadata database files of all your
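Note: the snapshot command referenced in the context lines operates on the whole cluster in one go:

```bash
# Since Garage v0.9.4: snapshot the metadata DB of every node at once.
garage meta snapshot --all
```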

View file

@@ -129,10 +129,10 @@ docker run \
   -d \
   --name garaged \
   -p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-  -v /path/to/garage.toml:/etc/garage.toml \
+  -v /etc/garage.toml:/path/to/garage.toml \
-  -v /path/to/garage/meta:/var/lib/garage/meta \
+  -v /var/lib/garage/meta:/path/to/garage/meta \
-  -v /path/to/garage/data:/var/lib/garage/data \
+  -v /var/lib/garage/data:/path/to/garage/data \
-  dxflrs/garage:v1.3.0
+  dxflrs/garage:v0.9.4
 ```

 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
@@ -182,12 +182,11 @@ ID  Hostname  Address  Tag  Zone  Capacity
 ## Creating a cluster layout

-Creating a cluster layout for a Garage deployment means informing Garage
-of the disk space available on each node of the cluster, `-c`,
-as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.
+Creating a cluster layout for a Garage deployment means informing Garage
+of the disk space available on each node of the cluster
+as well as the zone (e.g. datacenter) each machine is located in.

-For our test deployment, we have only one node with a zone named `dc1` and a
-capacity of `1G`, though the capacity is ignored for a single-node deployment
-and can be changed later when adding new nodes.
+For our test deployment, we are using only one node. The way in which we configure
+it does not matter, you can simply write:

 ```bash
 garage layout assign -z dc1 -c 1G <node_id>
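Note: after the `garage layout assign` call shown above, the staged layout still has to be applied; a sketch, assuming this is the first layout change:

```bash
garage layout show
garage layout apply --version 1
```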

View file

@@ -24,8 +24,7 @@ db_engine = "lmdb"
 block_size = "1M"
 block_ram_buffer_max = "256MiB"

-block_max_concurrent_reads = 16
-block_max_concurrent_writes_per_request = 10

 lmdb_map_size = "1T"
 compression_level = 1
@@ -47,7 +46,7 @@ bootstrap_peers = [
   "212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
 ]

-allow_punycode = false

 [consul_discovery]
 api = "catalog"
@@ -94,32 +92,29 @@ The following gives details about each available configuration option.
 [Environment variables](#env_variables).

-Top-level configuration options, in alphabetical order:
+Top-level configuration options:

-[`allow_punycode`](#allow_punycode),
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
-[`block_max_concurrent_reads`](#block_max_concurrent_reads),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
-[`block_max_concurrent_writes_per_request`](#block_max_concurrent_writes_per_request),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
-[`consistency_mode`](#consistency_mode),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
 [`metadata_snapshots_dir`](#metadata_snapshots_dir),
 [`replication_factor`](#replication_factor),
+[`consistency_mode`](#consistency_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
 [`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
 [`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
-[`use_local_tz`](#use_local_tz).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -156,17 +151,13 @@ The `[admin]` section:
 ### Environment variables {#env_variables}

-The following configuration parameters must be specified as environment variables,
-they do not exist in the configuration file:
+The following configuration parameter must be specified as an environment
+variable, it does not exist in the configuration file:

-- `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
+- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
   Garage daemon send its logs to `syslog` (using the libc `syslog` function)
   instead of printing to stderr.
-
-- `GARAGE_LOG_TO_JOURNALD` (since `v1.2.0`): set this to `1` or `true` to make the
-  Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
-  instead of printing to stderr.

 The following environment variables can be used to override the corresponding
 values in the configuration file:
@@ -178,7 +169,7 @@ values in the configuration file:
 ### Top-level configuration options

-#### `replication_factor` (since `v1.0.0`) {#replication_factor}
+#### `replication_factor` {#replication_factor}

 The replication factor can be any positive integer smaller than or equal to the node count in your cluster.
 The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.
@@ -226,7 +217,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
 routine operation for Garage, although we cannot guarantee you that everything
 will go right in such an extreme scenario.

-#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}
+#### `consistency_mode` {#consistency_mode}

 The consistency mode setting determines the read and write behaviour of your cluster.
@@ -309,7 +300,7 @@ data_dir = [
 See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
 on how to operate Garage in such a setup.

-#### `metadata_snapshots_dir` (since `v1.1.0`) {#metadata_snapshots_dir}
+#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}

 The directory in which Garage will store metadata snapshots when it
 performs a snapshot of the metadata database, either when instructed to do
@@ -336,7 +327,6 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
 | --------- | ----------------- | ------------- |
 | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
 | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
-| [Fjall](https://github.com/fjall-rs/fjall) (**experimental support** since `v1.3.0`) | `"fjall"` | `<metadata_dir>/db.fjall/` |
 | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |

 Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
@@ -373,14 +363,6 @@ LMDB works very well, but is known to have the following limitations:
   so it is not the best choice for high-performance storage clusters,
   but it should work fine in many cases.

-- Fjall: a storage engine based on LSM trees, which theoretically allow for
-  higher write throughput than other storage engines that are based on B-trees.
-  Using Fjall could potentially improve Garage's performance significantly in
-  write-heavy workloads. **Support for Fjall is experimental at this point**,
-  we have added it to Garage for evaluation purposes only. **Do not use it for
-  production-critical workloads.**

 It is possible to convert Garage's metadata directory from one format to another
 using the `garage convert-db` command, which should be used as follows:
@@ -418,7 +400,6 @@ Here is how this option impacts the different database engines:
 |----------|------------------------------------|-------------------------------|
 | Sqlite   | `PRAGMA synchronous = OFF`         | `PRAGMA synchronous = NORMAL` |
 | LMDB     | `MDB_NOMETASYNC` + `MDB_NOSYNC`    | `MDB_NOMETASYNC`              |
-| Fjall    | default options                    | not supported                 |

 Note that the Sqlite database is always run in `WAL` mode (`PRAGMA journal_mode = WAL`).
@@ -435,7 +416,7 @@ at the cost of a moderate drop in write performance.
 Similarly to `metadata_fsync`, this is likely not necessary
 if geographical replication is used.

-#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}
+#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}

 If this value is set, Garage will automatically take a snapshot of the metadata
 DB file at a regular interval and save it in the metadata directory.
@@ -472,7 +453,7 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.

-#### `use_local_tz` (since `v1.1.0`) {#use_local_tz}
+#### `use_local_tz` {#use_local_tz}

 By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
 `use_local_tz` configuration value to `true` if you want Garage to run the
@@ -494,7 +475,7 @@ files will remain available. This however means that chunks from existing files
 will not be deduplicated with chunks from newly uploaded files, meaning you
 might use more storage space than is optimally possible.

-#### `block_ram_buffer_max` (since `v0.9.4`) {#block_ram_buffer_max}
+#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}

 A limit on the total size of data blocks kept in RAM by S3 API nodes awaiting
 to be sent to storage nodes asynchronously.
@@ -525,37 +506,6 @@ node.
 The default value is 256MiB.

-#### `block_max_concurrent_reads` (since `v1.3.0` / `v2.1.0`) {#block_max_concurrent_reads}
-
-The maximum number of blocks (individual files in the data directory) open
-simultaneously for reading.
-
-Reducing this number does not limit the number of data blocks that can be
-transferred through the network simultaneously. This mechanism was just added
-as a backpressure mechanism for HDD read speed: it helps avoid a situation
-where too many requests are coming in and Garage is reading too many block
-files simultaneously, thus not making timely progress on any of the reads.
-
-When a request to read a data block comes in through the network, the request
-waits for one of the `block_max_concurrent_reads` slots to be available
-(internally implemented using a Semaphore object). Once it has acquired a read
-slot, it reads the entire block file to RAM and frees the slot as soon as the
-block file is finished reading. Only after the slot is released will the
-block's data start being transferred over the network. If the request fails to
-acquire a reading slot within 15 seconds, it fails with a timeout error.
-
-Timeout events can be monitored through the `block_read_semaphore_timeouts`
-metric in Prometheus: a non-zero number of such events indicates an I/O
-bottleneck on HDD read speed.
-
-#### `block_max_concurrent_writes_per_request` (since `v2.1.0`) {#block_max_concurrent_writes_per_request}
-
-The maximum number of parallel block writes per PUT request. This parameter is
-designed to adapt to the concurrent write performance of different storage
-media. Higher values improve throughput but increase memory usage.
-Default: 3. Recommended: 10-30 for NVMe, 3-10 for HDD.

 #### `lmdb_map_size` {#lmdb_map_size}

 This parameter can be used to set the map size used by LMDB,
@@ -612,7 +562,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
 port number to the same internal port number. This means that if you have several nodes running
 behind a NAT, they should each use a different RPC port number.

-#### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing}
+#### `rpc_bind_outgoing` (since v0.9.2) {#rpc_bind_outgoing}

 If enabled, pre-bind all sockets for outgoing connections to the same IP address
 used for listening (the IP address specified in `rpc_bind_addr`) before
@@ -654,7 +604,7 @@ be obtained by running `garage node id` and then included directly in the
 key will be returned by `garage node id` and you will have to add the IP
 yourself.

-#### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
+### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}

 Garage checks the permissions of your secret files to make sure they're not
 world-readable. In some cases, the check might fail and consider your files as
@@ -666,13 +616,6 @@ permission verification.
 Alternatively, you can set the `GARAGE_ALLOW_WORLD_READABLE_SECRETS`
 environment variable to `true` to bypass the permissions check.

-#### `allow_punycode` {#allow_punycode}
-
-Allow creating buckets with names containing punycode. When used for buckets served
-as websites, this allows using almost any unicode character in the domain name.
-
-Defaults to `false`.

 ### The `[consul_discovery]` section

 Garage supports discovering other nodes of the cluster using Consul. For this
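Note: both sides of this file reference `rpc_secret`; a suitable 32-byte hex secret, as used elsewhere in the Garage documentation, can be generated with:

```bash
openssl rand -hex 32
```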

View file

@@ -23,17 +23,17 @@ Feel free to open a PR to suggest fixes to this table. Minio is missing because the
 - 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.

 ## High-level features

 | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
 |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
-| [signature v2](https://docs.aws.amazon.com/AmazonS3/latest/API/Appendix-Sigv2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
+| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
 | [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
 | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
 | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
 | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
 | [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
-| [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |

 *Note:* OpenIO does not say if it supports presigned URLs. Because it is part
 of signature v4 and they claim they support it without additional precisions,

View file

@@ -70,7 +70,7 @@ Example response body:
 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.3.0",
+  "garageVersion": "v1.0.1",
   "garageFeatures": [
     "k2v",
     "lmdb",

flake.lock (generated)
View file

@@ -50,17 +50,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1763977559,
+        "lastModified": 1736692550,
-        "narHash": "sha256-g4MKqsIRy5yJwEsI+fYODqLUnAqIY4kZai0nldAP6EM=",
+        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
+        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "cfe2c7d5b5d3032862254e68c37a6576b633d632",
+        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
         "type": "github"
       }
     },
@@ -80,17 +80,17 @@
       ]
     },
     "locked": {
-      "lastModified": 1763952169,
+      "lastModified": 1738549608,
-      "narHash": "sha256-+PeDBD8P+NKauH+w7eO/QWCIp8Cx4mCfWnh9sJmy9CM=",
+      "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
+      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
       "type": "github"
     },
     "original": {
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "ab726555a9a72e6dc80649809147823a813fa95b",
+      "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
       "type": "github"
     }
   },

View file

@@ -2,13 +2,13 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 25.05 as of 2025-11-24
+  # Nixpkgs 24.11 as of 2025-01-12
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/cfe2c7d5b5d3032862254e68c37a6576b633d632";
+    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";

-  # Rust overlay as of 2025-11-24
+  # Rust overlay as of 2025-02-03
   inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/ab726555a9a72e6dc80649809147823a813fa95b";
+    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
   inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";

   inputs.crane.url = "github:ipetkov/crane";
@@ -30,10 +30,6 @@
           inherit system nixpkgs crane rust-overlay extraTestEnv;
           release = false;
         }).garage-test;
-        lints = (compile {
-          inherit system nixpkgs crane rust-overlay;
-          release = false;
-        });
       in
       {
         packages = {
@@ -57,13 +53,6 @@
           tests-sqlite = testWith {
             GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
           };
-          tests-fjall = testWith {
-            GARAGE_TEST_INTEGRATION_DB_ENGINE = "fjall";
-          };
-
-          # lints (fmt, clippy)
-          fmt = lints.garage-cargo-fmt;
-          clippy = lints.garage-cargo-clippy;
         };

         # ---- development shell, for making native builds only ----

View file

@@ -48,7 +48,7 @@ let
   inherit (pkgs) lib stdenv;

-  toolchainFn = (p: p.rust-bin.stable."1.91.0".default.override {
+  toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
     targets = lib.optionals (target != null) [ rustTarget ];
     extensions = [
       "rust-src"
@@ -68,13 +68,12 @@ let
   rootFeatures = if features != null then
     features
   else
-    ([ "bundled-libs" "lmdb" "sqlite" "fjall" "k2v" ] ++ (lib.optionals release [
+    ([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
       "consul-discovery"
       "kubernetes-discovery"
       "metrics"
       "telemetry-otlp"
       "syslog"
-      "journald"
     ]));

   featuresStr = lib.concatStringsSep "," rootFeatures;
@@ -190,15 +189,4 @@ in rec {
       pkgs.cacert
     ];
   } // extraTestEnv);

-  # ---- source code linting ----
-  garage-cargo-fmt = craneLib.cargoFmt (commonArgs // {
-    cargoExtraArgs = "";
-  });
-
-  garage-cargo-clippy = craneLib.cargoClippy (commonArgs // {
-    cargoArtifacts = garage-deps;
-    cargoClippyExtraArgs = "--all-targets -- -D warnings";
-  });
 }

View file

@@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
 FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")

 export RUST_BACKTRACE=1
-export RUST_LOG=garage=info,garage_api_common=debug,garage_api_s3=debug
+export RUST_LOG=garage=info,garage_api=debug

 MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"

 if [ -z "$GARAGE_BIN" ]; then

View file

@@ -1,7 +1,6 @@
 export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
 export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
 export AWS_DEFAULT_REGION='garage'
-export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'

 # FUTUREWORK: set AWS_ENDPOINT_URL instead, once nixpkgs bumps awscli to >=2.13.0.
 function aws { command aws --endpoint-url http://127.0.0.1:3911 $@ ; }
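Note: once this file is sourced inside a dev-cluster shell, plain `aws` invocations target the local Garage endpoint (the file path below is an assumption):

```bash
source script/dev-env-aws.sh
aws s3 ls
```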

View file

@@ -1,3 +1,3 @@
 # Garage helm3 chart

-Documentation is located [here](https://garagehq.deuxfleurs.fr/documentation/cookbook/kubernetes/).
+Documentation is located [here](/doc/book/cookbook/kubernetes.md).


@@ -1,18 +1,24 @@
 apiVersion: v2
 name: garage
 description: S3-compatible object store for small self-hosted geo-distributed deployments
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
-version: 0.7.3
-appVersion: "v1.3.1"
-home: https://garagehq.deuxfleurs.fr/
-icon: https://garagehq.deuxfleurs.fr/images/garage-logo.svg
-keywords:
-  - geo-distributed
-  - read-after-write-consistency
-  - s3-compatible
-sources:
-  - https://git.deuxfleurs.fr/Deuxfleurs/garage.git
-maintainers: []
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.6.0
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.0.1"


@@ -1,15 +1,9 @@
 # garage
 
-![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.3.1](https://img.shields.io/badge/AppVersion-v1.3.1-informational?style=flat-square)
+![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)
 
 S3-compatible object store for small self-hosted geo-distributed deployments
 
-**Homepage:** <https://garagehq.deuxfleurs.fr/>
-
-## Source Code
-
-* <https://git.deuxfleurs.fr/Deuxfleurs/garage.git>
-
 ## Values
 
 | Key | Type | Default | Description |
@@ -29,7 +23,6 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
 | garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
 | garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources |
-| garage.metadataAutoSnapshotInterval | string | `""` | If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory. https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval |
 | garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
 | garage.rpcBindAddr | string | `"[::]:3901"` | |
 | garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
@@ -56,7 +49,6 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | initImage.pullPolicy | string | `"IfNotPresent"` | |
 | initImage.repository | string | `"busybox"` | |
 | initImage.tag | string | `"stable"` | |
-| livenessProbe | object | `{}` | Specifies a livenessProbe |
 | monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
 | monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
 | monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
@@ -79,7 +71,6 @@ S3-compatible object store for small self-hosted geo-distributed deployments
 | podSecurityContext.runAsGroup | int | `1000` | |
 | podSecurityContext.runAsNonRoot | bool | `true` | |
 | podSecurityContext.runAsUser | int | `1000` | |
-| readinessProbe | object | `{}` | Specifies a readinessProbe |
 | resources | object | `{}` | |
 | securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
 | securityContext.readOnlyRootFilesystem | bool | `true` | |


@@ -19,10 +19,6 @@ data:
     compression_level = {{ .Values.garage.compressionLevel }}
 
-    {{- if .Values.garage.metadataAutoSnapshotInterval }}
-    metadata_auto_snapshot_interval = {{ .Values.garage.metadataAutoSnapshotInterval | quote }}
-    {{- end }}
-
     rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
     # rpc_secret will be populated by the init container from a k8s secret object
     rpc_secret = "__RPC_SECRET_REPLACE__"


@@ -1,22 +0,0 @@
-{{- if eq .Values.deployment.kind "StatefulSet" -}}
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "garage.fullname" . }}-headless
-  labels:
-    {{- include "garage.labels" . | nindent 4 }}
-spec:
-  type: ClusterIP
-  clusterIP: None
-  ports:
-    - port: {{ .Values.service.s3.api.port }}
-      targetPort: 3900
-      protocol: TCP
-      name: s3-api
-    - port: {{ .Values.service.s3.web.port }}
-      targetPort: 3902
-      protocol: TCP
-      name: s3-web
-  selector:
-    {{- include "garage.selectorLabels" . | nindent 4 }}
-{{- end }}


@@ -4,10 +4,6 @@ metadata:
   name: {{ include "garage.fullname" . }}
   labels:
     {{- include "garage.labels" . | nindent 4 }}
-  {{- with .Values.service.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
 spec:
   type: {{ .Values.service.type }}
   ports:


@@ -10,11 +10,12 @@ spec:
     {{- include "garage.selectorLabels" . | nindent 6 }}
   {{- if eq .Values.deployment.kind "StatefulSet" }}
   replicas: {{ .Values.deployment.replicaCount }}
-  serviceName: {{ include "garage.fullname" . }}-headless
+  serviceName: {{ include "garage.fullname" . }}
   podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
   {{- end }}
   template:
     metadata:
       annotations:
         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
       {{- with .Values.podAnnotations }}
@@ -78,14 +79,15 @@ spec:
           {{- with .Values.extraVolumeMounts }}
           {{- toYaml . | nindent 12 }}
           {{- end }}
-          {{- with .Values.livenessProbe }}
-          livenessProbe:
-            {{- toYaml . | nindent 12 }}
-          {{- end }}
-          {{- with .Values.readinessProbe }}
-          readinessProbe:
-            {{- toYaml . | nindent 12 }}
-          {{- end }}
+          # TODO
+          # livenessProbe:
+          #   httpGet:
+          #     path: /
+          #     port: 3900
+          # readinessProbe:
+          #   httpGet:
+          #     path: /
+          #     port: 3900
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
       volumes:


@@ -21,10 +21,6 @@ garage:
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
   compressionLevel: "1"
 
-  # -- If this value is set, Garage will automatically take a snapshot of the metadata DB file at a regular interval and save it in the metadata directory.
-  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#metadata_auto_snapshot_interval
-  metadataAutoSnapshotInterval: ""
-
   rpcBindAddr: "[::]:3901"
   # -- If not given, a random secret will be generated and stored in a Secret object
   rpcSecret: ""
@@ -124,8 +120,6 @@ service:
   # - NodePort (+ Ingress)
   # - LoadBalancer
   type: ClusterIP
-  # -- Annotations to add to the service
-  annotations: {}
   s3:
     api:
       port: 3900
@@ -197,21 +191,6 @@ resources: {}
 #   cpu: 100m
 #   memory: 512Mi
 
-# -- Specifies a livenessProbe
-livenessProbe: {}
-  #httpGet:
-  #  path: /health
-  #  port: 3903
-  #initialDelaySeconds: 5
-  #periodSeconds: 30
-
-# -- Specifies a readinessProbe
-readinessProbe: {}
-  #httpGet:
-  #  path: /health
-  #  port: 3903
-  #initialDelaySeconds: 5
-  #periodSeconds: 30
-
 nodeSelector: {}
 tolerations: []


@@ -1,43 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: garagenodes.deuxfleurs.fr
-spec:
-  conversion:
-    strategy: None
-  group: deuxfleurs.fr
-  names:
-    kind: GarageNode
-    listKind: GarageNodeList
-    plural: garagenodes
-    singular: garagenode
-  scope: Namespaced
-  versions:
-  - name: v1
-    schema:
-      openAPIV3Schema:
-        description: Auto-generated derived type for Node via `CustomResource`
-        properties:
-          spec:
-            properties:
-              address:
-                format: ip
-                type: string
-              hostname:
-                type: string
-              port:
-                format: uint16
-                minimum: 0
-                type: integer
-            required:
-            - address
-            - hostname
-            - port
-            type: object
-        required:
-        - spec
-        title: GarageNode
-        type: object
-    served: true
-    storage: true
-    subresources: {}


@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-- garagenodes.deuxfleurs.fr.yaml


@@ -34,8 +34,6 @@ in
       jq
     ];
     shellHook = ''
-      export AWS_REQUEST_CHECKSUM_CALCULATION='when_required'
-
       function to_s3 {
         aws \
             --endpoint-url https://garage.deuxfleurs.fr \


@@ -1,12 +1,12 @@
 [package]
 name = "garage_api_admin"
-version = "1.3.1"
+version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "Admin API server crate for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../../README.md"
+readme = "../../README.md"
 
 [lib]
 path = "lib.rs"
@@ -22,7 +22,7 @@ garage_api_common.workspace = true
 argon2.workspace = true
 async-trait.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 tracing.workspace = true


@@ -277,7 +277,7 @@ pub async fn handle_create_bucket(
     let helper = garage.locked_helper().await;
 
     if let Some(ga) = &req.global_alias {
-        if !is_valid_bucket_name(ga, garage.config.allow_punycode) {
+        if !is_valid_bucket_name(ga) {
             return Err(Error::bad_request(format!(
                 "{}: {}",
                 ga, INVALID_BUCKET_NAME_MESSAGE
@@ -292,7 +292,7 @@
     }
 
     if let Some(la) = &req.local_alias {
-        if !is_valid_bucket_name(&la.alias, garage.config.allow_punycode) {
+        if !is_valid_bucket_name(&la.alias) {
             return Err(Error::bad_request(format!(
                 "{}: {}",
                 la.alias, INVALID_BUCKET_NAME_MESSAGE
@@ -382,7 +382,7 @@ pub async fn handle_delete_bucket(
     for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
         if *active {
             helper
-                .purge_local_bucket_alias(bucket.id, key_id, alias)
+                .unset_local_bucket_alias(bucket.id, key_id, alias)
                 .await?;
         }
     }
@@ -419,11 +419,17 @@ pub async fn handle_update_bucket(
     if let Some(wa) = req.website_access {
         if wa.enabled {
+            let (redirect_all, routing_rules) = match state.website_config.get() {
+                Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
+                None => (None, Vec::new()),
+            };
             state.website_config.update(Some(WebsiteConfig {
                 index_document: wa.index_document.ok_or_bad_request(
                     "Please specify indexDocument when enabling website access.",
                 )?,
                 error_document: wa.error_document,
+                redirect_all,
+                routing_rules,
             }));
         } else {
             if wa.index_document.is_some() || wa.error_document.is_some() {
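
The added lines read the bucket's current website configuration before rebuilding it, so that redirect settings the update request cannot carry are preserved instead of being reset. A minimal sketch of that read-then-rebuild pattern, using a simplified stand-in for the real WebsiteConfig type (field names taken from the diff, everything else hypothetical):

    struct WebsiteConfig {
        index_document: String,
        error_document: Option<String>,
        redirect_all: Option<String>,
        routing_rules: Vec<String>,
    }

    fn rebuild(current: Option<&WebsiteConfig>, index: String, error: Option<String>) -> WebsiteConfig {
        // Copy fields the update request does not expose from the existing
        // config, instead of silently resetting them to their defaults.
        let (redirect_all, routing_rules) = match current {
            Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
            None => (None, Vec::new()),
        };
        WebsiteConfig {
            index_document: index,
            error_document: error,
            redirect_all,
            routing_rules,
        }
    }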


@@ -1,8 +1,8 @@
 use std::convert::TryFrom;
 
+use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
-use thiserror::Error;
 
 pub use garage_model::helper::error::Error as HelperError;
@@ -16,17 +16,20 @@ use garage_api_common::helpers::*;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("{0}")]
+    #[error(display = "{}", _0)]
     /// Error from common error
-    Common(#[from] CommonError),
+    Common(#[error(source)] CommonError),
 
     // Category: cannot process
     /// The API access key does not exist
-    #[error("Access key not found: {0}")]
+    #[error(display = "Access key not found: {}", _0)]
     NoSuchAccessKey(String),
 
     /// In Import key, the key already exists
-    #[error("Key {0} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.")]
+    #[error(
+        display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
+        _0
+    )]
     KeyAlreadyExists(String),
 }


@@ -1,12 +1,12 @@
 [package]
 name = "garage_api_common"
-version = "1.3.1"
+version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "Common functions for the API server crates for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../../README.md"
+readme = "../../README.md"
 
 [lib]
 path = "lib.rs"
@@ -18,20 +18,16 @@ garage_model.workspace = true
 garage_table.workspace = true
 garage_util.workspace = true
 
-base64.workspace = true
 bytes.workspace = true
 chrono.workspace = true
-crc32fast.workspace = true
-crc32c.workspace = true
 crypto-common.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 hmac.workspace = true
-md-5.workspace = true
+idna.workspace = true
 tracing.workspace = true
 nom.workspace = true
 pin-project.workspace = true
-sha1.workspace = true
 sha2.workspace = true
 futures.workspace = true


@@ -1,7 +1,7 @@
 use std::convert::TryFrom;
 
+use err_derive::Error;
 use hyper::StatusCode;
-use thiserror::Error;
 
 use garage_util::error::Error as GarageError;
@@ -12,48 +12,48 @@ use garage_model::helper::error::Error as HelperError;
 pub enum CommonError {
     // ---- INTERNAL ERRORS ----
     /// Error related to deeper parts of Garage
-    #[error("Internal error: {0}")]
-    InternalError(#[from] GarageError),
+    #[error(display = "Internal error: {}", _0)]
+    InternalError(#[error(source)] GarageError),
 
     /// Error related to Hyper
-    #[error("Internal error (Hyper error): {0}")]
-    Hyper(#[from] hyper::Error),
+    #[error(display = "Internal error (Hyper error): {}", _0)]
+    Hyper(#[error(source)] hyper::Error),
 
     /// Error related to HTTP
-    #[error("Internal error (HTTP error): {0}")]
-    Http(#[from] http::Error),
+    #[error(display = "Internal error (HTTP error): {}", _0)]
+    Http(#[error(source)] http::Error),
 
     // ---- GENERIC CLIENT ERRORS ----
     /// Proper authentication was not provided
-    #[error("Forbidden: {0}")]
+    #[error(display = "Forbidden: {}", _0)]
     Forbidden(String),
 
     /// Generic bad request response with custom message
-    #[error("Bad request: {0}")]
+    #[error(display = "Bad request: {}", _0)]
     BadRequest(String),
 
     /// The client sent a header with invalid value
-    #[error("Invalid header value: {0}")]
-    InvalidHeader(#[from] hyper::header::ToStrError),
+    #[error(display = "Invalid header value: {}", _0)]
+    InvalidHeader(#[error(source)] hyper::header::ToStrError),
 
     // ---- SPECIFIC ERROR CONDITIONS ----
     // These have to be error codes referenced in the S3 spec here:
     // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
 
     /// The bucket requested don't exists
-    #[error("Bucket not found: {0}")]
+    #[error(display = "Bucket not found: {}", _0)]
     NoSuchBucket(String),
 
     /// Tried to create a bucket that already exist
-    #[error("Bucket already exists")]
+    #[error(display = "Bucket already exists")]
     BucketAlreadyExists,
 
     /// Tried to delete a non-empty bucket
-    #[error("Tried to delete a non-empty bucket")]
+    #[error(display = "Tried to delete a non-empty bucket")]
     BucketNotEmpty,
 
     // Category: bad request
     /// Bucket name is not valid according to AWS S3 specs
-    #[error("Invalid bucket name: {0}")]
+    #[error(display = "Invalid bucket name: {}", _0)]
     InvalidBucketName(String),
 }
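
This hunk only swaps one derive macro for another; both attribute syntaxes seen above produce the same Display implementation. An illustration (not part of the diff) showing the same variant under each crate:

    // thiserror (left side): positional fields referenced as {0}.
    #[derive(Debug, thiserror::Error)]
    enum Left {
        #[error("Bucket not found: {0}")]
        NoSuchBucket(String),
    }

    // err-derive (right side): an explicit `display` format with `_0`.
    #[derive(Debug, err_derive::Error)]
    enum Right {
        #[error(display = "Bucket not found: {}", _0)]
        NoSuchBucket(String),
    }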


@@ -14,9 +14,9 @@ use crate::common_error::{
 };
 use crate::helpers::*;
 
-pub fn find_matching_cors_rule<'a, B>(
+pub fn find_matching_cors_rule<'a>(
     bucket_params: &'a BucketParams,
-    req: &Request<B>,
+    req: &Request<impl Body>,
 ) -> Result<Option<&'a GarageCorsRule>, CommonError> {
     if let Some(cors_config) = bucket_params.cors_config.get() {
         if let Some(origin) = req.headers().get("Origin") {
@@ -132,8 +132,8 @@ pub async fn handle_options_api(
     }
 }
 
-pub fn handle_options_for_bucket<B>(
-    req: &Request<B>,
+pub fn handle_options_for_bucket(
+    req: &Request<IncomingBody>,
     bucket_params: &BucketParams,
 ) -> Result<Response<EmptyBody>, CommonError> {
     let origin = req


@@ -58,12 +58,6 @@ pub trait ApiHandler: Send + Sync + 'static {
         req: Request<IncomingBody>,
         endpoint: Self::Endpoint,
     ) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
-
-    /// Returns the key id used to authenticate this request. The ID returned must be safe to
-    /// log.
-    fn key_id_from_request(&self, _req: &Request<IncomingBody>) -> Option<String> {
-        None
-    }
 }
 
 pub struct ApiServer<A: ApiHandler> {
@@ -148,20 +142,19 @@ impl<A: ApiHandler> ApiServer<A> {
     ) -> Result<Response<BoxBody<A::Error>>, http::Error> {
         let uri = req.uri().clone();
 
-        let source = if let Ok(forwarded_for_ip_addr) =
+        if let Ok(forwarded_for_ip_addr) =
             forwarded_headers::handle_forwarded_for_headers(req.headers())
         {
-            format!("{forwarded_for_ip_addr} (via {addr})")
+            info!(
+                "{} (via {}) {} {}",
+                forwarded_for_ip_addr,
+                addr,
+                req.method(),
+                uri
+            );
         } else {
-            format!("{addr}")
-        };
-        // we only do this to log the access key, so we can discard any error
-        let key = self
-            .api_handler
-            .key_id_from_request(&req)
-            .map(|k| format!("(key {k}) "))
-            .unwrap_or_default();
-        info!("{source} {key}{} {uri}", req.method());
+            info!("{} {} {}", addr, req.method(), uri);
+        }
         debug!("{:?}", req);
 
         let tracer = opentelemetry::global::tracer("garage");
@@ -350,11 +343,7 @@ where
     while !*must_exit.borrow() {
         let (stream, client_addr) = tokio::select! {
-            acc = listener.accept() => match acc {
-                Ok(r) => r,
-                Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
-                Err(e) => return Err(e.into()),
-            },
+            acc = listener.accept() => acc?,
             _ = must_exit.changed() => continue,
         };
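
The accept-loop difference is worth spelling out: one side treats an ECONNABORTED from accept() as non-fatal and retries, the other propagates it and tears down the whole listener. A minimal standalone sketch of the retrying variant, assuming only tokio:

    use tokio::net::TcpListener;

    async fn accept_loop(listener: TcpListener) -> std::io::Result<()> {
        loop {
            match listener.accept().await {
                Ok((_stream, addr)) => println!("new connection from {addr}"),
                // A connection reset before being accepted should not kill the server.
                Err(e) if e.kind() == std::io::ErrorKind::ConnectionAborted => continue,
                Err(e) => return Err(e),
            }
        }
    }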


@@ -8,6 +8,7 @@ use hyper::{
     body::{Body, Bytes},
     Request, Response,
 };
+use idna::domain_to_unicode;
 use serde::{Deserialize, Serialize};
 
 use garage_model::bucket_table::BucketParams;
@@ -96,7 +97,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
             authority
         ))),
     };
-    authority.map(|h| h.to_ascii_lowercase())
+    authority.map(|h| domain_to_unicode(h).0)
 }
 
 /// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
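
The two sides normalize the Host authority differently. A small sketch of the difference, assuming the `idna` crate (whose `domain_to_unicode` returns the decoded string plus a validity result):

    use idna::domain_to_unicode;

    fn main() {
        let host = "xn--caf-dma.example";
        // Left side of the diff: plain ASCII lowercasing, punycode kept as-is.
        assert_eq!(host.to_ascii_lowercase(), "xn--caf-dma.example");
        // Right side: IDNA decoding; "xn--caf-dma" is the punycode form of "café".
        let (unicode, errors) = domain_to_unicode(host);
        assert!(errors.is_ok());
        assert_eq!(unicode, "café.example");
    }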


@@ -1,135 +0,0 @@
-use std::sync::Mutex;
-
-use futures::prelude::*;
-use futures::stream::BoxStream;
-use http_body_util::{BodyExt, StreamBody};
-use hyper::body::{Bytes, Frame};
-use serde::Deserialize;
-
-use tokio::sync::mpsc;
-use tokio::task;
-
-use super::*;
-
-use crate::signature::checksum::*;
-
-pub struct ReqBody {
-    // why need mutex to be sync??
-    pub(crate) stream: Mutex<BoxStream<'static, Result<Frame<Bytes>, Error>>>,
-    pub(crate) checksummer: Checksummer,
-    pub(crate) expected_checksums: ExpectedChecksums,
-    pub(crate) trailer_algorithm: Option<ChecksumAlgorithm>,
-}
-
-pub type StreamingChecksumReceiver = task::JoinHandle<Result<Checksums, Error>>;
-
-impl ReqBody {
-    pub fn add_expected_checksums(&mut self, more: ExpectedChecksums) {
-        if more.md5.is_some() {
-            self.expected_checksums.md5 = more.md5;
-        }
-        if more.sha256.is_some() {
-            self.expected_checksums.sha256 = more.sha256;
-        }
-        if more.extra.is_some() {
-            self.expected_checksums.extra = more.extra;
-        }
-        self.checksummer.add_expected(&self.expected_checksums);
-    }
-
-    pub fn add_md5(&mut self) {
-        self.checksummer.add_md5();
-    }
-
-    // ============ non-streaming =============
-
-    pub async fn json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> {
-        let body = self.collect().await?;
-        let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
-        Ok(resp)
-    }
-
-    pub async fn collect(self) -> Result<Bytes, Error> {
-        self.collect_with_checksums().await.map(|(b, _)| b)
-    }
-
-    pub async fn collect_with_checksums(mut self) -> Result<(Bytes, Checksums), Error> {
-        let stream: BoxStream<_> = self.stream.into_inner().unwrap();
-        let bytes = BodyExt::collect(StreamBody::new(stream)).await?.to_bytes();
-
-        self.checksummer.update(&bytes);
-        let checksums = self.checksummer.finalize();
-        checksums.verify(&self.expected_checksums)?;
-
-        Ok((bytes, checksums))
-    }
-
-    // ============ streaming =============
-
-    pub fn streaming_with_checksums(
-        self,
-    ) -> (
-        BoxStream<'static, Result<Bytes, Error>>,
-        StreamingChecksumReceiver,
-    ) {
-        let Self {
-            stream,
-            mut checksummer,
-            mut expected_checksums,
-            trailer_algorithm,
-        } = self;
-
-        let (frame_tx, mut frame_rx) = mpsc::channel::<Frame<Bytes>>(5);
-
-        let join_checksums = tokio::spawn(async move {
-            while let Some(frame) = frame_rx.recv().await {
-                match frame.into_data() {
-                    Ok(data) => {
-                        checksummer = tokio::task::spawn_blocking(move || {
-                            checksummer.update(&data);
-                            checksummer
-                        })
-                        .await
-                        .unwrap()
-                    }
-                    Err(frame) => {
-                        let trailers = frame.into_trailers().unwrap();
-                        let algo = trailer_algorithm.unwrap();
-                        expected_checksums.extra = Some(extract_checksum_value(&trailers, algo)?);
-                        break;
-                    }
-                }
-            }
-
-            if trailer_algorithm.is_some() && expected_checksums.extra.is_none() {
-                return Err(Error::bad_request("trailing checksum was not sent"));
-            }
-
-            let checksums = checksummer.finalize();
-            checksums.verify(&expected_checksums)?;
-
-            Ok(checksums)
-        });
-
-        let stream: BoxStream<_> = stream.into_inner().unwrap();
-        let stream = stream.filter_map(move |x| {
-            let frame_tx = frame_tx.clone();
-            async move {
-                match x {
-                    Err(e) => Some(Err(e)),
-                    Ok(frame) => {
-                        if frame.is_data() {
-                            let data = frame.data_ref().unwrap().clone();
-                            let _ = frame_tx.send(frame).await;
-                            Some(Ok(data))
-                        } else {
-                            let _ = frame_tx.send(frame).await;
-                            None
-                        }
-                    }
-                }
-            }
-        });
-
-        (stream.boxed(), join_checksums)
-    }
-}
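
The core trick in this deleted file is a "tee": the body stream is forwarded to the caller while every frame is also sent over a channel to a background task that folds it into a checksum. A self-contained sketch of that pattern under stated assumptions (plain Vec frames instead of hyper Frames, a byte count standing in for Checksummer):

    use futures::{stream, StreamExt};
    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        // Two "frames" of body data; errors are represented with () here.
        let frames: Vec<Result<Vec<u8>, ()>> = vec![Ok(b"hello ".to_vec()), Ok(b"world".to_vec())];
        let (tx, mut rx) = mpsc::channel::<Vec<u8>>(5);

        // Background task standing in for the checksumming task above.
        let join = tokio::spawn(async move {
            let mut total = 0usize;
            while let Some(data) = rx.recv().await {
                total += data.len();
            }
            total
        });

        // Forward the stream to the caller while teeing each frame to the task.
        let forwarded: Vec<Vec<u8>> = stream::iter(frames)
            .filter_map(|item| {
                let tx = tx.clone();
                async move {
                    match item {
                        Ok(data) => {
                            let _ = tx.send(data.clone()).await;
                            Some(data)
                        }
                        Err(_) => None,
                    }
                }
            })
            .collect()
            .await;
        // All senders are dropped once the stream is consumed, so the task ends.

        assert_eq!(forwarded.concat(), b"hello world".to_vec());
        assert_eq!(join.await.unwrap(), 11);
    }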


@@ -1,4 +1,4 @@
-use thiserror::Error;
+use err_derive::Error;
 
 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
@@ -6,22 +6,18 @@ pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-    #[error("{0}")]
+    #[error(display = "{}", _0)]
     /// Error from common error
     Common(CommonError),
 
     /// Authorization Header Malformed
-    #[error("Authorization header malformed, unexpected scope: {0}")]
+    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
     AuthorizationHeaderMalformed(String),
 
     // Category: bad request
     /// The request contained an invalid UTF-8 sequence in its path or in other parameters
-    #[error("Invalid UTF-8: {0}")]
-    InvalidUtf8Str(#[from] std::str::Utf8Error),
-
-    /// The provided digest (checksum) value was invalid
-    #[error("Invalid digest: {0}")]
-    InvalidDigest(String),
+    #[error(display = "Invalid UTF-8: {}", _0)]
+    InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
 }
 
 impl<T> From<T> for Error


@@ -2,7 +2,6 @@ use chrono::{DateTime, Utc};
 use hmac::{Hmac, Mac};
 use sha2::Sha256;
 
-use hyper::header::HeaderName;
 use hyper::{body::Incoming as IncomingBody, Request};
 
 use garage_model::garage::Garage;
@@ -11,8 +10,6 @@ use garage_util::data::{sha256sum, Hash};
 
 use error::*;
 
-pub mod body;
-pub mod checksum;
 pub mod error;
 pub mod payload;
 pub mod streaming;
@@ -20,73 +17,36 @@ pub mod streaming;
 pub const SHORT_DATE: &str = "%Y%m%d";
 pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
 
-// ---- Constants used in AWSv4 signatures ----
-
-pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
-pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
-pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
-pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
-pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
-pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
-pub const X_AMZ_CONTENT_SHA256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
-pub const X_AMZ_TRAILER: HeaderName = HeaderName::from_static("x-amz-trailer");
-
-/// Result of `sha256("")`
-pub(crate) const EMPTY_STRING_HEX_DIGEST: &str =
-    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
-
-// Signature calculation algorithm
-pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
 type HmacSha256 = Hmac<Sha256>;
 
-// Possible values for x-amz-content-sha256, in addition to the actual sha256
-pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
-pub const STREAMING_UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
-pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
-
-// Used in the computation of StringToSign
-pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
-
-// ---- enums to describe stuff going on in signature calculation ----
-
-#[derive(Debug)]
-pub enum ContentSha256Header {
-    UnsignedPayload,
-    Sha256Checksum(Hash),
-    StreamingPayload { trailer: bool, signed: bool },
-}
-
-// ---- top-level functions ----
-
-pub struct VerifiedRequest {
-    pub request: Request<streaming::ReqBody>,
-    pub access_key: Key,
-    pub content_sha256_header: ContentSha256Header,
-}
-
 pub async fn verify_request(
     garage: &Garage,
     mut req: Request<IncomingBody>,
     service: &'static str,
-) -> Result<VerifiedRequest, Error> {
-    let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;
-
-    let request = streaming::parse_streaming_body(
-        req,
-        &checked_signature,
-        &garage.config.s3_api.s3_region,
-        service,
-    )?;
-
-    let access_key = checked_signature
-        .key
-        .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
-    Ok(VerifiedRequest {
-        request,
-        access_key,
-        content_sha256_header: checked_signature.content_sha256_header,
-    })
+) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
+    let (api_key, mut content_sha256) =
+        payload::check_payload_signature(&garage, &mut req, service).await?;
+    let api_key =
+        api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
+
+    let req = streaming::parse_streaming_body(
+        &api_key,
+        req,
+        &mut content_sha256,
+        &garage.config.s3_api.s3_region,
+        service,
+    )?;
+
+    Ok((req, api_key, content_sha256))
+}
+
+pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
+    if expected_sha256 != sha256sum(body) {
+        return Err(Error::bad_request(
+            "Request content hash does not match signed hash".to_string(),
+        ));
+    }
+    Ok(())
 }
 
 pub fn signing_hmac(


@@ -13,9 +13,23 @@ use garage_util::data::Hash;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;
 
-use super::*;
+use super::LONG_DATETIME;
+use super::{compute_scope, signing_hmac};
 use crate::encoding::uri_encode;
+use crate::signature::error::*;
+
+pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
+pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
+pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
+pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
+pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
+pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
+pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
+
+pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
+pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
+pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
 
 pub type QueryMap = HeaderMap<QueryValue>;
 pub struct QueryValue {
@@ -25,18 +39,11 @@ pub struct QueryValue {
     value: String,
 }
 
-#[derive(Debug)]
-pub struct CheckedSignature {
-    pub key: Option<Key>,
-    pub content_sha256_header: ContentSha256Header,
-    pub signature_header: Option<String>,
-}
-
 pub async fn check_payload_signature(
     garage: &Garage,
     request: &mut Request<IncomingBody>,
     service: &'static str,
-) -> Result<CheckedSignature, Error> {
+) -> Result<(Option<Key>, Option<Hash>), Error> {
     let query = parse_query_map(request.uri())?;
 
     if query.contains_key(&X_AMZ_ALGORITHM) {
@@ -50,46 +57,17 @@ pub async fn check_payload_signature(
         // Unsigned (anonymous) request
         let content_sha256 = request
             .headers()
-            .get(X_AMZ_CONTENT_SHA256)
-            .map(|x| x.to_str())
-            .transpose()?;
-        Ok(CheckedSignature {
-            key: None,
-            content_sha256_header: parse_x_amz_content_sha256(content_sha256)?,
-            signature_header: None,
-        })
-    }
-}
-
-fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Header, Error> {
-    let header = match header {
-        Some(x) => x,
-        None => return Ok(ContentSha256Header::UnsignedPayload),
-    };
-    if header == UNSIGNED_PAYLOAD {
-        Ok(ContentSha256Header::UnsignedPayload)
-    } else if let Some(rest) = header.strip_prefix("STREAMING-") {
-        let (trailer, algo) = if let Some(rest2) = rest.strip_suffix("-TRAILER") {
-            (true, rest2)
-        } else {
-            (false, rest)
-        };
-        let signed = match algo {
-            AWS4_HMAC_SHA256_PAYLOAD => true,
-            UNSIGNED_PAYLOAD => false,
-            _ => {
-                return Err(Error::bad_request(
-                    "invalid or unsupported x-amz-content-sha256",
-                ))
-            }
-        };
-        Ok(ContentSha256Header::StreamingPayload { trailer, signed })
-    } else {
-        let sha256 = hex::decode(header)
-            .ok()
-            .and_then(|bytes| Hash::try_from(&bytes))
-            .ok_or_bad_request("Invalid content sha256 hash")?;
-        Ok(ContentSha256Header::Sha256Checksum(sha256))
+            .get("x-amz-content-sha256")
+            .filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
+        if let Some(content_sha256) = content_sha256 {
+            let sha256 = hex::decode(content_sha256)
+                .ok()
+                .and_then(|bytes| Hash::try_from(&bytes))
+                .ok_or_bad_request("Invalid content sha256 hash")?;
+            Ok((None, Some(sha256)))
+        } else {
+            Ok((None, None))
+        }
     }
 }
 
@@ -98,13 +76,13 @@ async fn check_standard_signature(
     service: &'static str,
     request: &Request<IncomingBody>,
     query: QueryMap,
-) -> Result<CheckedSignature, Error> {
+) -> Result<(Option<Key>, Option<Hash>), Error> {
     let authorization = Authorization::parse_header(request.headers())?;
 
     // Verify that all necessary request headers are included in signed_headers
     // The following must be included for all signatures:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
+    // - all x-amz-* headers used in the request
     // AWS also indicates that the Content-Type header should be signed if
     // it is used, but Minio client doesn't sign it so we don't check it for compatibility.
     let signed_headers = split_signed_headers(&authorization)?;
@@ -130,13 +108,18 @@ async fn check_standard_signature(
     let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
 
-    let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;
+    let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
+        None
+    } else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
+        let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
+        Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
+    } else {
+        let bytes = hex::decode(authorization.content_sha256)
+            .ok_or_bad_request("Invalid content sha256 hash")?;
+        Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid content sha256 hash")?)
+    };
 
-    Ok(CheckedSignature {
-        key: Some(key),
-        content_sha256_header,
-        signature_header: Some(authorization.signature),
-    })
+    Ok((Some(key), content_sha256))
 }
 
 async fn check_presigned_signature(
@@ -144,14 +127,14 @@ async fn check_presigned_signature(
     service: &'static str,
     request: &mut Request<IncomingBody>,
     mut query: QueryMap,
-) -> Result<CheckedSignature, Error> {
+) -> Result<(Option<Key>, Option<Hash>), Error> {
     let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
     let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
 
     // Verify that all necessary request headers are included in signed_headers
     // For AWSv4 pre-signed URLs, the following must be included:
     // - the Host header (mandatory)
-    // - all x-amz-* headers used in the request (except x-amz-content-sha256)
+    // - all x-amz-* headers used in the request
     let signed_headers = split_signed_headers(&authorization)?;
     verify_signed_headers(request.headers(), &signed_headers)?;
@@ -210,11 +193,7 @@ async fn check_presigned_signature(
     // Presigned URLs always use UNSIGNED-PAYLOAD,
     // so there is no sha256 hash to return.
-    Ok(CheckedSignature {
-        key: Some(key),
-        content_sha256_header: ContentSha256Header::UnsignedPayload,
-        signature_header: Some(authorization.signature),
-    })
+    Ok((Some(key), None))
 }
 
 pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
@@ -268,9 +247,7 @@ fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) ->
         return Err(Error::bad_request("Header `Host` should be signed"));
     }
     for (name, _) in headers.iter() {
-        // Enforce signature of all x-amz-* headers, except x-amz-content-sh256
-        // because it is included in the canonical request in all cases
-        if name.as_str().starts_with("x-amz-") && name != X_AMZ_CONTENT_SHA256 {
+        if name.as_str().starts_with("x-amz-") {
             if !signed_headers.contains(name) {
                 return Err(Error::bad_request(format!(
                     "Header `{}` should be signed",
@@ -419,7 +396,7 @@ pub async fn verify_v4(
 
 // ============ Authorization header, or X-Amz-* query params =========
 
 pub struct Authorization {
-    pub key_id: String,
+    key_id: String,
     scope: String,
     signed_headers: String,
     signature: String,
@@ -428,7 +405,7 @@ pub struct Authorization {
 }
 
 impl Authorization {
-    pub fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+    fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
         let authorization = headers
             .get(AUTHORIZATION)
             .ok_or_bad_request("Missing authorization header")?
@@ -465,12 +442,13 @@ impl Authorization {
             .to_string();
 
         let content_sha256 = headers
-            .get(X_AMZ_CONTENT_SHA256)
+            .get(X_AMZ_CONTENT_SH256)
            .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
 
         let date = headers
             .get(X_AMZ_DATE)
-            .ok_or_bad_request("Missing X-Amz-Date field")?
+            .ok_or_bad_request("Missing X-Amz-Date field")
+            .map_err(Error::from)?
             .to_str()?;
         let date = parse_date(date)?;
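
For orientation, the x-amz-content-sha256 values this file has to distinguish are all visible in the hunks above. An illustration (not part of the diff) that mirrors the left side's ContentSha256Header enum:

    #[derive(Debug, PartialEq)]
    enum Mode {
        Unsigned,                                  // UNSIGNED-PAYLOAD
        Checksum(String),                          // hex-encoded sha256 of the whole body
        Streaming { trailer: bool, signed: bool }, // aws-chunked payloads
    }

    fn classify(header: &str) -> Mode {
        match header {
            "UNSIGNED-PAYLOAD" => Mode::Unsigned,
            "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => Mode::Streaming { trailer: false, signed: true },
            "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" => Mode::Streaming { trailer: true, signed: true },
            "STREAMING-UNSIGNED-PAYLOAD-TRAILER" => Mode::Streaming { trailer: true, signed: false },
            digest => Mode::Checksum(digest.to_string()),
        }
    }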


@@ -1,157 +1,84 @@
 use std::pin::Pin;
-use std::sync::Mutex;
 
 use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
 use futures::prelude::*;
 use futures::task;
+use garage_model::key_table::Key;
 use hmac::Mac;
-use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING};
-use hyper::body::{Bytes, Frame, Incoming as IncomingBody};
+use http_body_util::StreamBody;
+use hyper::body::{Bytes, Incoming as IncomingBody};
 use hyper::Request;
 
 use garage_util::data::Hash;
 
-use super::*;
-use crate::helpers::body_stream;
-use crate::signature::checksum::*;
-use crate::signature::payload::CheckedSignature;
+use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
+use crate::helpers::*;
+use crate::signature::error::*;
+use crate::signature::payload::{
+    STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
+};
 
-pub use crate::signature::body::ReqBody;
+pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
+
+pub type ReqBody = BoxBody<Error>;
 
 pub fn parse_streaming_body(
-    mut req: Request<IncomingBody>,
-    checked_signature: &CheckedSignature,
+    api_key: &Key,
+    req: Request<IncomingBody>,
+    content_sha256: &mut Option<Hash>,
     region: &str,
     service: &str,
 ) -> Result<Request<ReqBody>, Error> {
-    debug!(
-        "Content signature mode: {:?}",
-        checked_signature.content_sha256_header
-    );
-    match checked_signature.content_sha256_header {
-        ContentSha256Header::StreamingPayload { signed, trailer } => {
-            // Sanity checks
-            if !signed && !trailer {
-                return Err(Error::bad_request(
-                    "STREAMING-UNSIGNED-PAYLOAD without trailer is not a valid combination",
-                ));
-            }
-
-            // Remove the aws-chunked component in the content-encoding: header
-            // Note: this header is not properly sent by minio client, so don't fail
-            // if it is absent from the request.
-            if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) {
-                if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") {
-                    req.headers_mut()
-                        .insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap());
-                } else if content_encoding != "aws-chunked" {
-                    return Err(Error::bad_request(
-                        "content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD",
-                    ));
-                }
-            }
-
-            // If trailer header is announced, add the calculation of the requested checksum
-            let mut checksummer = Checksummer::init(&Default::default(), false);
-            let trailer_algorithm = if trailer {
-                let algo = Some(
-                    request_trailer_checksum_algorithm(req.headers())?
-                        .ok_or_bad_request("Missing x-amz-trailer header")?,
-                );
-                checksummer = checksummer.add(algo);
-                algo
-            } else {
-                None
-            };
-
-            // For signed variants, determine signing parameters
-            let sign_params = if signed {
-                let signature = checked_signature
-                    .signature_header
-                    .clone()
-                    .ok_or_bad_request("No signature provided")?;
-                let signature = hex::decode(signature)
-                    .ok()
-                    .and_then(|bytes| Hash::try_from(&bytes))
-                    .ok_or_bad_request("Invalid signature")?;
-
-                let secret_key = checked_signature
-                    .key
-                    .as_ref()
-                    .ok_or_bad_request("Cannot sign streaming payload without signing key")?
-                    .state
-                    .as_option()
-                    .ok_or_internal_error("Deleted key state")?
-                    .secret_key
-                    .to_string();
-
-                let date = req
-                    .headers()
-                    .get(X_AMZ_DATE)
-                    .ok_or_bad_request("Missing X-Amz-Date field")?
-                    .to_str()?;
-                let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
-                    .ok_or_bad_request("Invalid date")?;
-                let date: DateTime<Utc> = Utc.from_utc_datetime(&date);
-
-                let scope = compute_scope(&date, region, service);
-                let signing_hmac =
-                    crate::signature::signing_hmac(&date, &secret_key, region, service)
-                        .ok_or_internal_error("Unable to build signing HMAC")?;
-
-                Some(SignParams {
-                    datetime: date,
-                    scope,
-                    signing_hmac,
-                    previous_signature: signature,
-                })
-            } else {
-                None
-            };
+    match req.headers().get(X_AMZ_CONTENT_SH256) {
+        Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
+            let signature = content_sha256
+                .take()
+                .ok_or_bad_request("No signature provided")?;
+
+            let secret_key = &api_key
+                .state
+                .as_option()
+                .ok_or_internal_error("Deleted key state")?
+                .secret_key;
+
+            let date = req
+                .headers()
+                .get(X_AMZ_DATE)
+                .ok_or_bad_request("Missing X-Amz-Date field")?
+                .to_str()?;
+            let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
+                .ok_or_bad_request("Invalid date")?;
+            let date: DateTime<Utc> = Utc.from_utc_datetime(&date);
+
+            let scope = compute_scope(&date, region, service);
+            let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
+                .ok_or_internal_error("Unable to build signing HMAC")?;
 
             Ok(req.map(move |body| {
                 let stream = body_stream::<_, Error>(body);
                 let signed_payload_stream =
-                    StreamingPayloadStream::new(stream, sign_params, trailer).map_err(Error::from);
-                ReqBody {
-                    stream: Mutex::new(signed_payload_stream.boxed()),
-                    checksummer,
-                    expected_checksums: Default::default(),
-                    trailer_algorithm,
-                }
+                    SignedPayloadStream::new(stream, signing_hmac, date, &scope, signature)
+                        .map(|x| x.map(hyper::body::Frame::data))
+                        .map_err(Error::from);
+                ReqBody::new(StreamBody::new(signed_payload_stream))
             }))
         }
-        _ => Ok(req.map(|body| {
-            let expected_checksums = ExpectedChecksums {
-                sha256: match &checked_signature.content_sha256_header {
-                    ContentSha256Header::Sha256Checksum(sha256) => Some(*sha256),
-                    _ => None,
-                },
-                ..Default::default()
-            };
-            let checksummer = Checksummer::init(&expected_checksums, false);
-
-            let stream = http_body_util::BodyStream::new(body).map_err(Error::from);
-            ReqBody {
-                stream: Mutex::new(stream.boxed()),
-                checksummer,
-                expected_checksums,
-                trailer_algorithm: None,
-            }
-        })),
+        _ => Ok(req.map(|body| ReqBody::new(http_body_util::BodyExt::map_err(body, Error::from)))),
     }
 }
 
+/// Result of `sha256("")`
+const EMPTY_STRING_HEX_DIGEST: &str =
+    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+
 fn compute_streaming_payload_signature(
     signing_hmac: &HmacSha256,
     date: DateTime<Utc>,
     scope: &str,
     previous_signature: Hash,
     content_sha256: Hash,
-) -> Result<Hash, StreamingPayloadError> {
+) -> Result<Hash, Error> {
     let string_to_sign = [
         AWS4_HMAC_SHA256_PAYLOAD,
         &date.format(LONG_DATETIME).to_string(),
@@ -165,49 +92,12 @@ fn compute_streaming_payload_signature(
     let mut hmac = signing_hmac.clone();
     hmac.update(string_to_sign.as_bytes());
 
-    Hash::try_from(&hmac.finalize().into_bytes())
-        .ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
-}
-
-fn compute_streaming_trailer_signature(
-    signing_hmac: &HmacSha256,
-    date: DateTime<Utc>,
-    scope: &str,
-    previous_signature: Hash,
-    trailer_sha256: Hash,
-) -> Result<Hash, StreamingPayloadError> {
-    let string_to_sign = [
-        AWS4_HMAC_SHA256_PAYLOAD,
-        &date.format(LONG_DATETIME).to_string(),
-        scope,
-        &hex::encode(previous_signature),
-        &hex::encode(trailer_sha256),
-    ]
-    .join("\n");
-
-    let mut hmac = signing_hmac.clone();
-    hmac.update(string_to_sign.as_bytes());
-    Hash::try_from(&hmac.finalize().into_bytes())
-        .ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
+    Ok(Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")?)
 }
 
 mod payload {
-    use http::{HeaderName, HeaderValue};
-
     use garage_util::data::Hash;
 
-    use nom::bytes::streaming::{tag, take_while};
-    use nom::character::streaming::hex_digit1;
-    use nom::combinator::{map_res, opt};
-    use nom::number::streaming::hex_u32;
-
-    macro_rules! try_parse {
-        ($expr:expr) => {
-            $expr.map_err(|e| e.map(Error::Parser))?
-        };
-    }
-
     pub enum Error<I> {
         Parser(nom::error::Error<I>),
         BadSignature,
@@ -223,13 +113,24 @@ mod payload {
    }
 
     #[derive(Debug, Clone)]
-    pub struct ChunkHeader {
+    pub struct Header {
         pub size: usize,
-        pub signature: Option<Hash>,
+        pub signature: Hash,
     }
 
-    impl ChunkHeader {
-        pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
+    impl Header {
+        pub fn parse(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
+            use nom::bytes::streaming::tag;
+            use nom::character::streaming::hex_digit1;
+            use nom::combinator::map_res;
+            use nom::number::streaming::hex_u32;
+
+            macro_rules! try_parse {
+                ($expr:expr) => {
+                    $expr.map_err(|e| e.map(Error::Parser))?
+                };
+            }
+
             let (input, size) = try_parse!(hex_u32(input));
             let (input, _) = try_parse!(tag(";")(input));
@@ -239,172 +140,96 @@ mod payload {
             let (input, _) = try_parse!(tag("\r\n")(input));
 
-            let header = ChunkHeader {
+            let header = Header {
                 size: size as usize,
-                signature: Some(signature),
+                signature,
             };
             Ok((input, header))
         }
-
-        pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
-            let (input, size) = try_parse!(hex_u32(input));
-            let (input, _) = try_parse!(tag("\r\n")(input));
-
-            let header = ChunkHeader {
-                size: size as usize,
-                signature: None,
-            };
-            Ok((input, header))
-        }
-    }
-
-    #[derive(Debug, Clone)]
-    pub struct TrailerChunk {
-        pub header_name: HeaderName,
-        pub header_value: HeaderValue,
-        pub signature: Option<Hash>,
-    }
-
-    impl TrailerChunk {
-        fn parse_content(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
-            let (input, header_name) = try_parse!(map_res(
-                take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'),
-                HeaderName::from_bytes
-            )(input));
-            let (input, _) = try_parse!(tag(b":")(input));
-            let (input, header_value) = try_parse!(map_res(
-                take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)),
-                HeaderValue::from_bytes
-            )(input));
-
-            // Possible '\n' after the header value, depends on clients
-            // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
-            let (input, _) = try_parse!(opt(tag(b"\n"))(input));
-
-            let (input, _) = try_parse!(tag(b"\r\n")(input));
-            Ok((
-                input,
-                TrailerChunk {
-                    header_name,
-                    header_value,
-                    signature: None,
-                },
-            ))
-        }
-        pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
-            let (input, trailer) = Self::parse_content(input)?;
-
-            let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input));
-            let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
-            let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
-            let (input, _) = try_parse!(tag(b"\r\n")(input));
-            Ok((
-                input,
-                TrailerChunk {
-                    signature: Some(signature),
-                    ..trailer
-                },
-            ))
-        }
-        pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
-            let (input, trailer) = Self::parse_content(input)?;
-
-            let (input, _) = try_parse!(tag(b"\r\n")(input));
-            Ok((input, trailer))
-        }
     }
 }
 
 #[derive(Debug)]
-pub enum StreamingPayloadError {
+pub enum SignedPayloadStreamError {
     Stream(Error),
     InvalidSignature,
     Message(String),
 }
 
-impl StreamingPayloadError {
+impl SignedPayloadStreamError {
     fn message(msg: &str) -> Self {
-        StreamingPayloadError::Message(msg.into())
+        SignedPayloadStreamError::Message(msg.into())
     }
 }
 
-impl From<StreamingPayloadError> for Error {
-    fn from(err: StreamingPayloadError) -> Self {
+impl From<SignedPayloadStreamError> for Error {
+    fn from(err: SignedPayloadStreamError) -> Self {
         match err {
-            StreamingPayloadError::Stream(e) => e,
-            StreamingPayloadError::InvalidSignature => {
+            SignedPayloadStreamError::Stream(e) => e,
+            SignedPayloadStreamError::InvalidSignature => {
                 Error::bad_request("Invalid payload signature")
             }
-            StreamingPayloadError::Message(e) => {
+            SignedPayloadStreamError::Message(e) => {
                 Error::bad_request(format!("Chunk format error: {}", e))
             }
         }
     }
 }
 
-impl<I> From<payload::Error<I>> for StreamingPayloadError {
+impl<I> From<payload::Error<I>> for SignedPayloadStreamError {
     fn from(err: payload::Error<I>) -> Self {
         Self::message(err.description())
     }
 }
 
-impl<I> From<nom::error::Error<I>> for StreamingPayloadError {
+impl<I> From<nom::error::Error<I>> for SignedPayloadStreamError {
    fn from(err: nom::error::Error<I>) -> Self {
         Self::message(err.code.description())
     }
 }
 
-enum StreamingPayloadChunk {
-    Chunk {
-        header: payload::ChunkHeader,
-        data: Bytes,
-    },
-    Trailer(payload::TrailerChunk),
-}
-
-struct SignParams {
-    datetime: DateTime<Utc>,
-    scope: String,
-    signing_hmac: HmacSha256,
-    previous_signature: Hash,
+struct SignedPayload {
    header: payload::Header,
    data: Bytes,
 }
 
 #[pin_project::pin_project]
-pub struct StreamingPayloadStream<S>
+pub struct SignedPayloadStream<S>
 where
     S: Stream<Item = Result<Bytes, Error>>,
 {
     #[pin]
     stream: S,
     buf: bytes::BytesMut,
-    signing: Option<SignParams>,
-    has_trailer: bool,
-    done: bool,
+    datetime: DateTime<Utc>,
+    scope: String,
+    signing_hmac: HmacSha256,
+    previous_signature: Hash,
 }
 
-impl<S> StreamingPayloadStream<S>
+impl<S> SignedPayloadStream<S>
 where
     S: Stream<Item = Result<Bytes, Error>>,
 {
-    fn new(stream: S, signing: Option<SignParams>, has_trailer: bool) -> Self {
+    pub fn new(
+        stream: S,
+        signing_hmac: HmacSha256,
+        datetime: DateTime<Utc>,
+        scope: &str,
+        seed_signature: Hash,
+    ) -> Self {
         Self {
             stream,
             buf: bytes::BytesMut::new(),
-            signing,
-            has_trailer,
-            done: false,
+            datetime,
+            scope: scope.into(),
+            signing_hmac,
+            previous_signature: seed_signature,
         }
     }
 
-    fn parse_next(
-        input: &[u8],
-        is_signed: bool,
-        has_trailer: bool,
-    ) -> nom::IResult<&[u8], StreamingPayloadChunk, StreamingPayloadError> {
+    fn parse_next(input: &[u8]) -> nom::IResult<&[u8], SignedPayload, SignedPayloadStreamError> {
         use nom::bytes::streaming::{tag, take};
 
         macro_rules! try_parse {
@@ -413,30 +238,17 @@
            };
        }
 
-        let (input, header) = if is_signed {
-            try_parse!(payload::ChunkHeader::parse_signed(input))
-        } else {
-            try_parse!(payload::ChunkHeader::parse_unsigned(input))
-        };
+        let (input, header) = try_parse!(payload::Header::parse(input));
 
         // 0-sized chunk is the last
         if header.size == 0 {
-            if has_trailer {
-                let (input, trailer) = if is_signed {
-                    try_parse!(payload::TrailerChunk::parse_signed(input))
-                } else {
-                    try_parse!(payload::TrailerChunk::parse_unsigned(input))
-                };
-                return Ok((input, StreamingPayloadChunk::Trailer(trailer)));
-            } else {
-                return Ok((
-                    input,
-                    StreamingPayloadChunk::Chunk {
-                        header,
-                        data: Bytes::new(),
-                    },
-                ));
-            }
+            return Ok((
+                input,
+                SignedPayload {
+                    header,
+                    data: Bytes::new(),
+                },
+            ));
        }
 
        let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));
@@ -444,15 +256,15 @@
 
        let data = Bytes::from(data.to_vec());
 
-        Ok((input, StreamingPayloadChunk::Chunk { header, data }))
+        Ok((input, SignedPayload { header, data }))
    }
 }
 
-impl<S> Stream for StreamingPayloadStream<S>
+impl<S> Stream for SignedPayloadStream<S>
 where
     S: Stream<Item = Result<Bytes, Error>> + Unpin,
{ {
type Item = Result<Frame<Bytes>, StreamingPayloadError>; type Item = Result<Bytes, SignedPayloadStreamError>;
fn poll_next( fn poll_next(
self: Pin<&mut Self>, self: Pin<&mut Self>,
@ -462,105 +274,56 @@ where
let mut this = self.project(); let mut this = self.project();
if *this.done {
return Poll::Ready(None);
}
loop { loop {
let (input, payload) = let (input, payload) = match Self::parse_next(this.buf) {
match Self::parse_next(this.buf, this.signing.is_some(), *this.has_trailer) { Ok(res) => res,
Ok(res) => res, Err(nom::Err::Incomplete(_)) => {
Err(nom::Err::Incomplete(_)) => { match futures::ready!(this.stream.as_mut().poll_next(cx)) {
match futures::ready!(this.stream.as_mut().poll_next(cx)) { Some(Ok(bytes)) => {
Some(Ok(bytes)) => { this.buf.extend(bytes);
this.buf.extend(bytes); continue;
continue; }
} Some(Err(e)) => {
Some(Err(e)) => { return Poll::Ready(Some(Err(SignedPayloadStreamError::Stream(e))))
return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e)))) }
} None => {
None => { return Poll::Ready(Some(Err(SignedPayloadStreamError::message(
return Poll::Ready(Some(Err(StreamingPayloadError::message( "Unexpected EOF",
"Unexpected EOF", ))));
))));
}
} }
} }
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
return Poll::Ready(Some(Err(e)))
}
};
match payload {
StreamingPayloadChunk::Chunk { data, header } => {
if let Some(signing) = this.signing.as_mut() {
let data_sha256sum = sha256sum(&data);
let expected_signature = compute_streaming_payload_signature(
&signing.signing_hmac,
signing.datetime,
&signing.scope,
signing.previous_signature,
data_sha256sum,
)?;
if header.signature.unwrap() != expected_signature {
return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
}
signing.previous_signature = header.signature.unwrap();
}
*this.buf = input.into();
// 0-sized chunk is the last
if data.is_empty() {
// if there was a trailer, it would have been returned by the parser
assert!(!*this.has_trailer);
*this.done = true;
return Poll::Ready(None);
}
return Poll::Ready(Some(Ok(Frame::data(data))));
} }
StreamingPayloadChunk::Trailer(trailer) => { Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
trace!( return Poll::Ready(Some(Err(e)))
"In StreamingPayloadStream::poll_next: got trailer {:?}",
trailer
);
if let Some(signing) = this.signing.as_mut() {
let data = [
trailer.header_name.as_ref(),
&b":"[..],
trailer.header_value.as_ref(),
&b"\n"[..],
]
.concat();
let trailer_sha256sum = sha256sum(&data);
let expected_signature = compute_streaming_trailer_signature(
&signing.signing_hmac,
signing.datetime,
&signing.scope,
signing.previous_signature,
trailer_sha256sum,
)?;
if trailer.signature.unwrap() != expected_signature {
return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
}
}
*this.buf = input.into();
*this.done = true;
let mut trailers_map = HeaderMap::new();
trailers_map.insert(trailer.header_name, trailer.header_value);
return Poll::Ready(Some(Ok(Frame::trailers(trailers_map))));
} }
};
// 0-sized chunk is the last
if payload.data.is_empty() {
return Poll::Ready(None);
} }
let data_sha256sum = sha256sum(&payload.data);
let expected_signature = compute_streaming_payload_signature(
this.signing_hmac,
*this.datetime,
this.scope,
*this.previous_signature,
data_sha256sum,
)
.map_err(|e| {
SignedPayloadStreamError::Message(format!("Could not build signature: {}", e))
})?;
if payload.header.signature != expected_signature {
return Poll::Ready(Some(Err(SignedPayloadStreamError::InvalidSignature)));
}
*this.buf = input.into();
*this.previous_signature = payload.header.signature;
return Poll::Ready(Some(Ok(payload.data)));
} }
} }
@ -573,7 +336,7 @@ where
mod tests { mod tests {
use futures::prelude::*; use futures::prelude::*;
use super::{SignParams, StreamingPayloadError, StreamingPayloadStream}; use super::{SignedPayloadStream, SignedPayloadStreamError};
#[tokio::test] #[tokio::test]
async fn test_interrupted_signed_payload_stream() { async fn test_interrupted_signed_payload_stream() {
@ -595,20 +358,12 @@ mod tests {
let seed_signature = Hash::default(); let seed_signature = Hash::default();
let mut stream = StreamingPayloadStream::new( let mut stream =
body, SignedPayloadStream::new(body, signing_hmac, datetime, &scope, seed_signature);
Some(SignParams {
signing_hmac,
datetime,
scope,
previous_signature: seed_signature,
}),
false,
);
assert!(stream.try_next().await.is_err()); assert!(stream.try_next().await.is_err());
match stream.try_next().await { match stream.try_next().await {
Err(StreamingPayloadError::Message(msg)) if msg == "Unexpected EOF" => {} Err(SignedPayloadStreamError::Message(msg)) if msg == "Unexpected EOF" => {}
item => panic!( item => panic!(
"Unexpected result, expected early EOF error, got {:?}", "Unexpected result, expected early EOF error, got {:?}",
item item
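Note: both sides of this hunk consume the same `aws-chunked` wire format, in which every chunk is a header line followed by the payload. A minimal standalone sketch of that framing (illustrative function name, not Garage's actual nom parser):

```rust
// Each chunk on the wire is:
//   <hex size>;chunk-signature=<hex signature>\r\n<payload bytes>\r\n
// and a chunk of size 0 terminates the stream.
fn split_chunk(input: &str) -> Option<(&str, &str, &str)> {
    let (header, rest) = input.split_once("\r\n")?;
    let (size_hex, signature) = header.split_once(";chunk-signature=")?;
    let size = usize::from_str_radix(size_hex, 16).ok()?;
    let payload = rest.get(..size)?;
    let remainder = rest.get(size + 2..)?; // skip the \r\n after the payload
    Some((payload, signature, remainder))
}

fn main() {
    let stream = "5;chunk-signature=aaaa\r\nhello\r\n0;chunk-signature=bbbb\r\n\r\n";
    let (payload, sig, rest) = split_chunk(stream).unwrap();
    assert_eq!((payload, sig), ("hello", "aaaa"));
    assert!(split_chunk(rest).unwrap().0.is_empty()); // final 0-sized chunk
}
```

The left-hand version generalizes this framing to unsigned chunks and trailer headers; the right-hand version only accepts signed chunks with no trailer.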
@@ -1,12 +1,12 @@
 [package]
 name = "garage_api_k2v"
-version = "1.3.1"
+version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "K2V API server crate for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../../README.md"
+readme = "../../README.md"

 [lib]
 path = "lib.rs"

@@ -20,7 +20,7 @@ garage_util = { workspace = true, features = [ "k2v" ] }
 garage_api_common.workspace = true

 base64.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 tracing.workspace = true
 futures.workspace = true
@@ -81,9 +81,7 @@ impl ApiHandler for K2VApiServer {
 			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}

-		let verified_request = verify_request(&garage, req, "k2v").await?;
-		let req = verified_request.request;
-		let api_key = verified_request.access_key;
+		let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;

 		let bucket_id = garage
 			.bucket_helper()
@@ -176,12 +174,6 @@ impl ApiHandler for K2VApiServer {
 		Ok(resp_ok)
 	}
-
-	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
-		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
-			.map(|auth| auth.key_id)
-			.ok()
-	}
 }

 impl ApiEndpoint for K2VApiEndpoint {
@@ -20,7 +20,7 @@ pub async fn handle_insert_batch(
 	let ReqCtx {
 		garage, bucket_id, ..
 	} = &ctx;
-	let items = req.into_body().json::<Vec<InsertBatchItem>>().await?;
+	let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;

 	let mut items2 = vec![];
 	for it in items {
@@ -47,7 +47,7 @@ pub async fn handle_read_batch(
 	ctx: ReqCtx,
 	req: Request<ReqBody>,
 ) -> Result<Response<ResBody>, Error> {
-	let queries = req.into_body().json::<Vec<ReadBatchQuery>>().await?;
+	let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;

 	let resp_results = futures::future::join_all(
 		queries
@@ -141,7 +141,7 @@ pub async fn handle_delete_batch(
 	ctx: ReqCtx,
 	req: Request<ReqBody>,
 ) -> Result<Response<ResBody>, Error> {
-	let queries = req.into_body().json::<Vec<DeleteBatchQuery>>().await?;
+	let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;

 	let resp_results = futures::future::join_all(
 		queries
@@ -262,7 +262,7 @@ pub(crate) async fn handle_poll_range(
 	} = ctx;

 	use garage_model::k2v::sub::PollRange;

-	let query = req.into_body().json::<PollRangeQuery>().await?;
+	let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;

 	let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;
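Both spellings in this hunk do the same work: buffer the JSON request body and hand it to serde. A hedged sketch of the operation they reduce to (helper name and error type here are illustrative, not the exact garage_api_common API):

```rust
use serde::de::DeserializeOwned;

// What parse_json_body::<T, _, Error>(req) and req.into_body().json::<T>()
// both boil down to once the body has been collected into memory:
fn decode_json_body<T: DeserializeOwned>(body: &[u8]) -> Result<T, String> {
    serde_json::from_slice(body).map_err(|e| format!("Invalid JSON: {}", e))
}
```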
@@ -1,6 +1,6 @@
+use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
-use thiserror::Error;

 use garage_api_common::common_error::{commonErrorDerivative, CommonError};
 pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
@@ -14,38 +14,34 @@ use garage_api_common::signature::error::Error as SignatureError;
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
-	#[error("{0}")]
+	#[error(display = "{}", _0)]
 	/// Error from common error
-	Common(#[from] CommonError),
+	Common(#[error(source)] CommonError),

 	// Category: cannot process
 	/// Authorization Header Malformed
-	#[error("Authorization header malformed, unexpected scope: {0}")]
+	#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
 	AuthorizationHeaderMalformed(String),

-	/// The provided digest (checksum) value was invalid
-	#[error("Invalid digest: {0}")]
-	InvalidDigest(String),
-
 	/// The object requested don't exists
-	#[error("Key not found")]
+	#[error(display = "Key not found")]
 	NoSuchKey,

 	/// Some base64 encoded data was badly encoded
-	#[error("Invalid base64: {0}")]
-	InvalidBase64(#[from] base64::DecodeError),
+	#[error(display = "Invalid base64: {}", _0)]
+	InvalidBase64(#[error(source)] base64::DecodeError),

 	/// Invalid causality token
-	#[error("Invalid causality token")]
+	#[error(display = "Invalid causality token")]
 	InvalidCausalityToken,

 	/// The client asked for an invalid return format (invalid Accept header)
-	#[error("Not acceptable: {0}")]
+	#[error(display = "Not acceptable: {}", _0)]
 	NotAcceptable(String),

 	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
-	#[error("Invalid UTF-8: {0}")]
-	InvalidUtf8Str(#[from] std::str::Utf8Error),
+	#[error(display = "Invalid UTF-8: {}", _0)]
+	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
 }

 commonErrorDerivative!(Error);
@@ -58,7 +54,6 @@ impl From<SignatureError> for Error {
 				Self::AuthorizationHeaderMalformed(c)
 			}
 			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
-			SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
 		}
 	}
 }
@@ -76,7 +71,6 @@ impl Error {
 			Error::InvalidBase64(_) => "InvalidBase64",
 			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
 			Error::InvalidCausalityToken => "CausalityToken",
-			Error::InvalidDigest(_) => "InvalidDigest",
 		}
 	}
 }
@@ -91,7 +85,6 @@ impl ApiError for Error {
 			Error::AuthorizationHeaderMalformed(_)
 			| Error::InvalidBase64(_)
 			| Error::InvalidUtf8Str(_)
-			| Error::InvalidDigest(_)
 			| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
 		}
 	}
@@ -144,7 +144,9 @@ pub async fn handle_insert_item(
 		.map(parse_causality_token)
 		.transpose()?;

-	let body = req.into_body().collect().await?;
+	let body = http_body_util::BodyExt::collect(req.into_body())
+		.await?
+		.to_bytes();

 	let value = DvvsValue::Value(body.to_vec());
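Both forms buffer the entire request body into memory; the right-hand side just spells out the http-body-util calls. A self-contained sketch of that helper (illustrative name):

```rust
use http_body_util::BodyExt;

// Collect an http_body::Body into contiguous Bytes, as done above.
async fn read_full_body<B>(body: B) -> Result<bytes::Bytes, B::Error>
where
    B: http_body::Body,
{
    Ok(body.collect().await?.to_bytes())
}
```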
@@ -1,12 +1,12 @@
 [package]
 name = "garage_api_s3"
-version = "1.3.1"
+version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "S3 API server crate for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../../README.md"
+readme = "../../README.md"

 [lib]
 path = "lib.rs"

@@ -29,7 +29,7 @@ bytes.workspace = true
 chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
-thiserror.workspace = true
+err-derive.workspace = true
 hex.workspace = true
 tracing.workspace = true
 md-5.workspace = true
@@ -121,9 +121,7 @@ impl ApiHandler for S3ApiServer {
 			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}

-		let verified_request = verify_request(&garage, req, "s3").await?;
-		let req = verified_request.request;
-		let api_key = verified_request.access_key;
+		let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;

 		let bucket_name = match bucket_name {
 			None => {
@@ -136,7 +134,14 @@ impl ApiHandler for S3ApiServer {
 		// Special code path for CreateBucket API endpoint
 		if let Endpoint::CreateBucket {} = endpoint {
-			return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
+			return handle_create_bucket(
+				&garage,
+				req,
+				content_sha256,
+				&api_key.key_id,
+				bucket_name,
+			)
+			.await;
 		}

 		let bucket_id = garage
@@ -174,7 +179,7 @@ impl ApiHandler for S3ApiServer {
 		let resp = match endpoint {
 			Endpoint::HeadObject {
 				key, part_number, ..
-			} => handle_head(ctx, &req.map(|_| ()), &key, part_number).await,
+			} => handle_head(ctx, &req, &key, part_number).await,
 			Endpoint::GetObject {
 				key,
 				part_number,
@@ -194,20 +199,20 @@ impl ApiHandler for S3ApiServer {
 					response_content_type,
 					response_expires,
 				};
-				handle_get(ctx, &req.map(|_| ()), &key, part_number, overrides).await
+				handle_get(ctx, &req, &key, part_number, overrides).await
 			}
 			Endpoint::UploadPart {
 				key,
 				part_number,
 				upload_id,
-			} => handle_put_part(ctx, req, &key, part_number, &upload_id).await,
+			} => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
 			Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
 			Endpoint::UploadPartCopy {
 				key,
 				part_number,
 				upload_id,
 			} => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
-			Endpoint::PutObject { key } => handle_put(ctx, req, &key).await,
+			Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
 			Endpoint::AbortMultipartUpload { key, upload_id } => {
 				handle_abort_multipart_upload(ctx, &key, &upload_id).await
 			}
@@ -216,7 +221,7 @@ impl ApiHandler for S3ApiServer {
 				handle_create_multipart_upload(ctx, &req, &key).await
 			}
 			Endpoint::CompleteMultipartUpload { key, upload_id } => {
-				handle_complete_multipart_upload(ctx, req, &key, &upload_id).await
+				handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
 			}
 			Endpoint::CreateBucket {} => unreachable!(),
 			Endpoint::HeadBucket {} => {
@@ -226,7 +231,6 @@ impl ApiHandler for S3ApiServer {
 			Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
 			Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
 			Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
-			Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
 			Endpoint::ListObjects {
 				delimiter,
 				encoding_type,
@@ -320,15 +324,17 @@ impl ApiHandler for S3ApiServer {
 				};
 				handle_list_parts(ctx, req, &query).await
 			}
-			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req).await,
+			Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
 			Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
-			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req).await,
+			Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
 			Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
 			Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
-			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req).await,
+			Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
 			Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
 			Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
-			Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await,
+			Endpoint::PutBucketLifecycleConfiguration {} => {
+				handle_put_lifecycle(ctx, req, content_sha256).await
+			}
 			Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
 			endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
 		};
@@ -343,12 +349,6 @@ impl ApiHandler for S3ApiServer {
 		Ok(resp_ok)
 	}
-
-	fn key_id_from_request(&self, req: &Request<IncomingBody>) -> Option<String> {
-		garage_api_common::signature::payload::Authorization::parse_header(req.headers())
-			.map(|auth| auth.key_id)
-			.ok()
-	}
 }

 impl ApiEndpoint for S3ApiEndpoint {
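The `key_id_from_request` method removed above extracts the access key id from a SigV4 `Authorization` header without verifying anything. In spirit, that extraction is just this (hedged sketch; the function name is illustrative, not the actual `Authorization::parse_header` implementation):

```rust
// The access key id is the first '/'-separated component of the Credential=
// field, e.g.:
//   AWS4-HMAC-SHA256 Credential=GK31c2f2.../20250218/garage/s3/aws4_request, SignedHeaders=..., Signature=...
fn key_id_from_authorization(header: &str) -> Option<&str> {
    header.split("Credential=").nth(1)?.split('/').next()
}
```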
@@ -1,18 +1,21 @@
 use std::collections::HashMap;

+use http_body_util::BodyExt;
 use hyper::{Request, Response, StatusCode};

 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::Bucket;
 use garage_model::garage::Garage;
-use garage_model::key_table::{Key, KeyParams};
+use garage_model::key_table::Key;
 use garage_model::permission::BucketKeyPerm;
 use garage_table::util::*;
 use garage_util::crdt::*;
+use garage_util::data::*;
 use garage_util::time::*;

 use garage_api_common::common_error::CommonError;
 use garage_api_common::helpers::*;
+use garage_api_common::signature::verify_signed_content;

 use crate::api_server::{ReqBody, ResBody};
 use crate::error::*;
@@ -44,55 +47,6 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
 		.body(string_body(xml))?)
 }

-pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
-	let ReqCtx {
-		bucket_id, api_key, ..
-	} = ctx;
-	let key_p = api_key.params().ok_or_internal_error(
-		"Key should not be in deleted state at this point (in handle_get_bucket_acl)",
-	)?;
-	let mut grants: Vec<s3_xml::Grant> = vec![];
-	let kp = api_key.bucket_permissions(&bucket_id);
-	if kp.allow_owner {
-		grants.push(s3_xml::Grant {
-			grantee: create_grantee(&key_p, &api_key),
-			permission: s3_xml::Value("FULL_CONTROL".to_string()),
-		});
-	} else {
-		if kp.allow_read {
-			grants.push(s3_xml::Grant {
-				grantee: create_grantee(&key_p, &api_key),
-				permission: s3_xml::Value("READ".to_string()),
-			});
-			grants.push(s3_xml::Grant {
-				grantee: create_grantee(&key_p, &api_key),
-				permission: s3_xml::Value("READ_ACP".to_string()),
-			});
-		}
-		if kp.allow_write {
-			grants.push(s3_xml::Grant {
-				grantee: create_grantee(&key_p, &api_key),
-				permission: s3_xml::Value("WRITE".to_string()),
-			});
-		}
-	}
-	let access_control_policy = s3_xml::AccessControlPolicy {
-		xmlns: (),
-		owner: None,
-		acl: s3_xml::AccessControlList { entries: grants },
-	};
-	let xml = s3_xml::to_xml_with_header(&access_control_policy)?;
-	trace!("xml: {}", xml);
-
-	Ok(Response::builder()
-		.header("Content-Type", "application/xml")
-		.body(string_body(xml))?)
-}
-
 pub async fn handle_list_buckets(
 	garage: &Garage,
 	api_key: &Key,
@@ -168,10 +122,15 @@ pub async fn handle_list_buckets(
 pub async fn handle_create_bucket(
 	garage: &Garage,
 	req: Request<ReqBody>,
+	content_sha256: Option<Hash>,
 	api_key_id: &String,
 	bucket_name: String,
 ) -> Result<Response<ResBody>, Error> {
-	let body = req.into_body().collect().await?;
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
+
+	if let Some(content_sha256) = content_sha256 {
+		verify_signed_content(content_sha256, &body[..])?;
+	}

 	let cmd =
 		parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?;
@@ -221,7 +180,7 @@ pub async fn handle_create_bucket(
 	}

 	// Create the bucket!
-	if !is_valid_bucket_name(&bucket_name, garage.config.allow_punycode) {
+	if !is_valid_bucket_name(&bucket_name) {
 		return Err(Error::bad_request(format!(
 			"{}: {}",
 			bucket_name, INVALID_BUCKET_NAME_MESSAGE
@@ -290,11 +249,11 @@ pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Erro
 	// 1. delete bucket alias
 	if is_local_alias {
 		helper
-			.purge_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
+			.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
 			.await?;
 	} else {
 		helper
-			.purge_global_bucket_alias(*bucket_id, bucket_name)
+			.unset_global_bucket_alias(*bucket_id, bucket_name)
 			.await?;
 	}
@@ -360,15 +319,6 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
 	Some(ret)
 }

-fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee {
-	s3_xml::Grantee {
-		xmlns_xsi: (),
-		typ: "CanonicalUser".to_string(),
-		display_name: Some(s3_xml::Value(key_params.name.get().to_string())),
-		id: Some(s3_xml::Value(api_key.key_id.to_string())),
-	}
-}
-
 #[cfg(test)]
 mod tests {
 	use super::*;
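The `verify_signed_content` call added on the right-hand side compares the SHA-256 of the buffered body against the hash that was covered by the request signature. A hedged sketch of that check (Garage's `Hash` and error types are simplified to plain Rust types here):

```rust
use sha2::{Digest, Sha256};

// Reject the request if the body does not hash to the value the client signed.
fn verify_signed_content(expected_sha256: [u8; 32], body: &[u8]) -> Result<(), &'static str> {
    let digest: [u8; 32] = Sha256::digest(body).into();
    if digest == expected_sha256 {
        Ok(())
    } else {
        Err("Request content hash does not match signed hash")
    }
}
```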
@@ -11,12 +11,11 @@ use sha2::Sha256;
 use http::{HeaderMap, HeaderName, HeaderValue};

 use garage_util::data::*;
+use garage_util::error::OkOrMessage;

-use super::*;
+use garage_model::s3::object_table::*;

-pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue};
+use crate::error::*;

-pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");
-
 pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
 	HeaderName::from_static("x-amz-checksum-algorithm");
@@ -32,8 +31,8 @@ pub type Md5Checksum = [u8; 16];
 pub type Sha1Checksum = [u8; 20];
 pub type Sha256Checksum = [u8; 32];

-#[derive(Debug, Default, Clone)]
-pub struct ExpectedChecksums {
+#[derive(Debug, Default)]
+pub(crate) struct ExpectedChecksums {
 	// base64-encoded md5 (content-md5 header)
 	pub md5: Option<String>,
 	// content_sha256 (as a Hash / FixedBytes32)
@@ -42,7 +41,7 @@ pub struct ExpectedChecksums {
 	pub extra: Option<ChecksumValue>,
 }

-pub struct Checksummer {
+pub(crate) struct Checksummer {
 	pub crc32: Option<Crc32>,
 	pub crc32c: Option<Crc32c>,
 	pub md5: Option<Md5>,
@@ -51,7 +50,7 @@ pub struct Checksummer {
 }

 #[derive(Default)]
-pub struct Checksums {
+pub(crate) struct Checksums {
 	pub crc32: Option<Crc32Checksum>,
 	pub crc32c: Option<Crc32cChecksum>,
 	pub md5: Option<Md5Checksum>,
@@ -60,48 +59,34 @@ pub struct Checksums {
 }

 impl Checksummer {
-	pub fn new() -> Self {
-		Self {
+	pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
+		let mut ret = Self {
 			crc32: None,
 			crc32c: None,
 			md5: None,
 			sha1: None,
 			sha256: None,
-		}
-	}
+		};

-	pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self {
-		let mut ret = Self::new();
-		ret.add_expected(expected);
-		if add_md5 {
-			ret.add_md5();
+		if expected.md5.is_some() || require_md5 {
+			ret.md5 = Some(Md5::new());
+		}
+		if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
+			ret.sha256 = Some(Sha256::new());
+		}
+		if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
+			ret.crc32 = Some(Crc32::new());
+		}
+		if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
+			ret.crc32c = Some(Crc32c::default());
+		}
+		if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
+			ret.sha1 = Some(Sha1::new());
 		}
 		ret
 	}

-	pub fn add_md5(&mut self) {
-		self.md5 = Some(Md5::new());
-	}
-
-	pub fn add_expected(&mut self, expected: &ExpectedChecksums) {
-		if expected.md5.is_some() {
-			self.md5 = Some(Md5::new());
-		}
-		if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
-			self.sha256 = Some(Sha256::new());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
-			self.crc32 = Some(Crc32::new());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
-			self.crc32c = Some(Crc32c::default());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
-			self.sha1 = Some(Sha1::new());
-		}
-	}
-
-	pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
+	pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
 		match algo {
 			Some(ChecksumAlgorithm::Crc32) => {
 				self.crc32 = Some(Crc32::new());
@@ -120,7 +105,7 @@ impl Checksummer {
 		self
 	}

-	pub fn update(&mut self, bytes: &[u8]) {
+	pub(crate) fn update(&mut self, bytes: &[u8]) {
 		if let Some(crc32) = &mut self.crc32 {
 			crc32.update(bytes);
 		}
@@ -138,7 +123,7 @@ impl Checksummer {
 		}
 	}

-	pub fn finalize(self) -> Checksums {
+	pub(crate) fn finalize(self) -> Checksums {
 		Checksums {
 			crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
 			crc32c: self
@@ -198,56 +183,153 @@ impl Checksums {
 // ----

-pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error> {
-	match algo {
-		"CRC32" => Ok(ChecksumAlgorithm::Crc32),
-		"CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
-		"SHA1" => Ok(ChecksumAlgorithm::Sha1),
-		"SHA256" => Ok(ChecksumAlgorithm::Sha256),
-		_ => Err(Error::bad_request("invalid checksum algorithm")),
-	}
-}
+#[derive(Default)]
+pub(crate) struct MultipartChecksummer {
+	pub md5: Md5,
+	pub extra: Option<MultipartExtraChecksummer>,
+}
+
+pub(crate) enum MultipartExtraChecksummer {
+	Crc32(Crc32),
+	Crc32c(Crc32c),
+	Sha1(Sha1),
+	Sha256(Sha256),
+}
+
+impl MultipartChecksummer {
+	pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
+		Self {
+			md5: Md5::new(),
+			extra: match algo {
+				None => None,
+				Some(ChecksumAlgorithm::Crc32) => {
+					Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
+				}
+				Some(ChecksumAlgorithm::Crc32c) => {
+					Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
+				}
+				Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
+				Some(ChecksumAlgorithm::Sha256) => {
+					Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
+				}
+			},
+		}
+	}
+
+	pub(crate) fn update(
+		&mut self,
+		etag: &str,
+		checksum: Option<ChecksumValue>,
+	) -> Result<(), Error> {
+		self.md5
+			.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
+		match (&mut self.extra, checksum) {
+			(None, _) => (),
+			(
+				Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
+				Some(ChecksumValue::Crc32(x)),
+			) => {
+				crc32.update(&x);
+			}
+			(
+				Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
+				Some(ChecksumValue::Crc32c(x)),
+			) => {
+				crc32c.write(&x);
+			}
+			(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
+				sha1.update(&x);
+			}
+			(
+				Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
+				Some(ChecksumValue::Sha256(x)),
+			) => {
+				sha256.update(&x);
+			}
+			(Some(_), b) => {
+				return Err(Error::internal_error(format!(
+					"part checksum was not computed correctly, got: {:?}",
+					b
+				)))
+			}
+		}
+		Ok(())
+	}
+
+	pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
+		let md5 = self.md5.finalize()[..].try_into().unwrap();
+		let extra = match self.extra {
+			None => None,
+			Some(MultipartExtraChecksummer::Crc32(crc32)) => {
+				Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
+			}
+			Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
+				u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
+			)),
+			Some(MultipartExtraChecksummer::Sha1(sha1)) => {
+				Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
+			}
+			Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
+				sha256.finalize()[..].try_into().unwrap(),
+			)),
+		};
+		(md5, extra)
+	}
+}
+
+// ----

 /// Extract the value of the x-amz-checksum-algorithm header
-pub fn request_checksum_algorithm(
+pub(crate) fn request_checksum_algorithm(
 	headers: &HeaderMap<HeaderValue>,
 ) -> Result<Option<ChecksumAlgorithm>, Error> {
 	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
 		None => Ok(None),
-		Some(x) => parse_checksum_algorithm(x.to_str()?).map(Some),
-	}
-}
-
-pub fn request_trailer_checksum_algorithm(
-	headers: &HeaderMap<HeaderValue>,
-) -> Result<Option<ChecksumAlgorithm>, Error> {
-	match headers.get(X_AMZ_TRAILER).map(|x| x.to_str()).transpose()? {
-		None => Ok(None),
-		Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
-		Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
-		Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
-		Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
+		Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
+		Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
+		Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
+		Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
 		_ => Err(Error::bad_request("invalid checksum algorithm")),
 	}
 }

 /// Extract the value of any of the x-amz-checksum-* headers
-pub fn request_checksum_value(
+pub(crate) fn request_checksum_value(
 	headers: &HeaderMap<HeaderValue>,
 ) -> Result<Option<ChecksumValue>, Error> {
 	let mut ret = vec![];

-	if headers.contains_key(X_AMZ_CHECKSUM_CRC32) {
-		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32)?);
+	if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
+		let crc32 = BASE64_STANDARD
+			.decode(&crc32_str)
+			.ok()
+			.and_then(|x| x.try_into().ok())
+			.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
+		ret.push(ChecksumValue::Crc32(crc32))
 	}
-	if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
-		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
+	if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
+		let crc32c = BASE64_STANDARD
+			.decode(&crc32c_str)
+			.ok()
+			.and_then(|x| x.try_into().ok())
+			.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
+		ret.push(ChecksumValue::Crc32c(crc32c))
 	}
-	if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
-		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
+	if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
+		let sha1 = BASE64_STANDARD
+			.decode(&sha1_str)
+			.ok()
+			.and_then(|x| x.try_into().ok())
+			.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
+		ret.push(ChecksumValue::Sha1(sha1))
 	}
-	if headers.contains_key(X_AMZ_CHECKSUM_SHA256) {
-		ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha256)?);
+	if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
+		let sha256 = BASE64_STANDARD
+			.decode(&sha256_str)
+			.ok()
+			.and_then(|x| x.try_into().ok())
+			.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
		ret.push(ChecksumValue::Sha256(sha256))
 	}

 	if ret.len() > 1 {
@@ -260,47 +342,48 @@ pub fn request_checksum_value(
 /// Checks for the presence of x-amz-checksum-algorithm
 /// if so extract the corresponding x-amz-checksum-* value
-pub fn extract_checksum_value(
+pub(crate) fn request_checksum_algorithm_value(
 	headers: &HeaderMap<HeaderValue>,
-	algo: ChecksumAlgorithm,
-) -> Result<ChecksumValue, Error> {
-	match algo {
-		ChecksumAlgorithm::Crc32 => {
+) -> Result<Option<ChecksumValue>, Error> {
+	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
+		Some(x) if x == "CRC32" => {
 			let crc32 = headers
 				.get(X_AMZ_CHECKSUM_CRC32)
 				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
 				.and_then(|x| x.try_into().ok())
 				.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
-			Ok(ChecksumValue::Crc32(crc32))
+			Ok(Some(ChecksumValue::Crc32(crc32)))
 		}
-		ChecksumAlgorithm::Crc32c => {
+		Some(x) if x == "CRC32C" => {
 			let crc32c = headers
 				.get(X_AMZ_CHECKSUM_CRC32C)
 				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
 				.and_then(|x| x.try_into().ok())
 				.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
-			Ok(ChecksumValue::Crc32c(crc32c))
+			Ok(Some(ChecksumValue::Crc32c(crc32c)))
 		}
-		ChecksumAlgorithm::Sha1 => {
+		Some(x) if x == "SHA1" => {
 			let sha1 = headers
 				.get(X_AMZ_CHECKSUM_SHA1)
 				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
 				.and_then(|x| x.try_into().ok())
 				.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
-			Ok(ChecksumValue::Sha1(sha1))
+			Ok(Some(ChecksumValue::Sha1(sha1)))
 		}
-		ChecksumAlgorithm::Sha256 => {
+		Some(x) if x == "SHA256" => {
 			let sha256 = headers
 				.get(X_AMZ_CHECKSUM_SHA256)
 				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
 				.and_then(|x| x.try_into().ok())
 				.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
-			Ok(ChecksumValue::Sha256(sha256))
+			Ok(Some(ChecksumValue::Sha256(sha256)))
 		}
+		Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
+		None => Ok(None),
 	}
 }

-pub fn add_checksum_response_headers(
+pub(crate) fn add_checksum_response_headers(
 	checksum: &Option<ChecksumValue>,
 	mut resp: http::response::Builder,
 ) -> http::response::Builder {
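For context on the `MultipartChecksummer` added on the right: its `md5` field implements the standard S3 multipart ETag, which is the MD5 of the concatenated *binary* per-part MD5s, suffixed with the part count. A hedged standalone sketch of that computation (using the same md-5 and hex crates that appear in this diff):

```rust
use md5::{Digest, Md5};

// Combine per-part ETags (hex MD5 strings) into the final multipart ETag,
// e.g. "9bb58f26192e4ba00f01e2e7b136bbd8-3" for a 3-part upload.
fn multipart_etag(part_etags: &[&str]) -> Option<String> {
    let mut md5 = Md5::new();
    for etag in part_etags {
        md5.update(&hex::decode(etag).ok()?); // feed the binary md5 of each part
    }
    Some(format!("{}-{}", hex::encode(md5.finalize()), part_etags.len()))
}
```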
@@ -1,9 +1,9 @@
 use std::pin::Pin;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};

 use futures::{stream, stream::Stream, StreamExt, TryStreamExt};

 use bytes::Bytes;
-use http::header::HeaderName;
 use hyper::{Request, Response};
 use serde::Serialize;

@@ -21,26 +21,16 @@ use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;

 use garage_api_common::helpers::*;
-use garage_api_common::signature::checksum::*;

 use crate::api_server::{ReqBody, ResBody};
+use crate::checksum::*;
 use crate::encryption::EncryptionParams;
 use crate::error::*;
-use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
+use crate::get::full_object_byte_stream;
 use crate::multipart;
-use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
-use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
+use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
 use crate::xml::{self as s3_xml, xmlns_tag};

-pub const X_AMZ_COPY_SOURCE_IF_MATCH: HeaderName =
-	HeaderName::from_static("x-amz-copy-source-if-match");
-pub const X_AMZ_COPY_SOURCE_IF_NONE_MATCH: HeaderName =
-	HeaderName::from_static("x-amz-copy-source-if-none-match");
-pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: HeaderName =
-	HeaderName::from_static("x-amz-copy-source-if-modified-since");
-pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: HeaderName =
-	HeaderName::from_static("x-amz-copy-source-if-unmodified-since");
-
 // -------- CopyObject ---------

 pub async fn handle_copy(
@@ -48,7 +38,7 @@ pub async fn handle_copy(
 	req: &Request<ReqBody>,
 	dest_key: &str,
 ) -> Result<Response<ResBody>, Error> {
-	let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
+	let copy_precondition = CopyPreconditionHeaders::parse(req)?;

 	let checksum_algorithm = request_checksum_algorithm(req.headers())?;

@@ -58,7 +48,7 @@ pub async fn handle_copy(
 		extract_source_info(&source_object)?;

 	// Check precondition, e.g. x-amz-copy-source-if-match
-	copy_precondition.check_copy_source(source_version, &source_version_meta.etag)?;
+	copy_precondition.check(source_version, &source_version_meta.etag)?;

 	// Determine encryption parameters
 	let (source_encryption, source_object_meta_inner) =
@@ -83,20 +73,9 @@ pub async fn handle_copy(
 	let dest_object_meta = ObjectVersionMetaInner {
 		headers: match req.headers().get("x-amz-metadata-directive") {
 			Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
-				extract_metadata_headers(req.headers())?
+				get_headers(req.headers())?
 			}
-			_ => {
-				// The x-amz-website-redirect-location header is not copied, instead
-				// it is replaced by the value from the request (or removed if no
-				// value was specified)
-				let is_redirect =
-					|(key, _): &(String, String)| key == X_AMZ_WEBSITE_REDIRECT_LOCATION.as_str();
-				let mut headers: Vec<_> = source_object_meta_inner.headers.clone();
-				headers.retain(|h| !is_redirect(h));
-				let new_headers = extract_metadata_headers(req.headers())?;
-				headers.extend(new_headers.into_iter().filter(is_redirect));
-				headers
-			}
+			_ => source_object_meta_inner.into_owned().headers,
 		},
 		checksum: source_checksum,
 	};
@@ -237,7 +216,6 @@ async fn handle_copy_metaonly(
 		.get(&source_version.uuid, &EmptyKey)
 		.await?;
 	let source_version = source_version.ok_or(Error::NoSuchKey)?;
-	check_version_not_deleted(&source_version)?;

 	// Write an "uploading" marker in Object table
 	// This holds a reference to the object in the Version table
@@ -357,7 +335,7 @@ pub async fn handle_upload_part_copy(
 	part_number: u64,
 	upload_id: &str,
 ) -> Result<Response<ResBody>, Error> {
-	let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
+	let copy_precondition = CopyPreconditionHeaders::parse(req)?;

 	let dest_upload_id = multipart::decode_upload_id(upload_id)?;

@@ -373,7 +351,7 @@ pub async fn handle_upload_part_copy(
 		extract_source_info(&source_object)?;

 	// Check precondition on source, e.g. x-amz-copy-source-if-match
-	copy_precondition.check_copy_source(source_object_version, &source_version_meta.etag)?;
+	copy_precondition.check(source_object_version, &source_version_meta.etag)?;

 	// Determine encryption parameters
 	let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
@@ -429,7 +407,6 @@ pub async fn handle_upload_part_copy(
 		.get(&source_object_version.uuid, &EmptyKey)
 		.await?
 		.ok_or(Error::NoSuchKey)?;
-	check_version_not_deleted(&source_version)?;

 	// We want to reuse blocks from the source version as much as possible.
 	// However, we still need to get the data from these blocks
@@ -561,7 +538,6 @@ pub async fn handle_upload_part_copy(
 	let mut current_offset = 0;
 	let mut next_block = defragmenter.next().await?;

-	let mut blocks_to_dup = dest_version.clone();
-
 	// TODO this could be optimized similarly to read_and_put_blocks
 	// low priority because uploadpartcopy is rarely used
@@ -591,7 +567,8 @@ pub async fn handle_upload_part_copy(
 			.unwrap()?;
 		checksummer = checksummer_updated;

-		let (version_block_key, version_block) = (
+		dest_version.blocks.clear();
+		dest_version.blocks.put(
 			VersionBlockKey {
 				part_number,
 				offset: current_offset,
@@ -603,56 +580,37 @@ pub async fn handle_upload_part_copy(
 		);
 		current_offset += data_len;

-		let next = if let Some(final_data) = data_to_upload {
-			dest_version.blocks.clear();
-			dest_version.blocks.put(version_block_key, version_block);
-			let block_ref = BlockRef {
-				block: final_hash,
-				version: dest_version_id,
-				deleted: false.into(),
-			};
-			let (_, _, _, next) = futures::try_join!(
-				// Thing 1: if the block is not exactly a block that existed before,
-				// we need to insert that data as a new block.
-				garage.block_manager.rpc_put_block(
-					final_hash,
-					final_data,
-					dest_encryption.is_encrypted(),
-					None
-				),
-				// Thing 2: we need to insert the block in the version
-				garage.version_table.insert(&dest_version),
-				// Thing 3: we need to add a block reference
-				garage.block_ref_table.insert(&block_ref),
-				// Thing 4: we need to read the next block
-				defragmenter.next(),
-			)?;
-			next
-		} else {
-			blocks_to_dup.blocks.put(version_block_key, version_block);
-			defragmenter.next().await?
-		};
+		let block_ref = BlockRef {
+			block: final_hash,
+			version: dest_version_id,
+			deleted: false.into(),
+		};
+
+		let (_, _, _, next) = futures::try_join!(
+			// Thing 1: if the block is not exactly a block that existed before,
+			// we need to insert that data as a new block.
+			async {
+				if let Some(final_data) = data_to_upload {
+					garage
+						.block_manager
+						.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
+						.await
+				} else {
+					Ok(())
+				}
+			},
+			// Thing 2: we need to insert the block in the version
+			garage.version_table.insert(&dest_version),
+			// Thing 3: we need to add a block reference
+			garage.block_ref_table.insert(&block_ref),
+			// Thing 4: we need to read the next block
+			defragmenter.next(),
+		)?;

 		next_block = next;
 	}

 	assert_eq!(current_offset, source_range.length);

-	// Put the duplicated blocks into the version & block_refs tables
-	let block_refs_to_put = blocks_to_dup
-		.blocks
-		.items()
-		.iter()
-		.map(|b| BlockRef {
-			block: b.1.hash,
-			version: dest_version_id,
-			deleted: false.into(),
-		})
-		.collect::<Vec<_>>();
-	futures::try_join!(
-		garage.version_table.insert(&blocks_to_dup),
-		garage.block_ref_table.insert_many(&block_refs_to_put[..]),
-	)?;
-
 	let checksums = checksummer.finalize();
 	let etag = dest_encryption.etag_from_md5(&checksums.md5);
 	let checksum = checksums.extract(dest_object_checksum_algorithm);
@@ -745,6 +703,97 @@ fn extract_source_info(
 	Ok((source_version, source_version_data, source_version_meta))
 }

+struct CopyPreconditionHeaders {
+	copy_source_if_match: Option<Vec<String>>,
+	copy_source_if_modified_since: Option<SystemTime>,
+	copy_source_if_none_match: Option<Vec<String>>,
+	copy_source_if_unmodified_since: Option<SystemTime>,
+}
+
+impl CopyPreconditionHeaders {
+	fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
+		Ok(Self {
+			copy_source_if_match: req
+				.headers()
+				.get("x-amz-copy-source-if-match")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(|x| {
+					x.split(',')
+						.map(|m| m.trim().trim_matches('"').to_string())
+						.collect::<Vec<_>>()
+				}),
+			copy_source_if_modified_since: req
+				.headers()
+				.get("x-amz-copy-source-if-modified-since")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(httpdate::parse_http_date)
+				.transpose()
+				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?,
+			copy_source_if_none_match: req
+				.headers()
+				.get("x-amz-copy-source-if-none-match")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(|x| {
+					x.split(',')
+						.map(|m| m.trim().trim_matches('"').to_string())
+						.collect::<Vec<_>>()
+				}),
+			copy_source_if_unmodified_since: req
+				.headers()
+				.get("x-amz-copy-source-if-unmodified-since")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(httpdate::parse_http_date)
+				.transpose()
+				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?,
+		})
+	}
+
+	fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
+		let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
+
+		let ok = match (
+			&self.copy_source_if_match,
+			&self.copy_source_if_unmodified_since,
+			&self.copy_source_if_none_match,
+			&self.copy_source_if_modified_since,
+		) {
+			// TODO I'm not sure all of the conditions are evaluated correctly here
+
+			// If we have both if-match and if-unmodified-since,
+			// basically we don't care about if-unmodified-since,
+			// because in the spec it says that if if-match evaluates to
+			// true but if-unmodified-since evaluates to false,
+			// the copy is still done.
+			(Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"),
+			(None, Some(ius), None, None) => v_date <= *ius,
+
+			// If we have both if-none-match and if-modified-since,
+			// then both of the two conditions must evaluate to true
+			(None, None, Some(inm), Some(ims)) => {
+				!inm.iter().any(|x| x == etag || x == "*") && v_date > *ims
+			}
+			(None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"),
+			(None, None, None, Some(ims)) => v_date > *ims,
+			(None, None, None, None) => true,
+			_ => {
+				return Err(Error::bad_request(
+					"Invalid combination of x-amz-copy-source-if-xxxxx headers",
+				))
+			}
+		};
+
+		if ok {
+			Ok(())
+		} else {
+			Err(Error::PreconditionFailed)
+		}
+	}
+}
+
 type BlockStreamItemOk = (Bytes, Option<Hash>);
 type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
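Whichever precondition type is used, the if-match and if-none-match comparisons reduce to a simple ETag-list test, sketched here for illustration:

```rust
// `*` matches any existing object, per the usual HTTP conditional semantics.
fn etag_matches(candidates: &[String], etag: &str) -> bool {
    candidates.iter().any(|x| x == etag || x == "*")
}

fn main() {
    let if_match = vec!["abc".to_string()]; // header values arrive with quotes stripped
    assert!(etag_matches(&if_match, "abc"));
    assert!(!etag_matches(&if_match, "def"));
    assert!(etag_matches(&["*".to_string()], "anything"));
}
```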
@@ -2,11 +2,15 @@ use quick_xml::de::from_reader;

 use hyper::{header::HeaderName, Method, Request, Response, StatusCode};

+use http_body_util::BodyExt;
+
 use serde::{Deserialize, Serialize};

 use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
+use garage_util::data::*;

 use garage_api_common::helpers::*;
+use garage_api_common::signature::verify_signed_content;

 use crate::api_server::{ReqBody, ResBody};
 use crate::error::*;
@@ -55,6 +59,7 @@ pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
 pub async fn handle_put_cors(
 	ctx: ReqCtx,
 	req: Request<ReqBody>,
+	content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
 	let ReqCtx {
 		garage,
@@ -63,7 +68,11 @@ pub async fn handle_put_cors(
 		..
 	} = ctx;

-	let body = req.into_body().collect().await?;
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
+
+	if let Some(content_sha256) = content_sha256 {
+		verify_signed_content(content_sha256, &body[..])?;
+	}

 	let conf: CorsConfiguration = from_reader(&body as &[u8])?;
 	conf.validate()?;
@@ -88,9 +97,7 @@ pub async fn handle_put_cors(
 pub struct CorsConfiguration {
 	#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
 	pub xmlns: (),
-	// "default" is required to be able to parse an empty list of rules,
-	// cf https://docs.rs/quick-xml/latest/quick_xml/de/#sequences-xsall-and-xssequence-xml-schema-types
-	#[serde(rename = "CORSRule", default)]
+	#[serde(rename = "CORSRule")]
 	pub cors_rules: Vec<CorsRule>,
 }

@@ -272,26 +279,4 @@ mod tests {
 		Ok(())
 	}
-
-	#[test]
-	fn test_deserialize_norules() -> Result<(), Error> {
-		let message = r#"<?xml version="1.0" encoding="UTF-8"?>
-<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/" />"#;
-		let conf: CorsConfiguration = from_str(message).unwrap();
-		let ref_value = CorsConfiguration {
-			xmlns: (),
-			cors_rules: vec![],
-		};
-		assert_eq! {
-			ref_value,
-			conf
-		};
-
-		let message2 = to_xml_with_header(&ref_value)?;
-
-		let cleanup = |c: &str| c.replace(char::is_whitespace, "");
-		assert_eq!(cleanup(message), cleanup(&message2));
-
-		Ok(())
-	}
 }
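The `default` attribute on the left-hand side is what lets quick-xml accept a CORS configuration with zero rules, which the removed `test_deserialize_norules` test exercised. A standalone sketch of the mechanism (simplified rule type, illustrative struct):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CorsConfiguration {
    // Without `default`, deserializing <CORSConfiguration/> fails with a
    // missing-field error; with it, an absent sequence becomes an empty Vec.
    #[serde(rename = "CORSRule", default)]
    cors_rules: Vec<String>, // simplified rule type for illustration
}

fn main() {
    let conf: CorsConfiguration = quick_xml::de::from_str("<CORSConfiguration/>").unwrap();
    assert!(conf.cors_rules.is_empty());
}
```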
@ -1,3 +1,4 @@
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
use garage_util::data::*; use garage_util::data::*;
@ -5,6 +6,7 @@ use garage_util::data::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody}; use crate::api_server::{ReqBody, ResBody};
use crate::error::*; use crate::error::*;
@ -66,8 +68,13 @@ pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>,
pub async fn handle_delete_objects( pub async fn handle_delete_objects(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let body = req.into_body().collect().await?; let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?; let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?;
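Here, as in the CORS and lifecycle handlers, one side collects the whole body and checks it against the signed SHA-256 before parsing it. A minimal sketch of what a verify_signed_content-style check does (using the sha2 crate; Garage's Hash type and error variants are simplified here):

use sha2::{Digest, Sha256};

// `expected` is the x-amz-content-sha256 value covered by the request
// signature; `body` is the fully collected request body.
fn verify_signed_content(expected: [u8; 32], body: &[u8]) -> Result<(), String> {
    let actual: [u8; 32] = Sha256::digest(body).into();
    if actual == expected {
        Ok(())
    } else {
        Err("content checksum does not match signed value".to_string())
    }
}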

View file

@ -29,8 +29,8 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner}; use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
use garage_api_common::common_error::*; use garage_api_common::common_error::*;
use garage_api_common::signature::checksum::Md5Checksum;
use crate::checksum::Md5Checksum;
use crate::error::Error; use crate::error::Error;
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName = const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =

View file

@ -1,8 +1,8 @@
use std::convert::TryInto; use std::convert::TryInto;
use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use thiserror::Error;
use garage_model::helper::error::Error as HelperError; use garage_model::helper::error::Error as HelperError;
@ -25,67 +25,67 @@ use crate::xml as s3_xml;
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error("{0}")] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(#[from] CommonError), Common(#[error(source)] CommonError),
// Category: cannot process // Category: cannot process
/// Authorization Header Malformed /// Authorization Header Malformed
#[error("Authorization header malformed, unexpected scope: {0}")] #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
AuthorizationHeaderMalformed(String), AuthorizationHeaderMalformed(String),
/// The requested object doesn't exist /// The requested object doesn't exist
#[error("Key not found")] #[error(display = "Key not found")]
NoSuchKey, NoSuchKey,
/// The requested multipart upload doesn't exist /// The requested multipart upload doesn't exist
#[error("Upload not found")] #[error(display = "Upload not found")]
NoSuchUpload, NoSuchUpload,
/// Precondition failed (e.g. x-amz-copy-source-if-match) /// Precondition failed (e.g. x-amz-copy-source-if-match)
#[error("At least one of the preconditions you specified did not hold")] #[error(display = "At least one of the preconditions you specified did not hold")]
PreconditionFailed, PreconditionFailed,
/// Parts specified in CMU request do not match parts actually uploaded /// Parts specified in CMU request do not match parts actually uploaded
#[error("Parts given to CompleteMultipartUpload do not match uploaded parts")] #[error(display = "Parts given to CompleteMultipartUpload do not match uploaded parts")]
InvalidPart, InvalidPart,
/// Parts given to CompleteMultipartUpload were not in ascending order /// Parts given to CompleteMultipartUpload were not in ascending order
#[error("Parts given to CompleteMultipartUpload were not in ascending order")] #[error(display = "Parts given to CompleteMultipartUpload were not in ascending order")]
InvalidPartOrder, InvalidPartOrder,
/// In CompleteMultipartUpload: not enough data /// In CompleteMultipartUpload: not enough data
/// (here we are more lenient than AWS S3) /// (here we are more lenient than AWS S3)
#[error("Proposed upload is smaller than the minimum allowed object size")] #[error(display = "Proposed upload is smaller than the minimum allowed object size")]
EntityTooSmall, EntityTooSmall,
// Category: bad request // Category: bad request
/// The request contained an invalid UTF-8 sequence in its path or in other parameters /// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[from] std::str::Utf8Error), InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
/// The request used an invalid path /// The request used an invalid path
#[error("Invalid UTF-8: {0}")] #[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8String(#[from] std::string::FromUtf8Error), InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
/// The client sent invalid XML data /// The client sent invalid XML data
#[error("Invalid XML: {0}")] #[error(display = "Invalid XML: {}", _0)]
InvalidXml(String), InvalidXml(String),
/// The client sent a range header with invalid value /// The client sent a range header with invalid value
#[error("Invalid HTTP range: {0:?}")] #[error(display = "Invalid HTTP range: {:?}", _0)]
InvalidRange((http_range::HttpRangeParseError, u64)), InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
/// The client sent a range header with invalid value /// The client sent a range header with invalid value
#[error("Invalid encryption algorithm: {0:?}, should be AES256")] #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
InvalidEncryptionAlgorithm(String), InvalidEncryptionAlgorithm(String),
/// The provided digest (checksum) value was invalid /// The client sent invalid XML data
#[error("Invalid digest: {0}")] #[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String), InvalidDigest(String),
/// The client sent a request for an action not supported by garage /// The client sent a request for an action not supported by garage
#[error("Unimplemented action: {0}")] #[error(display = "Unimplemented action: {}", _0)]
NotImplemented(String), NotImplemented(String),
} }
@ -99,12 +99,6 @@ impl From<HelperError> for Error {
} }
} }
impl From<(http_range::HttpRangeParseError, u64)> for Error {
fn from(err: (http_range::HttpRangeParseError, u64)) -> Error {
Error::InvalidRange(err)
}
}
impl From<roxmltree::Error> for Error { impl From<roxmltree::Error> for Error {
fn from(err: roxmltree::Error) -> Self { fn from(err: roxmltree::Error) -> Self {
Self::InvalidXml(format!("{}", err)) Self::InvalidXml(format!("{}", err))
@ -125,7 +119,6 @@ impl From<SignatureError> for Error {
Self::AuthorizationHeaderMalformed(c) Self::AuthorizationHeaderMalformed(c)
} }
SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i), SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
} }
} }
} }
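This hunk shows the same error enum under two derive crates: thiserror with inline `{0}` format strings on one side, err_derive with `display = "...", _0` attributes on the other. Note the trade-off around InvalidRange: err_derive's `#[error(from)]` can derive From for the range-parse tuple, while thiserror's `#[from]` requires the field to be an error source, so that side writes the From impl by hand. A minimal sketch of the thiserror style (illustrative variants, not Garage's):

use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
    // err_derive equivalent: #[error(display = "Invalid XML: {}", _0)]
    #[error("Invalid XML: {0}")]
    InvalidXml(String),
    // err_derive equivalent: Io(#[error(source)] std::io::Error);
    // #[from] derives both the source() accessor and the From conversion.
    #[error("{0}")]
    Io(#[from] std::io::Error),
}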

View file

@ -2,39 +2,37 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::convert::TryInto; use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, UNIX_EPOCH};
use bytes::Bytes; use bytes::Bytes;
use futures::future; use futures::future;
use futures::stream::{self, Stream, StreamExt}; use futures::stream::{self, Stream, StreamExt};
use http::header::{ use http::header::{
HeaderMap, HeaderName, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MATCH, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
IF_MODIFIED_SINCE, IF_NONE_MATCH, IF_UNMODIFIED_SINCE, LAST_MODIFIED, RANGE, LAST_MODIFIED, RANGE,
}; };
use hyper::{Request, Response, StatusCode}; use hyper::{body::Body, Request, Response, StatusCode};
use tokio::sync::mpsc; use tokio::sync::mpsc;
use garage_net::stream::ByteStream; use garage_net::stream::ByteStream;
use garage_rpc::rpc_helper::OrderTag; use garage_rpc::rpc_helper::OrderTag;
use garage_table::EmptyKey; use garage_table::EmptyKey;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::{Error as UtilError, OkOrMessage}; use garage_util::error::OkOrMessage;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::api_server::ResBody; use crate::api_server::ResBody;
use crate::copy::*; use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::encryption::EncryptionParams; use crate::encryption::EncryptionParams;
use crate::error::*; use crate::error::*;
const X_AMZ_MP_PARTS_COUNT: HeaderName = HeaderName::from_static("x-amz-mp-parts-count"); const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
#[derive(Default)] #[derive(Default)]
pub struct GetObjectOverrides { pub struct GetObjectOverrides {
@ -117,29 +115,49 @@ fn getobject_override_headers(
Ok(()) Ok(())
} }
fn handle_http_precondition( fn try_answer_cached(
version: &ObjectVersion, version: &ObjectVersion,
version_meta: &ObjectVersionMeta, version_meta: &ObjectVersionMeta,
req: &Request<()>, req: &Request<impl Body>,
) -> Result<Option<Response<ResBody>>, Error> { ) -> Option<Response<ResBody>> {
let precondition_headers = PreconditionHeaders::parse(req)?; // <trinity> It is possible, and is even usually the case, [that both If-None-Match and
// If-Modified-Since] are present in a request. In this situation If-None-Match takes
// precedence and If-Modified-Since is ignored (as per section 6, Precedence, of RFC 7232). The rationale
// being that etag-based matching is more accurate: it has no issue with sub-second precision
// for instance (in case of very fast updates)
let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) {
let none_match = none_match.to_str().ok()?;
let expected = format!("\"{}\"", version_meta.etag);
let found = none_match
.split(',')
.map(str::trim)
.any(|etag| etag == expected || etag == "\"*\"");
found
} else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) {
let modified_since = modified_since.to_str().ok()?;
let client_date = httpdate::parse_http_date(modified_since).ok()?;
let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
client_date >= server_date
} else {
false
};
if let Some(status_code) = precondition_headers.check(&version, &version_meta.etag)? { if cached {
Ok(Some( Some(
Response::builder() Response::builder()
.status(status_code) .status(StatusCode::NOT_MODIFIED)
.body(empty_body()) .body(empty_body())
.unwrap(), .unwrap(),
)) )
} else { } else {
Ok(None) None
} }
} }
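Both versions of this check implement the precedence rule the comment cites from RFC 7232, section 6: an ETag condition, when present, wins over its date counterpart. Stated as plain logic over already-parsed values (a sketch, not Garage code):

use std::time::SystemTime;

// 304 Not Modified decision for GET/HEAD: evaluate If-None-Match when
// present, and fall back to If-Modified-Since only when it is absent.
fn not_modified(
    if_none_match: Option<bool>, // Some(true) = a listed ETag matched
    if_modified_since: Option<SystemTime>,
    last_modified: SystemTime,
) -> bool {
    match (if_none_match, if_modified_since) {
        (Some(etag_matched), _) => etag_matched,
        (None, Some(ims)) => last_modified <= ims,
        (None, None) => false,
    }
}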
/// Handle HEAD request /// Handle HEAD request
pub async fn handle_head( pub async fn handle_head(
ctx: ReqCtx, ctx: ReqCtx,
req: &Request<()>, req: &Request<impl Body>,
key: &str, key: &str,
part_number: Option<u64>, part_number: Option<u64>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
@ -149,7 +167,7 @@ pub async fn handle_head(
/// Handle HEAD request for website /// Handle HEAD request for website
pub async fn handle_head_without_ctx( pub async fn handle_head_without_ctx(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<()>, req: &Request<impl Body>,
bucket_id: Uuid, bucket_id: Uuid,
key: &str, key: &str,
part_number: Option<u64>, part_number: Option<u64>,
@ -178,8 +196,8 @@ pub async fn handle_head_without_ctx(
_ => unreachable!(), _ => unreachable!(),
}; };
if let Some(res) = handle_http_precondition(object_version, version_meta, req)? { if let Some(cached) = try_answer_cached(object_version, version_meta, req) {
return Ok(res); return Ok(cached);
} }
let (encryption, headers) = let (encryption, headers) =
@ -216,7 +234,6 @@ pub async fn handle_head_without_ctx(
.get(&object_version.uuid, &EmptyKey) .get(&object_version.uuid, &EmptyKey)
.await? .await?
.ok_or(Error::NoSuchKey)?; .ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let (part_offset, part_end) = let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
@ -261,7 +278,7 @@ pub async fn handle_head_without_ctx(
/// Handle GET request /// Handle GET request
pub async fn handle_get( pub async fn handle_get(
ctx: ReqCtx, ctx: ReqCtx,
req: &Request<()>, req: &Request<impl Body>,
key: &str, key: &str,
part_number: Option<u64>, part_number: Option<u64>,
overrides: GetObjectOverrides, overrides: GetObjectOverrides,
@ -272,7 +289,7 @@ pub async fn handle_get(
/// Handle GET request /// Handle GET request
pub async fn handle_get_without_ctx( pub async fn handle_get_without_ctx(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<()>, req: &Request<impl Body>,
bucket_id: Uuid, bucket_id: Uuid,
key: &str, key: &str,
part_number: Option<u64>, part_number: Option<u64>,
@ -301,8 +318,8 @@ pub async fn handle_get_without_ctx(
ObjectVersionData::FirstBlock(meta, _) => meta, ObjectVersionData::FirstBlock(meta, _) => meta,
}; };
if let Some(res) = handle_http_precondition(last_v, last_v_meta, req)? { if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) {
return Ok(res); return Ok(cached);
} }
let (enc, headers) = let (enc, headers) =
@ -323,12 +340,7 @@ pub async fn handle_get_without_ctx(
enc, enc,
&headers, &headers,
pn, pn,
ChecksumMode { checksum_mode,
// TODO: for multipart uploads, checksums of each part should be stored
// so that we can return the corresponding checksum here
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
enabled: false,
},
) )
.await .await
} }
@ -342,12 +354,7 @@ pub async fn handle_get_without_ctx(
&headers, &headers,
range.start, range.start,
range.start + range.length, range.start + range.length,
ChecksumMode { checksum_mode,
// TODO: for range queries that align with part boundaries,
// we should return the saved checksum of the part
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
enabled: false,
},
) )
.await .await
} }
@ -367,21 +374,6 @@ pub async fn handle_get_without_ctx(
} }
} }
pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> {
if version.deleted.get() {
// the version was deleted between when the object_table was consulted
// and now; this could mean the object was deleted or overwritten.
// Rather than saying the key doesn't exist, return a transient error
// to signal the client to try again.
return Err(CommonError::InternalError(UtilError::Message(
"conflict/inconsistency between object and version state, version is deleted"
.to_string(),
))
.into());
}
Ok(())
}
async fn handle_get_full( async fn handle_get_full(
garage: Arc<Garage>, garage: Arc<Garage>,
version: &ObjectVersion, version: &ObjectVersion,
@ -448,7 +440,6 @@ pub fn full_object_byte_stream(
.ok_or_message("channel closed")?; .ok_or_message("channel closed")?;
let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?; let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) { for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
let stream_block_i = encryption let stream_block_i = encryption
.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64))) .get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
@ -464,14 +455,6 @@ pub fn full_object_byte_stream(
{ {
Ok(()) => (), Ok(()) => (),
Err(e) => { Err(e) => {
// TODO: I think this is a bad idea; we should log
// an error and stop there. If the error happens to
// be exactly the size of what hasn't been streamed
// yet, the client will see the request as a
// success
// instead of truncating the output, notify the client that
// something happened with their download, so that
// they can retry it
let _ = tx.send(error_stream_item(e)).await; let _ = tx.send(error_stream_item(e)).await;
} }
} }
@ -523,7 +506,7 @@ async fn handle_get_range(
.get(&version.uuid, &EmptyKey) .get(&version.uuid, &EmptyKey)
.await? .await?
.ok_or(Error::NoSuchKey)?; .ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let body = let body =
body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end); body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder.body(body)?) Ok(resp_builder.body(body)?)
@ -574,8 +557,6 @@ async fn handle_get_part(
.await? .await?
.ok_or(Error::NoSuchKey)?; .ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let (begin, end) = let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
@ -596,7 +577,7 @@ async fn handle_get_part(
} }
fn parse_range_header( fn parse_range_header(
req: &Request<()>, req: &Request<impl Body>,
total_size: u64, total_size: u64,
) -> Result<Option<http_range::HttpRange>, Error> { ) -> Result<Option<http_range::HttpRange>, Error> {
let range = match req.headers().get(RANGE) { let range = match req.headers().get(RANGE) {
@ -637,7 +618,7 @@ struct ChecksumMode {
enabled: bool, enabled: bool,
} }
fn checksum_mode(req: &Request<()>) -> ChecksumMode { fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
ChecksumMode { ChecksumMode {
enabled: req enabled: req
.headers() .headers()
@ -770,118 +751,3 @@ fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
format!("Error while reading object data: {}", e), format!("Error while reading object data: {}", e),
) )
} }
// ----
pub struct PreconditionHeaders {
if_match: Option<Vec<String>>,
if_modified_since: Option<SystemTime>,
if_none_match: Option<Vec<String>>,
if_unmodified_since: Option<SystemTime>,
}
impl PreconditionHeaders {
fn parse<B>(req: &Request<B>) -> Result<Self, Error> {
Self::parse_with(
req.headers(),
&IF_MATCH,
&IF_NONE_MATCH,
&IF_MODIFIED_SINCE,
&IF_UNMODIFIED_SINCE,
)
}
pub(crate) fn parse_copy_source<B>(req: &Request<B>) -> Result<Self, Error> {
Self::parse_with(
req.headers(),
&X_AMZ_COPY_SOURCE_IF_MATCH,
&X_AMZ_COPY_SOURCE_IF_NONE_MATCH,
&X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE,
&X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
)
}
fn parse_with(
headers: &HeaderMap,
hdr_if_match: &HeaderName,
hdr_if_none_match: &HeaderName,
hdr_if_modified_since: &HeaderName,
hdr_if_unmodified_since: &HeaderName,
) -> Result<Self, Error> {
Ok(Self {
if_match: headers
.get(hdr_if_match)
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
if_none_match: headers
.get(hdr_if_none_match)
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
if_modified_since: headers
.get(hdr_if_modified_since)
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in if-modified-since")?,
if_unmodified_since: headers
.get(hdr_if_unmodified_since)
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in if-unmodified-since")?,
})
}
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
// we store date with ms precision, but headers are precise to the second: truncate
// the timestamp to handle the same-second edge case
let v_date = UNIX_EPOCH + Duration::from_secs(v.timestamp / 1000);
// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
if let Some(im) = &self.if_match {
// Step 1: if-match is present
if !im.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
}
} else if let Some(ius) = &self.if_unmodified_since {
// Step 2: if-unmodified-since is present, and if-match is absent
if v_date > *ius {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
}
}
if let Some(inm) = &self.if_none_match {
// Step 3: if-none-match is present
if inm.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::NOT_MODIFIED));
}
} else if let Some(ims) = &self.if_modified_since {
// Step 4: if-modified-since is present, and if-none-match is absent
if v_date <= *ims {
return Ok(Some(StatusCode::NOT_MODIFIED));
}
}
Ok(None)
}
pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
match self.check(v, etag)? {
Some(_) => Err(Error::PreconditionFailed),
None => Ok(()),
}
}
}
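Note the asymmetry encoded by check_copy_source: on a plain GET or HEAD, a failed If-None-Match maps to 304 Not Modified, but for the x-amz-copy-source-if-* family every failed condition must surface as PreconditionFailed, since a CopyObject has no "not modified" response. Intended usage in a copy handler, as a sketch:

// Sketch: how a CopyObject handler would drive the types defined above.
fn check_copy_preconditions(
    req: &hyper::Request<()>,
    source_version: &ObjectVersion,
    source_etag: &str,
) -> Result<(), Error> {
    let preconds = PreconditionHeaders::parse_copy_source(req)?;
    // Any failed condition becomes Error::PreconditionFailed (HTTP 412):
    preconds.check_copy_source(source_version, source_etag)
}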

View file

@ -14,8 +14,9 @@ mod list;
mod multipart; mod multipart;
mod post_object; mod post_object;
mod put; mod put;
pub mod website; mod website;
mod checksum;
mod encryption; mod encryption;
mod router; mod router;
pub mod xml; pub mod xml;

View file

@ -1,10 +1,12 @@
use quick_xml::de::from_reader; use quick_xml::de::from_reader;
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody}; use crate::api_server::{ReqBody, ResBody};
use crate::error::*; use crate::error::*;
@ -14,6 +16,7 @@ use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration, parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule, LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
}; };
use garage_util::data::*;
pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> { pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx; let ReqCtx { bucket_params, .. } = ctx;
@ -27,7 +30,7 @@ pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Erro
.body(string_body(xml))?) .body(string_body(xml))?)
} else { } else {
Ok(Response::builder() Ok(Response::builder()
.status(StatusCode::NOT_FOUND) .status(StatusCode::NO_CONTENT)
.body(empty_body())?) .body(empty_body())?)
} }
} }
@ -53,6 +56,7 @@ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, E
pub async fn handle_put_lifecycle( pub async fn handle_put_lifecycle(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx { let ReqCtx {
garage, garage,
@ -61,7 +65,11 @@ pub async fn handle_put_lifecycle(
.. ..
} = ctx; } = ctx;
let body = req.into_body().collect().await?; let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let conf: LifecycleConfiguration = from_reader(&body as &[u8])?; let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
let config = conf let config = conf

View file

@ -1,20 +1,13 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::{TryFrom, TryInto}; use std::convert::TryInto;
use std::hash::Hasher;
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*; use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use futures::prelude::*; use futures::prelude::*;
use hyper::{Request, Response}; use hyper::{Request, Response};
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;
use garage_table::*; use garage_table::*;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
@ -23,9 +16,10 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*; use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody}; use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams; use crate::encryption::EncryptionParams;
use crate::error::*; use crate::error::*;
use crate::put::*; use crate::put::*;
@ -49,7 +43,7 @@ pub async fn handle_create_multipart_upload(
let upload_id = gen_uuid(); let upload_id = gen_uuid();
let timestamp = next_timestamp(existing_object.as_ref()); let timestamp = next_timestamp(existing_object.as_ref());
let headers = extract_metadata_headers(req.headers())?; let headers = get_headers(req.headers())?;
let meta = ObjectVersionMetaInner { let meta = ObjectVersionMetaInner {
headers, headers,
checksum: None, checksum: None,
@ -100,6 +94,7 @@ pub async fn handle_put_part(
key: &str, key: &str,
part_number: u64, part_number: u64,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = &ctx; let ReqCtx { garage, .. } = &ctx;
@ -110,30 +105,17 @@ pub async fn handle_put_part(
Some(x) => Some(x.to_str()?.to_string()), Some(x) => Some(x.to_str()?.to_string()),
None => None, None => None,
}, },
sha256: None, sha256: content_sha256,
extra: request_checksum_value(req.headers())?, extra: request_checksum_value(req.headers())?,
}; };
// Read the first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string(); let key = key.to_string();
let (req_head, mut req_body) = req.into_parts(); let (req_head, req_body) = req.into_parts();
let stream = body_stream(req_body);
// Before we stream the body, configure the needed checksums.
req_body.add_expected_checksums(expected_checksums.clone());
// TODO: avoid parsing encryption headers twice...
if !EncryptionParams::new_from_headers(&garage, &req_head.headers)?.is_encrypted() {
// For non-encrypted objects, we need to compute the md5sum in all cases
// (even if content-md5 is not set), because it is used as an etag of the
// part, which is in turn used in the etag computation of the whole object
req_body.add_md5();
}
let (stream, stream_checksums) = req_body.streaming_with_checksums();
let stream = stream.map_err(Error::from);
let mut chunker = StreamChunker::new(stream, garage.config.block_size); let mut chunker = StreamChunker::new(stream, garage.config.block_size);
// Read the first chunk, and at the same time try to get the object to see if it exists
let ((_, object_version, mut mpu), first_block) = let ((_, object_version, mut mpu), first_block) =
futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?; futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
@ -190,21 +172,21 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Copy data to version // Copy data to version
let (total_size, _, _) = read_and_put_blocks( let checksummer =
Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
let (total_size, checksums, _) = read_and_put_blocks(
&ctx, &ctx,
&version, &version,
encryption, encryption,
part_number, part_number,
first_block, first_block,
chunker, &mut chunker,
Checksummer::new(), checksummer,
) )
.await?; .await?;
// Verify that checksums match // Verify that checksums map
let checksums = stream_checksums checksums.verify(&expected_checksums)?;
.await
.ok_or_internal_error("checksum calculation")??;
// Store part etag in version // Store part etag in version
let etag = encryption.etag_from_md5(&checksums.md5); let etag = encryption.etag_from_md5(&checksums.md5);
@ -266,6 +248,7 @@ pub async fn handle_complete_multipart_upload(
req: Request<ReqBody>, req: Request<ReqBody>,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx { let ReqCtx {
garage, garage,
@ -277,7 +260,11 @@ pub async fn handle_complete_multipart_upload(
let expected_checksum = request_checksum_value(&req_head.headers)?; let expected_checksum = request_checksum_value(&req_head.headers)?;
let body = req_body.collect().await?; let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
@ -615,99 +602,3 @@ fn parse_complete_multipart_upload_body(
Some(parts) Some(parts)
} }
// ====== checksummer ====
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}
pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}
impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}
pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}
pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}
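The md5 field of MultipartChecksummer reproduces S3's multipart ETag scheme: each part's ETag is itself an MD5, and the object's ETag digest is the MD5 of those digests concatenated (S3's convention then appends "-<part count>"; that suffix is handled outside this struct). As a standalone sketch, assuming the md5 and hex crates already imported above:

use md5::{Digest, Md5};

// md5(concat(md5_bytes(part_etag_i))): the digest part of a multipart ETag.
// Returns None if a part ETag is not valid hex.
fn multipart_etag_digest(part_etags: &[&str]) -> Option<String> {
    let mut hasher = Md5::new();
    for etag in part_etags {
        hasher.update(hex::decode(etag).ok()?);
    }
    Some(hex::encode(hasher.finalize()))
}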

View file

@ -18,13 +18,13 @@ use garage_model::s3::object_table::*;
use garage_api_common::cors::*; use garage_api_common::cors::*;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;
use garage_api_common::signature::payload::{verify_v4, Authorization}; use garage_api_common::signature::payload::{verify_v4, Authorization};
use crate::api_server::ResBody; use crate::api_server::ResBody;
use crate::checksum::*;
use crate::encryption::EncryptionParams; use crate::encryption::EncryptionParams;
use crate::error::*; use crate::error::*;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode}; use crate::put::{get_headers, save_stream, ChecksumMode};
use crate::xml as s3_xml; use crate::xml as s3_xml;
pub async fn handle_post_object( pub async fn handle_post_object(
@ -141,26 +141,10 @@ pub async fn handle_post_object(
let mut conditions = decoded_policy.into_conditions()?; let mut conditions = decoded_policy.into_conditions()?;
// If there are conditions on the bucket name, check these against the actual bucket_name rather
// than the one in params, which is allowed to be absent.
if let Some(conds) = conditions.params.remove("bucket") {
for cond in conds {
let ok = match cond {
Operation::Equal(s) => s.as_str() == bucket_name,
Operation::StartsWith(s) => bucket_name.starts_with(&s),
};
if !ok {
return Err(Error::bad_request(
"Key 'bucket' has value not allowed in policy",
));
}
}
}
for (param_key, value) in params.iter() { for (param_key, value) in params.iter() {
let param_key = param_key.as_str(); let param_key = param_key.as_str();
match param_key { match param_key {
"policy" | "x-amz-signature" | "bucket" => (), // this is always accepted, as it's required to validate other fields "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
"content-type" => { "content-type" => {
let conds = conditions.params.remove("content-type").ok_or_else(|| { let conds = conditions.params.remove("content-type").ok_or_else(|| {
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key)) Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
@ -232,9 +216,8 @@ pub async fn handle_post_object(
// if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere // if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account. // around here to make sure the rest of the machinery takes our acl into account.
let headers = extract_metadata_headers(&params)?; let headers = get_headers(&params)?;
let checksum_algorithm = request_checksum_algorithm(&params)?;
let expected_checksums = ExpectedChecksums { let expected_checksums = ExpectedChecksums {
md5: params md5: params
.get("content-md5") .get("content-md5")
@ -242,9 +225,7 @@ pub async fn handle_post_object(
.transpose()? .transpose()?
.map(str::to_string), .map(str::to_string),
sha256: None, sha256: None,
extra: checksum_algorithm extra: request_checksum_algorithm_value(&params)?,
.map(|algo| extract_checksum_value(&params, algo))
.transpose()?,
}; };
let meta = ObjectVersionMetaInner { let meta = ObjectVersionMetaInner {

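The bucket-condition block present on one side of this hunk checks POST policy conditions against the resolved bucket name rather than the form fields, since the bucket may come from the Host header and be absent from params. Each condition is an exact or prefix match; in isolation (with an Operation type assumed to mirror the policy parser's):

// Hypothetical mirror of the policy parser's condition type.
enum Operation {
    Equal(String),
    StartsWith(String),
}

// Every condition attached to the "bucket" key must hold.
fn bucket_conditions_hold(conds: &[Operation], bucket_name: &str) -> bool {
    conds.iter().all(|cond| match cond {
        Operation::Equal(s) => s.as_str() == bucket_name,
        Operation::StartsWith(s) => bucket_name.starts_with(s.as_str()),
    })
}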
View file

@ -31,13 +31,13 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::body::StreamingChecksumReceiver;
use garage_api_common::signature::checksum::*;
use crate::api_server::{ReqBody, ResBody}; use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams; use crate::encryption::EncryptionParams;
use crate::error::*; use crate::error::*;
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
pub(crate) struct SaveStreamResult { pub(crate) struct SaveStreamResult {
pub(crate) version_uuid: Uuid, pub(crate) version_uuid: Uuid,
@ -48,10 +48,6 @@ pub(crate) struct SaveStreamResult {
pub(crate) enum ChecksumMode<'a> { pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums), Verify(&'a ExpectedChecksums),
VerifyFrom {
checksummer: StreamingChecksumReceiver,
trailer_algo: Option<ChecksumAlgorithm>,
},
Calculate(Option<ChecksumAlgorithm>), Calculate(Option<ChecksumAlgorithm>),
} }
@ -59,9 +55,10 @@ pub async fn handle_put(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
key: &String, key: &String,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
// Retrieve interesting headers from request // Retrieve interesting headers from request
let headers = extract_metadata_headers(req.headers())?; let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers); debug!("Object headers: {:?}", headers);
let expected_checksums = ExpectedChecksums { let expected_checksums = ExpectedChecksums {
@ -69,10 +66,9 @@ pub async fn handle_put(
Some(x) => Some(x.to_str()?.to_string()), Some(x) => Some(x.to_str()?.to_string()),
None => None, None => None,
}, },
sha256: None, sha256: content_sha256,
extra: request_checksum_value(req.headers())?, extra: request_checksum_value(req.headers())?,
}; };
let trailer_checksum_algorithm = request_trailer_checksum_algorithm(req.headers())?;
let meta = ObjectVersionMetaInner { let meta = ObjectVersionMetaInner {
headers, headers,
@ -82,19 +78,7 @@ pub async fn handle_put(
// Determine whether object should be encrypted, and if so the key // Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?; let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
// The request body is a special ReqBody object (see garage_api_common::signature::body) let stream = body_stream(req.into_body());
// which supports calculating checksums while streaming the data.
// Before we start streaming, we configure it to calculate all the checksums we need.
let mut req_body = req.into_body();
req_body.add_expected_checksums(expected_checksums.clone());
if !encryption.is_encrypted() {
// For non-encrypted objects, we need to compute the md5sum in all cases
// (even if content-md5 is not set), because it is used as the object etag
req_body.add_md5();
}
let (stream, checksummer) = req_body.streaming_with_checksums();
let stream = stream.map_err(Error::from);
let res = save_stream( let res = save_stream(
&ctx, &ctx,
@ -102,10 +86,7 @@ pub async fn handle_put(
encryption, encryption,
stream, stream,
key, key,
ChecksumMode::VerifyFrom { ChecksumMode::Verify(&expected_checksums),
checksummer,
trailer_algo: trailer_checksum_algorithm,
},
) )
.await?; .await?;
@ -141,15 +122,10 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref()); let version_timestamp = next_timestamp(existing_object.as_ref());
let mut checksummer = match &checksum_mode { let mut checksummer = match checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()), ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => { ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(*algo) Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
}
ChecksumMode::VerifyFrom { .. } => {
// Checksums are calculated by the garage_api_common::signature module
// so here we can just have an empty checksummer that does nothing
Checksummer::new()
} }
}; };
@ -157,7 +133,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// as "inline data". We can then return immediately. // as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD { if first_block.len() < INLINE_THRESHOLD {
checksummer.update(&first_block); checksummer.update(&first_block);
let mut checksums = checksummer.finalize(); let checksums = checksummer.finalize();
match checksum_mode { match checksum_mode {
ChecksumMode::Verify(expected) => { ChecksumMode::Verify(expected) => {
@ -166,18 +142,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => { ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo); meta.checksum = checksums.extract(algo);
} }
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
drop(chunker);
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
}; };
let size = first_block.len() as u64; let size = first_block.len() as u64;
@ -249,13 +213,13 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data // Transfer data
let (total_size, mut checksums, first_block_hash) = read_and_put_blocks( let (total_size, checksums, first_block_hash) = read_and_put_blocks(
ctx, ctx,
&version, &version,
encryption, encryption,
1, 1,
first_block, first_block,
chunker, &mut chunker,
checksummer, checksummer,
) )
.await?; .await?;
@ -268,17 +232,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => { ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo); meta.checksum = checksums.extract(algo);
} }
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
}; };
// Verify quotas are respected // Verify quotas are respected
@ -379,7 +332,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
encryption: EncryptionParams, encryption: EncryptionParams,
part_number: u64, part_number: u64,
first_block: Bytes, first_block: Bytes,
mut chunker: StreamChunker<S>, chunker: &mut StreamChunker<S>,
checksummer: Checksummer, checksummer: Checksummer,
) -> Result<(u64, Checksums, Hash), Error> { ) -> Result<(u64, Checksums, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage"); let tracer = opentelemetry::global::tracer("garage");
@ -491,7 +444,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
let recv_next = async { let recv_next = async {
// If more than a maximum number of writes are in progress, don't add more for now // If more than a maximum number of writes are in progress, don't add more for now
if currently_running >= ctx.garage.config.block_max_concurrent_writes_per_request { if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
futures::future::pending().await futures::future::pending().await
} else { } else {
block_rx3.recv().await block_rx3.recv().await
@ -648,9 +601,7 @@ impl Drop for InterruptedCleanup {
// ============ helpers ============ // ============ helpers ============
pub(crate) fn extract_metadata_headers( pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
headers: &HeaderMap<HeaderValue>,
) -> Result<HeaderList, Error> {
let mut ret = Vec::new(); let mut ret = Vec::new();
// Preserve standard headers // Preserve standard headers
@ -676,18 +627,6 @@ pub(crate) fn extract_metadata_headers(
std::str::from_utf8(value.as_bytes())?.to_string(), std::str::from_utf8(value.as_bytes())?.to_string(),
)); ));
} }
if name == X_AMZ_WEBSITE_REDIRECT_LOCATION {
let value = std::str::from_utf8(value.as_bytes())?.to_string();
if !(value.starts_with("/")
|| value.starts_with("http://")
|| value.starts_with("https://"))
{
return Err(Error::bad_request(format!(
"Invalid {X_AMZ_WEBSITE_REDIRECT_LOCATION} header",
)));
}
ret.push((X_AMZ_WEBSITE_REDIRECT_LOCATION.to_string(), value));
}
} }
Ok(ret) Ok(ret)
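One side of the handle_put hunk above routes the body through a ReqBody that computes the expected checksums while streaming, instead of buffering the object; the other side verifies a pre-collected body. The essence of the streaming variant, as a minimal self-contained sketch (futures + sha2; Garage's real receiver tracks several algorithms and trailing checksums at once):

use bytes::Bytes;
use futures::{Stream, StreamExt};
use sha2::{Digest, Sha256};

// Drive a byte stream to completion while hashing it, returning both the
// collected chunks and the finished digest.
async fn collect_with_sha256<S, E>(mut body: S) -> Result<(Vec<Bytes>, [u8; 32]), E>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
{
    let mut hasher = Sha256::new();
    let mut chunks = Vec::new();
    while let Some(chunk) = body.next().await {
        let chunk = chunk?;
        hasher.update(&chunk); // digest advances as the data flows through
        chunks.push(chunk); // stands in for forwarding the chunk to storage
    }
    Ok((chunks, hasher.finalize().into()))
}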

View file

@ -1,19 +1,19 @@
use quick_xml::de::from_reader; use quick_xml::de::from_reader;
use hyper::{header::HeaderName, Request, Response, StatusCode}; use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_model::bucket_table::*; use garage_model::bucket_table::{self, *};
use garage_util::data::*;
use garage_api_common::helpers::*; use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody}; use crate::api_server::{ReqBody, ResBody};
use crate::error::*; use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub const X_AMZ_WEBSITE_REDIRECT_LOCATION: HeaderName =
HeaderName::from_static("x-amz-website-redirect-location");
pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> { pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx; let ReqCtx { bucket_params, .. } = ctx;
if let Some(website) = bucket_params.website_config.get() { if let Some(website) = bucket_params.website_config.get() {
@ -26,7 +26,28 @@ pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
suffix: Value(website.index_document.to_string()), suffix: Value(website.index_document.to_string()),
}), }),
redirect_all_requests_to: None, redirect_all_requests_to: None,
routing_rules: None, routing_rules: RoutingRules {
rules: website
.routing_rules
.clone()
.into_iter()
.map(|rule| RoutingRule {
condition: rule.condition.map(|cond| Condition {
http_error_code: cond.http_error_code.map(|c| IntValue(c as i64)),
prefix: cond.prefix.map(Value),
}),
redirect: Redirect {
hostname: rule.redirect.hostname.map(Value),
http_redirect_code: Some(IntValue(
rule.redirect.http_redirect_code as i64,
)),
protocol: rule.redirect.protocol.map(Value),
replace_full: rule.redirect.replace_key.map(Value),
replace_prefix: rule.redirect.replace_key_prefix.map(Value),
},
})
.collect(),
},
}; };
let xml = to_xml_with_header(&wc)?; let xml = to_xml_with_header(&wc)?;
Ok(Response::builder() Ok(Response::builder()
@ -61,6 +82,7 @@ pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Err
pub async fn handle_put_website( pub async fn handle_put_website(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx { let ReqCtx {
garage, garage,
@ -69,7 +91,11 @@ pub async fn handle_put_website(
.. ..
} = ctx; } = ctx;
let body = req.into_body().collect().await?; let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;
@ -97,18 +123,28 @@ pub struct WebsiteConfiguration {
pub index_document: Option<Suffix>, pub index_document: Option<Suffix>,
#[serde(rename = "RedirectAllRequestsTo")] #[serde(rename = "RedirectAllRequestsTo")]
pub redirect_all_requests_to: Option<Target>, pub redirect_all_requests_to: Option<Target>,
#[serde(rename = "RoutingRules")] #[serde(
pub routing_rules: Option<Vec<RoutingRule>>, rename = "RoutingRules",
default,
skip_serializing_if = "RoutingRules::is_empty"
)]
pub routing_rules: RoutingRules,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct RoutingRules {
#[serde(rename = "RoutingRule")]
pub rules: Vec<RoutingRule>,
}
impl RoutingRules {
fn is_empty(&self) -> bool {
self.rules.is_empty()
}
} }
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRule { pub struct RoutingRule {
#[serde(rename = "RoutingRule")]
pub inner: RoutingRuleInner,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRuleInner {
#[serde(rename = "Condition")] #[serde(rename = "Condition")]
pub condition: Option<Condition>, pub condition: Option<Condition>,
#[serde(rename = "Redirect")] #[serde(rename = "Redirect")]
@ -162,7 +198,7 @@ impl WebsiteConfiguration {
if self.redirect_all_requests_to.is_some() if self.redirect_all_requests_to.is_some()
&& (self.error_document.is_some() && (self.error_document.is_some()
|| self.index_document.is_some() || self.index_document.is_some()
|| self.routing_rules.is_some()) || !self.routing_rules.is_empty())
{ {
return Err(Error::bad_request( return Err(Error::bad_request(
"Bad XML: can't have RedirectAllRequestsTo and other fields", "Bad XML: can't have RedirectAllRequestsTo and other fields",
@ -177,10 +213,15 @@ impl WebsiteConfiguration {
if let Some(ref rart) = self.redirect_all_requests_to { if let Some(ref rart) = self.redirect_all_requests_to {
rart.validate()?; rart.validate()?;
} }
if let Some(ref rrs) = self.routing_rules { for rr in &self.routing_rules.rules {
for rr in rrs { rr.validate()?;
rr.inner.validate()?; }
} if self.routing_rules.rules.len() > 1000 {
// we will do linear scans, so it is best to avoid overly long configurations. The
// limit was chosen arbitrarily
return Err(Error::bad_request(
"Bad XML: RoutingRules can't have more than 1000 child elements",
));
} }
Ok(()) Ok(())
@ -189,11 +230,7 @@ impl WebsiteConfiguration {
pub fn into_garage_website_config(self) -> Result<WebsiteConfig, Error> { pub fn into_garage_website_config(self) -> Result<WebsiteConfig, Error> {
if self.redirect_all_requests_to.is_some() { if self.redirect_all_requests_to.is_some() {
Err(Error::NotImplemented( Err(Error::NotImplemented(
"S3 website redirects are not currently implemented in Garage.".into(), "RedirectAllRequestsTo is not currently implemented in Garage, however its effect can be emulated using a single inconditional RoutingRule.".into(),
))
} else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) {
Err(Error::NotImplemented(
"S3 routing rules are not currently implemented in Garage.".into(),
)) ))
} else { } else {
Ok(WebsiteConfig { Ok(WebsiteConfig {
@ -202,6 +239,36 @@ impl WebsiteConfiguration {
.map(|x| x.suffix.0) .map(|x| x.suffix.0)
.unwrap_or_else(|| "index.html".to_string()), .unwrap_or_else(|| "index.html".to_string()),
error_document: self.error_document.map(|x| x.key.0), error_document: self.error_document.map(|x| x.key.0),
redirect_all: None,
routing_rules: self
.routing_rules
.rules
.into_iter()
.map(|rule| {
bucket_table::RoutingRule {
condition: rule.condition.map(|condition| {
bucket_table::RedirectCondition {
http_error_code: condition.http_error_code.map(|c| c.0 as u16),
prefix: condition.prefix.map(|p| p.0),
}
}),
redirect: bucket_table::Redirect {
hostname: rule.redirect.hostname.map(|h| h.0),
protocol: rule.redirect.protocol.map(|p| p.0),
// AWS defaults to 301, which is punitive in case of
// misconfiguration (it can be permanently cached by the
// user agent)
http_redirect_code: rule
.redirect
.http_redirect_code
.map(|c| c.0 as u16)
.unwrap_or(302),
replace_key_prefix: rule.redirect.replace_prefix.map(|k| k.0),
replace_key: rule.redirect.replace_full.map(|k| k.0),
},
}
})
.collect(),
}) })
} }
} }
@ -242,37 +309,69 @@ impl Target {
} }
} }
impl RoutingRuleInner { impl RoutingRule {
pub fn validate(&self) -> Result<(), Error> { pub fn validate(&self) -> Result<(), Error> {
let has_prefix = self if let Some(condition) = &self.condition {
.condition condition.validate()?;
.as_ref() }
.and_then(|c| c.prefix.as_ref()) self.redirect.validate()
.is_some(); }
self.redirect.validate(has_prefix) }
impl Condition {
pub fn validate(&self) -> Result<bool, Error> {
if let Some(ref error_code) = self.http_error_code {
// TODO do other error codes make sense? Aws only allows 4xx and 5xx
if error_code.0 != 404 {
return Err(Error::bad_request(
"Bad XML: HttpErrorCodeReturnedEquals must be 404 or absent",
));
}
}
Ok(self.prefix.is_some())
} }
} }
impl Redirect { impl Redirect {
pub fn validate(&self, has_prefix: bool) -> Result<(), Error> { pub fn validate(&self) -> Result<(), Error> {
if self.replace_prefix.is_some() { if self.replace_prefix.is_some() && self.replace_full.is_some() {
if self.replace_full.is_some() { return Err(Error::bad_request(
return Err(Error::bad_request( "Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set",
"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set", ));
));
}
if !has_prefix {
return Err(Error::bad_request(
"Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't",
));
}
} }
if let Some(ref protocol) = self.protocol { if let Some(ref protocol) = self.protocol {
if protocol.0 != "http" && protocol.0 != "https" { if protocol.0 != "http" && protocol.0 != "https" {
return Err(Error::bad_request("Bad XML: invalid protocol")); return Err(Error::bad_request("Bad XML: invalid protocol"));
} }
} }
// TODO there are probably more invalid cases, but which ones? if let Some(ref http_redirect_code) = self.http_redirect_code {
match http_redirect_code.0 {
// AWS allows all 3xx codes except 300, but some are nonsensical (Not Modified,
// Use Proxy...)
301 | 302 | 303 | 307 | 308 => {
if self.hostname.is_none() && self.protocol.is_some() {
return Err(Error::bad_request(
"Bad XML: HostName must be set if Protocol is set",
));
}
}
// AWS doesn't allow these codes, but Netlify does, and it seems like a
// cool feature (serve a different page without changing the URL shown by the
// user agent)
200 | 404 => {
if self.hostname.is_some() || self.protocol.is_some() {
// a hostname would mean a different bucket, and a protocol doesn't make
// sense
return Err(Error::bad_request(
"Bad XML: an HttpRedirectCode of 200 is not acceptable alongside HostName or Protocol",
));
}
}
_ => {
return Err(Error::bad_request("Bad XML: invalid HttpRedirectCode"));
}
}
}
Ok(()) Ok(())
} }
} }
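Taken together, Condition and Redirect describe a key rewrite: when a request key matches KeyPrefixEquals (and/or the 404 condition), the matched prefix is replaced by ReplaceKeyPrefixWith, or the entire key by ReplaceKeyWith. A sketch of that rewrite under the constraints validate() enforces above (the request-time matcher itself is outside this diff):

// Sketch: apply a matched rule's key rewrite. validate() above guarantees
// that at most one of replace_full / replace_prefix is set.
fn rewrite_key(
    key: &str,
    prefix: &str,
    replace_full: Option<&str>,
    replace_prefix: Option<&str>,
) -> Option<String> {
    let rest = key.strip_prefix(prefix)?; // None: the rule does not match
    match (replace_full, replace_prefix) {
        (Some(full), _) => Some(full.to_string()),
        (None, Some(rp)) => Some(format!("{rp}{rest}")),
        (None, None) => Some(key.to_string()),
    }
}

// e.g. rewrite_key("docs/page.html", "docs/", None, Some("manual/"))
//      == Some("manual/page.html".to_string())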
@ -311,6 +410,15 @@ mod tests {
<ReplaceKeyWith>fullkey</ReplaceKeyWith> <ReplaceKeyWith>fullkey</ReplaceKeyWith>
</Redirect> </Redirect>
</RoutingRule> </RoutingRule>
<RoutingRule>
<Condition>
<KeyPrefixEquals></KeyPrefixEquals>
</Condition>
<Redirect>
<HttpRedirectCode>404</HttpRedirectCode>
<ReplaceKeyWith>missing</ReplaceKeyWith>
</Redirect>
</RoutingRule>
</RoutingRules> </RoutingRules>
</WebsiteConfiguration>"#; </WebsiteConfiguration>"#;
let conf: WebsiteConfiguration = from_str(message).unwrap(); let conf: WebsiteConfiguration = from_str(message).unwrap();
@ -326,21 +434,36 @@ mod tests {
hostname: Value("garage.tld".to_owned()), hostname: Value("garage.tld".to_owned()),
protocol: Some(Value("https".to_owned())), protocol: Some(Value("https".to_owned())),
}), }),
routing_rules: Some(vec![RoutingRule { routing_rules: RoutingRules {
inner: RoutingRuleInner { rules: vec![
condition: Some(Condition { RoutingRule {
http_error_code: Some(IntValue(404)), condition: Some(Condition {
prefix: Some(Value("prefix1".to_owned())), http_error_code: Some(IntValue(404)),
}), prefix: Some(Value("prefix1".to_owned())),
redirect: Redirect { }),
hostname: Some(Value("gara.ge".to_owned())), redirect: Redirect {
protocol: Some(Value("http".to_owned())), hostname: Some(Value("gara.ge".to_owned())),
http_redirect_code: Some(IntValue(303)), protocol: Some(Value("http".to_owned())),
replace_prefix: Some(Value("prefix2".to_owned())), http_redirect_code: Some(IntValue(303)),
replace_full: Some(Value("fullkey".to_owned())), replace_prefix: Some(Value("prefix2".to_owned())),
replace_full: Some(Value("fullkey".to_owned())),
},
}, },
}, RoutingRule {
}]), condition: Some(Condition {
http_error_code: None,
prefix: Some(Value("".to_owned())),
}),
redirect: Redirect {
hostname: None,
protocol: None,
http_redirect_code: Some(IntValue(404)),
replace_prefix: None,
replace_full: Some(Value("missing".to_owned())),
},
},
],
},
}; };
assert_eq! { assert_eq! {
ref_value, ref_value,

View file

@ -13,10 +13,6 @@ pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/")
} }
pub fn xmlns_xsi_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
s.serialize_str("http://www.w3.org/2001/XMLSchema-instance")
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Value(#[serde(rename = "$value")] pub String); pub struct Value(#[serde(rename = "$value")] pub String);
@ -323,42 +319,6 @@ pub struct PostObject {
pub etag: Value, pub etag: Value,
} }
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grantee {
#[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")]
pub xmlns_xsi: (),
#[serde(rename = "xsi:type")]
pub typ: String,
#[serde(rename = "DisplayName")]
pub display_name: Option<Value>,
#[serde(rename = "ID")]
pub id: Option<Value>,
}
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grant {
#[serde(rename = "Grantee")]
pub grantee: Grantee,
#[serde(rename = "Permission")]
pub permission: Value,
}
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlList {
#[serde(rename = "Grant")]
pub entries: Vec<Grant>,
}
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlPolicy {
#[serde(serialize_with = "xmlns_tag")]
pub xmlns: (),
#[serde(rename = "Owner")]
pub owner: Option<Owner>,
#[serde(rename = "AccessControlList")]
pub acl: AccessControlList,
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -467,43 +427,6 @@ mod tests {
Ok(()) Ok(())
} }
#[test]
fn get_bucket_acl_result() -> Result<(), ApiError> {
let grant = Grant {
grantee: Grantee {
xmlns_xsi: (),
typ: "CanonicalUser".to_string(),
display_name: Some(Value("owner_name".to_string())),
id: Some(Value("qsdfjklm".to_string())),
},
permission: Value("FULL_CONTROL".to_string()),
};
let get_bucket_acl = AccessControlPolicy {
xmlns: (),
owner: None,
acl: AccessControlList {
entries: vec![grant],
},
};
assert_eq!(
to_xml_with_header(&get_bucket_acl)?,
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<AccessControlList>\
<Grant>\
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
<DisplayName>owner_name</DisplayName>\
<ID>qsdfjklm</ID>\
</Grantee>\
<Permission>FULL_CONTROL</Permission>\
</Grant>\
</AccessControlList>\
</AccessControlPolicy>"
);
Ok(())
}
#[test] #[test]
fn delete_result() -> Result<(), ApiError> { fn delete_result() -> Result<(), ApiError> {
let delete_result = DeleteResult { let delete_result = DeleteResult {

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_block" name = "garage_block"
version = "1.3.1" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"

View file

@ -50,8 +50,6 @@ pub const INLINE_THRESHOLD: usize = 3072;
// to delete the block locally. // to delete the block locally.
pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600); pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);
const BLOCK_READ_SEMAPHORE_TIMEOUT: Duration = Duration::from_secs(15);
/// RPC messages used to share blocks of data between nodes /// RPC messages used to share blocks of data between nodes
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub enum BlockRpc { pub enum BlockRpc {
@ -89,7 +87,6 @@ pub struct BlockManager {
disable_scrub: bool, disable_scrub: bool,
mutation_lock: Vec<Mutex<BlockManagerLocked>>, mutation_lock: Vec<Mutex<BlockManagerLocked>>,
read_semaphore: Semaphore,
pub rc: BlockRc, pub rc: BlockRc,
pub resync: BlockResyncManager, pub resync: BlockResyncManager,
@ -179,8 +176,6 @@ impl BlockManager {
.iter() .iter()
.map(|_| Mutex::new(BlockManagerLocked())) .map(|_| Mutex::new(BlockManagerLocked()))
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
read_semaphore: Semaphore::new(config.block_max_concurrent_reads),
rc, rc,
resync, resync,
system, system,
@ -413,8 +408,8 @@ impl BlockManager {
} }
/// Get number of items in the refcount table /// Get number of items in the refcount table
pub fn rc_approximate_len(&self) -> Result<usize, Error> { pub fn rc_len(&self) -> Result<usize, Error> {
Ok(self.rc.rc_table.approximate_len()?) Ok(self.rc.rc_table.len()?)
} }
/// Send command to start/stop/manager scrub worker /// Send command to start/stop/manager scrub worker
@ -432,7 +427,7 @@ impl BlockManager {
/// List all resync errors /// List all resync errors
pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> { pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
let mut blocks = Vec::with_capacity(self.resync.errors.approximate_len()?); let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
for ent in self.resync.errors.iter()? { for ent in self.resync.errors.iter()? {
let (hash, cnt) = ent?; let (hash, cnt) = ent?;
let cnt = ErrorCounter::decode(&cnt); let cnt = ErrorCounter::decode(&cnt);
@ -562,6 +557,9 @@ impl BlockManager {
match self.find_block(hash).await { match self.find_block(hash).await {
Some(p) => self.read_block_from(hash, &p).await, Some(p) => self.read_block_from(hash, &p).await,
None => { None => {
// Not found, but maybe we should have had it? Schedule a resync to check.
self.resync
.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
return Err(Error::Message(format!( return Err(Error::Message(format!(
"block {:?} not found on node", "block {:?} not found on node",
hash hash
@ -583,15 +581,6 @@ impl BlockManager {
) -> Result<DataBlock, Error> { ) -> Result<DataBlock, Error> {
let (header, path) = block_path.as_parts_ref(); let (header, path) = block_path.as_parts_ref();
let permit = tokio::select! {
sem = self.read_semaphore.acquire() => sem.ok_or_message("acquire read semaphore")?,
_ = tokio::time::sleep(BLOCK_READ_SEMAPHORE_TIMEOUT) => {
self.metrics.block_read_semaphore_timeouts.add(1);
debug!("read block {:?}: read_semaphore acquire timeout", hash);
return Err(Error::Message("read block: read_semaphore acquire timeout".into()));
}
};
let mut f = fs::File::open(&path).await?; let mut f = fs::File::open(&path).await?;
let mut data = vec![]; let mut data = vec![];
f.read_to_end(&mut data).await?; f.read_to_end(&mut data).await?;
@ -616,8 +605,6 @@ impl BlockManager {
return Err(Error::CorruptData(*hash)); return Err(Error::CorruptData(*hash));
} }
drop(permit);
Ok(data) Ok(data)
} }
@ -783,7 +770,6 @@ impl BlockManagerLocked {
let mut f = fs::File::create(&path_tmp).await?; let mut f = fs::File::create(&path_tmp).await?;
f.write_all(data).await?; f.write_all(data).await?;
f.flush().await?;
mgr.metrics.bytes_written.add(data.len() as u64); mgr.metrics.bytes_written.add(data.len() as u64);
if mgr.data_fsync { if mgr.data_fsync {

View file

@ -22,7 +22,6 @@ pub struct BlockManagerMetrics {
pub(crate) bytes_read: BoundCounter<u64>, pub(crate) bytes_read: BoundCounter<u64>,
pub(crate) block_read_duration: BoundValueRecorder<f64>, pub(crate) block_read_duration: BoundValueRecorder<f64>,
pub(crate) block_read_semaphore_timeouts: BoundCounter<u64>,
pub(crate) bytes_written: BoundCounter<u64>, pub(crate) bytes_written: BoundCounter<u64>,
pub(crate) block_write_duration: BoundValueRecorder<f64>, pub(crate) block_write_duration: BoundValueRecorder<f64>,
pub(crate) delete_counter: BoundCounter<u64>, pub(crate) delete_counter: BoundCounter<u64>,
@ -51,7 +50,7 @@ impl BlockManagerMetrics {
.init(), .init(),
_rc_size: meter _rc_size: meter
.u64_value_observer("block.rc_size", move |observer| { .u64_value_observer("block.rc_size", move |observer| {
if let Ok(value) = rc_tree.approximate_len() { if let Ok(value) = rc_tree.len() {
observer.observe(value as u64, &[]) observer.observe(value as u64, &[])
} }
}) })
@ -59,7 +58,7 @@ impl BlockManagerMetrics {
.init(), .init(),
_resync_queue_len: meter _resync_queue_len: meter
.u64_value_observer("block.resync_queue_length", move |observer| { .u64_value_observer("block.resync_queue_length", move |observer| {
if let Ok(value) = resync_queue.approximate_len() { if let Ok(value) = resync_queue.len() {
observer.observe(value as u64, &[]); observer.observe(value as u64, &[]);
} }
}) })
@ -69,7 +68,7 @@ impl BlockManagerMetrics {
.init(), .init(),
_resync_errored_blocks: meter _resync_errored_blocks: meter
.u64_value_observer("block.resync_errored_blocks", move |observer| { .u64_value_observer("block.resync_errored_blocks", move |observer| {
if let Ok(value) = resync_errors.approximate_len() { if let Ok(value) = resync_errors.len() {
observer.observe(value as u64, &[]); observer.observe(value as u64, &[]);
} }
}) })
@ -120,11 +119,6 @@ impl BlockManagerMetrics {
.with_description("Duration of block read operations") .with_description("Duration of block read operations")
.init() .init()
.bind(&[]), .bind(&[]),
block_read_semaphore_timeouts: meter
.u64_counter("block.read_semaphore_timeouts")
.with_description("Number of block reads that failed due to semaphore acquire timeout")
.init()
.bind(&[]),
bytes_written: meter bytes_written: meter
.u64_counter("block.bytes_written") .u64_counter("block.bytes_written")
.with_description("Number of bytes written to disk") .with_description("Number of bytes written to disk")

View file

@ -106,13 +106,13 @@ impl BlockResyncManager {
} }
/// Get length of resync queue /// Get length of resync queue
pub fn queue_approximate_len(&self) -> Result<usize, Error> { pub fn queue_len(&self) -> Result<usize, Error> {
Ok(self.queue.approximate_len()?) Ok(self.queue.len()?)
} }
/// Get number of blocks that have an error /// Get number of blocks that have an error
pub fn errors_approximate_len(&self) -> Result<usize, Error> { pub fn errors_len(&self) -> Result<usize, Error> {
Ok(self.errors.approximate_len()?) Ok(self.errors.len()?)
} }
/// Clear the error counter for a block and put it in queue immediately /// Clear the error counter for a block and put it in queue immediately
@ -133,14 +133,6 @@ impl BlockResyncManager {
))) )))
} }
/// Clear the entire resync queue and list of errored blocks
/// Corresponds to `garage repair clear-resync-queue`
pub fn clear_resync_queue(&self) -> Result<(), Error> {
self.queue.clear()?;
self.errors.clear()?;
Ok(())
}
pub fn register_bg_vars(&self, vars: &mut vars::BgVars) { pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
let notify = self.notify.clone(); let notify = self.notify.clone();
vars.register_rw( vars.register_rw(
@ -556,11 +548,9 @@ impl Worker for ResyncWorker {
} }
WorkerStatus { WorkerStatus {
queue_length: Some(self.manager.resync.queue_approximate_len().unwrap_or(0) as u64), queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
tranquility: Some(tranquility), tranquility: Some(tranquility),
persistent_errors: Some( persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
self.manager.resync.errors_approximate_len().unwrap_or(0) as u64
),
..Default::default() ..Default::default()
} }
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_db" name = "garage_db"
version = "1.3.1" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -12,18 +12,14 @@ readme = "../../README.md"
path = "lib.rs" path = "lib.rs"
[dependencies] [dependencies]
thiserror.workspace = true err-derive.workspace = true
tracing.workspace = true tracing.workspace = true
heed = { workspace = true, optional = true } heed = { workspace = true, optional = true }
rusqlite = { workspace = true, optional = true, features = ["backup"] } rusqlite = { workspace = true, optional = true, features = ["backup"] }
r2d2 = { workspace = true, optional = true } r2d2 = { workspace = true, optional = true }
r2d2_sqlite = { workspace = true, optional = true } r2d2_sqlite = { workspace = true, optional = true }
fjall = { workspace = true, optional = true }
parking_lot = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
mktemp.workspace = true mktemp.workspace = true
@ -31,5 +27,4 @@ mktemp.workspace = true
default = [ "lmdb", "sqlite" ] default = [ "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ] bundled-libs = [ "rusqlite?/bundled" ]
lmdb = [ "heed" ] lmdb = [ "heed" ]
fjall = [ "dep:fjall", "dep:parking_lot" ]
sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ] sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]

View file

@ -1,453 +0,0 @@
use core::ops::Bound;
use std::path::PathBuf;
use std::sync::Arc;
use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
use fjall::{
PartitionCreateOptions, PersistMode, TransactionalKeyspace, TransactionalPartitionHandle,
WriteTransaction,
};
use crate::{
open::{Engine, OpenOpt},
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};
pub use fjall;
// --
pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
info!("Opening Fjall database at: {}", path.display());
if opt.fsync {
return Err(Error(
"metadata_fsync is not supported with the Fjall database engine".into(),
));
}
let mut config = fjall::Config::new(path);
if let Some(block_cache_size) = opt.fjall_block_cache_size {
config = config.cache_size(block_cache_size as u64);
}
let keyspace = config.open_transactional()?;
Ok(FjallDb::init(keyspace))
}
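A hedged sketch of how this opener is invoked (the path and cache size are illustrative; `OpenOpt` fields are as defined in this crate):

```rust
use std::path::PathBuf;

let opt = OpenOpt {
    fsync: false,                            // metadata_fsync is rejected for Fjall above
    lmdb_map_size: None,
    fjall_block_cache_size: Some(128 << 20), // 128 MiB block cache
};
let db = open_db(&PathBuf::from("/var/lib/garage/meta/db.fjall"), &opt)
    .expect("open fjall db");
```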
// -- err
impl From<fjall::Error> for Error {
fn from(e: fjall::Error) -> Error {
Error(format!("fjall: {}", e).into())
}
}
impl From<fjall::LsmError> for Error {
fn from(e: fjall::LsmError) -> Error {
Error(format!("fjall lsm_tree: {}", e).into())
}
}
impl From<fjall::Error> for TxOpError {
fn from(e: fjall::Error) -> TxOpError {
TxOpError(e.into())
}
}
// -- db
pub struct FjallDb {
keyspace: TransactionalKeyspace,
trees: RwLock<Vec<(String, TransactionalPartitionHandle)>>,
}
type ByteRefRangeBound<'r> = (Bound<&'r [u8]>, Bound<&'r [u8]>);
impl FjallDb {
pub fn init(keyspace: TransactionalKeyspace) -> Db {
let s = Self {
keyspace,
trees: RwLock::new(Vec::new()),
};
Db(Arc::new(s))
}
fn get_tree(
&self,
i: usize,
) -> Result<MappedRwLockReadGuard<'_, TransactionalPartitionHandle>> {
RwLockReadGuard::try_map(self.trees.read(), |trees: &Vec<_>| {
trees.get(i).map(|tup| &tup.1)
})
.map_err(|_| Error("invalid tree id".into()))
}
}
impl IDb for FjallDb {
fn engine(&self) -> String {
"Fjall (EXPERIMENTAL!)".into()
}
fn open_tree(&self, name: &str) -> Result<usize> {
let mut trees = self.trees.write();
let safe_name = encode_name(name)?;
if let Some(i) = trees.iter().position(|(name, _)| *name == safe_name) {
Ok(i)
} else {
let tree = self
.keyspace
.open_partition(&safe_name, PartitionCreateOptions::default())?;
let i = trees.len();
trees.push((safe_name, tree));
Ok(i)
}
}
fn list_trees(&self) -> Result<Vec<String>> {
Ok(self
.keyspace
.list_partitions()
.iter()
.map(|n| decode_name(&n))
.collect::<Result<Vec<_>>>()?)
}
fn snapshot(&self, base_path: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?;
let path = Engine::Fjall.db_path(base_path);
let source_state = self.keyspace.read_tx();
let copy_keyspace = fjall::Config::new(path).open()?;
for partition_name in self.keyspace.list_partitions() {
let source_partition = self
.keyspace
.open_partition(&partition_name, PartitionCreateOptions::default())?;
let copy_partition =
copy_keyspace.open_partition(&partition_name, PartitionCreateOptions::default())?;
for entry in source_state.iter(&source_partition) {
let (key, value) = entry?;
copy_partition.insert(key, value)?;
}
}
copy_keyspace.persist(PersistMode::SyncAll)?;
Ok(())
}
// ----
fn get(&self, tree_idx: usize, key: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
let val = tx.get(&tree, key)?;
match val {
None => Ok(None),
Some(v) => Ok(Some(v.to_vec())),
}
}
fn approximate_len(&self, tree_idx: usize) -> Result<usize> {
let tree = self.get_tree(tree_idx)?;
Ok(tree.approximate_len())
}
fn is_empty(&self, tree_idx: usize) -> Result<bool> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
Ok(tx.is_empty(&tree)?)
}
fn insert(&self, tree_idx: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree_idx)?;
let mut tx = self.keyspace.write_tx();
tx.insert(&tree, key, value);
tx.commit()?;
Ok(())
}
fn remove(&self, tree_idx: usize, key: &[u8]) -> Result<()> {
let tree = self.get_tree(tree_idx)?;
let mut tx = self.keyspace.write_tx();
tx.remove(&tree, key);
tx.commit()?;
Ok(())
}
fn clear(&self, tree_idx: usize) -> Result<()> {
let mut trees = self.trees.write();
if tree_idx >= trees.len() {
return Err(Error("invalid tree id".into()));
}
let (name, tree) = trees.remove(tree_idx);
self.keyspace.delete_partition(tree)?;
let tree = self
.keyspace
.open_partition(&name, PartitionCreateOptions::default())?;
trees.insert(tree_idx, (name, tree));
Ok(())
}
fn iter(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
Ok(Box::new(tx.iter(&tree).map(iterator_remap)))
}
fn iter_rev(&self, tree_idx: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
Ok(Box::new(tx.iter(&tree).rev().map(iterator_remap)))
}
fn range<'r>(
&self,
tree_idx: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
Ok(Box::new(
tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
.map(iterator_remap),
))
}
fn range_rev<'r>(
&self,
tree_idx: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let tx = self.keyspace.read_tx();
Ok(Box::new(
tx.range::<&'r [u8], ByteRefRangeBound>(&tree, (low, high))
.rev()
.map(iterator_remap),
))
}
// ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
let trees = self.trees.read();
let mut tx = FjallTx {
trees: &trees[..],
tx: self.keyspace.write_tx(),
};
let res = f.try_on(&mut tx);
match res {
TxFnResult::Ok(on_commit) => {
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
Ok(on_commit)
}
TxFnResult::Abort => {
tx.tx.rollback();
Err(TxError::Abort(()))
}
TxFnResult::DbErr => {
tx.tx.rollback();
Err(TxError::Db(Error(
"(this message will be discarded)".into(),
)))
}
}
}
}
// ----
struct FjallTx<'a> {
trees: &'a [(String, TransactionalPartitionHandle)],
tx: WriteTransaction<'a>,
}
impl<'a> FjallTx<'a> {
fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalPartitionHandle> {
self.trees.get(i).map(|tup| &tup.1).ok_or_else(|| {
TxOpError(Error(
"invalid tree id (it might have been openned after the transaction started)".into(),
))
})
}
}
impl<'a> ITx for FjallTx<'a> {
fn get(&self, tree_idx: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree_idx)?;
match self.tx.get(tree, key)? {
Some(v) => Ok(Some(v.to_vec())),
None => Ok(None),
}
}
fn len(&self, tree_idx: usize) -> TxOpResult<usize> {
let tree = self.get_tree(tree_idx)?;
Ok(self.tx.len(tree)? as usize)
}
fn insert(&mut self, tree_idx: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
let tree = self.get_tree(tree_idx)?.clone();
self.tx.insert(&tree, key, value);
Ok(())
}
fn remove(&mut self, tree_idx: usize, key: &[u8]) -> TxOpResult<()> {
let tree = self.get_tree(tree_idx)?.clone();
self.tx.remove(&tree, key);
Ok(())
}
fn clear(&mut self, _tree_idx: usize) -> TxOpResult<()> {
unimplemented!("LSM tree clearing in cross-partition transaction is not supported")
}
fn iter(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
let tree = self.get_tree(tree_idx)?.clone();
Ok(Box::new(self.tx.iter(&tree).map(iterator_remap_tx)))
}
fn iter_rev(&self, tree_idx: usize) -> TxOpResult<TxValueIter<'_>> {
let tree = self.get_tree(tree_idx)?.clone();
Ok(Box::new(self.tx.iter(&tree).rev().map(iterator_remap_tx)))
}
fn range<'r>(
&self,
tree_idx: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let low = clone_bound(low);
let high = clone_bound(high);
Ok(Box::new(
self.tx
.range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
.map(iterator_remap_tx),
))
}
fn range_rev<'r>(
&self,
tree_idx: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
let tree = self.get_tree(tree_idx)?;
let low = clone_bound(low);
let high = clone_bound(high);
Ok(Box::new(
self.tx
.range::<Vec<u8>, ByteVecRangeBounds>(&tree, (low, high))
.rev()
.map(iterator_remap_tx),
))
}
}
// -- maps fjall's (k, v) to ours
fn iterator_remap(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> Result<(Value, Value)> {
r.map(|(k, v)| (k.to_vec(), v.to_vec()))
.map_err(|e| e.into())
}
fn iterator_remap_tx(r: fjall::Result<(fjall::Slice, fjall::Slice)>) -> TxOpResult<(Value, Value)> {
r.map(|(k, v)| (k.to_vec(), v.to_vec()))
.map_err(|e| e.into())
}
// -- utils to deal with Garage's tightness on Bound lifetimes
type ByteVecBound = Bound<Vec<u8>>;
type ByteVecRangeBounds = (ByteVecBound, ByteVecBound);
fn clone_bound(bound: Bound<&[u8]>) -> ByteVecBound {
let value = match bound {
Bound::Excluded(v) | Bound::Included(v) => v.to_vec(),
Bound::Unbounded => vec![],
};
match bound {
Bound::Included(_) => Bound::Included(value),
Bound::Excluded(_) => Bound::Excluded(value),
Bound::Unbounded => Bound::Unbounded,
}
}
// -- utils to encode table names --
fn encode_name(s: &str) -> Result<String> {
let base = 'A' as u32;
let mut ret = String::with_capacity(s.len() + 10);
for c in s.chars() {
if c.is_alphanumeric() || c == '_' || c == '-' || c == '#' {
ret.push(c);
} else if c <= u8::MAX as char {
ret.push('$');
let c_hi = c as u32 / 16;
let c_lo = c as u32 % 16;
ret.push(char::from_u32(base + c_hi).unwrap());
ret.push(char::from_u32(base + c_lo).unwrap());
} else {
return Err(Error(
format!("table name {} could not be safely encoded", s).into(),
));
}
}
Ok(ret)
}
fn decode_name(s: &str) -> Result<String> {
use std::convert::TryFrom;
let errfn = || Error(format!("encoded table name {} is invalid", s).into());
let c_map = |c: char| {
let c = c as u32;
let base = 'A' as u32;
if (base..base + 16).contains(&c) {
Some(c - base)
} else {
None
}
};
let mut ret = String::with_capacity(s.len());
let mut it = s.chars();
while let Some(c) = it.next() {
if c == '$' {
let c_hi = it.next().and_then(c_map).ok_or_else(errfn)?;
let c_lo = it.next().and_then(c_map).ok_or_else(errfn)?;
let c_dec = char::try_from(c_hi * 16 + c_lo).map_err(|_| errfn())?;
ret.push(c_dec);
} else {
ret.push(c);
}
}
Ok(ret)
}
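A worked example of the escaping above: a space is byte 0x20, so c_hi = 2 (encoded as 'C') and c_lo = 0 (encoded as 'A'), counting from base 'A':

```rust
assert_eq!(encode_name("test name").unwrap(), "test$CAname");
assert_eq!(decode_name("test$CAname").unwrap(), "test name");
```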
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_encdec_name() {
for name in [
"testname",
"test_name",
"test name",
"test$name",
"test:name@help.me$get/this**right",
] {
let encname = encode_name(name).unwrap();
assert!(!encname.contains(' '));
assert!(!encname.contains('.'));
assert!(!encname.contains('*'));
assert_eq!(*name, decode_name(&encname).unwrap());
}
}
}

View file

@ -1,8 +1,6 @@
#[macro_use] #[macro_use]
extern crate tracing; extern crate tracing;
#[cfg(feature = "fjall")]
pub mod fjall_adapter;
#[cfg(feature = "lmdb")] #[cfg(feature = "lmdb")]
pub mod lmdb_adapter; pub mod lmdb_adapter;
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
@ -20,7 +18,7 @@ use std::cell::Cell;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use thiserror::Error; use err_derive::Error;
pub use open::*; pub use open::*;
@ -44,7 +42,7 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
// ---- // ----
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("{0}")] #[error(display = "{}", _0)]
pub struct Error(pub Cow<'static, str>); pub struct Error(pub Cow<'static, str>);
impl From<std::io::Error> for Error { impl From<std::io::Error> for Error {
@ -56,7 +54,7 @@ impl From<std::io::Error> for Error {
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("{0}")] #[error(display = "{}", _0)]
pub struct TxOpError(pub(crate) Error); pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>; pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
@ -106,44 +104,32 @@ impl Db {
result: Cell::new(None), result: Cell::new(None),
}; };
let tx_res = self.0.transaction(&f); let tx_res = self.0.transaction(&f);
let fn_res = f.result.into_inner(); let ret = f
.result
.into_inner()
.expect("Transaction did not store result");
match (tx_res, fn_res) { match tx_res {
(Ok(on_commit), Some(Ok(value))) => { Ok(on_commit) => match ret {
// Transaction succeeded Ok(value) => {
// TxFn stored the value to return to the user in fn_res on_commit.into_iter().for_each(|f| f());
// tx_res contains the on_commit list of callbacks, run them now Ok(value)
on_commit.into_iter().for_each(|f| f()); }
Ok(value) _ => unreachable!(),
} },
(Err(TxError::Abort(())), Some(Err(TxError::Abort(e)))) => { Err(TxError::Abort(())) => match ret {
// Transaction was aborted by user code Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
// The abort error value is stored in fn_res _ => unreachable!(),
Err(TxError::Abort(e)) },
} Err(TxError::Db(e2)) => match ret {
(Err(TxError::Db(_tx_e)), Some(Err(TxError::Db(fn_e)))) => { // Ok was stored -> the error occurred when finalizing
// Transaction encountered a DB error in user code // transaction
// The error value encountered is the one in fn_res, Ok(_) => Err(TxError::Db(e2)),
// tx_res contains only a dummy error message // An error was already stored: that's the one we want to
Err(TxError::Db(fn_e)) // return
} Err(TxError::Db(e)) => Err(TxError::Db(e)),
(Err(TxError::Db(tx_e)), None) => { _ => unreachable!(),
// Transaction encountered a DB error when initializing the transaction, },
// before user code was called
Err(TxError::Db(tx_e))
}
(Err(TxError::Db(tx_e)), Some(Ok(_))) => {
// Transaction encountered a DB error when committing the transaction,
// after user code was called
Err(TxError::Db(tx_e))
}
(tx_res, fn_res) => {
panic!(
"unexpected error case: tx_res={:?}, fn_res={:?}",
tx_res.map(|_| "..."),
fn_res.map(|x| x.map(|_| "...").map_err(|_| "..."))
);
}
} }
} }
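A hedged usage sketch of the closure-based wrapper whose result plumbing is rewritten above (the closure signature and `Tree` handle are assumed from this diff's internals, not confirmed by it):

```rust
let tree = db.open_tree("example").expect("open tree"); // tree name illustrative
let res: TxResult<u64, ()> = db.transaction(|tx| {
    match tx.get(&tree, b"counter")? {   // a TxOpError here surfaces as TxError::Db
        Some(_) => Ok(1),                // fn_res = Some(Ok(1)): on_commit runs, Ok(1) returned
        None => Err(TxError::Abort(())), // fn_res = Some(Err(Abort)): rolled back
    }
});
```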
@ -166,7 +152,7 @@ impl Db {
let tree_names = other.list_trees()?; let tree_names = other.list_trees()?;
for name in tree_names { for name in tree_names {
let tree = self.open_tree(&name)?; let tree = self.open_tree(&name)?;
if !tree.is_empty()? { if tree.len()? > 0 {
return Err(Error(format!("tree {} already contains data", name).into())); return Err(Error(format!("tree {} already contains data", name).into()));
} }
@ -208,12 +194,8 @@ impl Tree {
self.0.get(self.1, key.as_ref()) self.0.get(self.1, key.as_ref())
} }
#[inline] #[inline]
pub fn approximate_len(&self) -> Result<usize> { pub fn len(&self) -> Result<usize> {
self.0.approximate_len(self.1) self.0.len(self.1)
}
#[inline]
pub fn is_empty(&self) -> Result<bool> {
self.0.is_empty(self.1)
} }
#[inline] #[inline]
@ -351,8 +333,7 @@ pub(crate) trait IDb: Send + Sync {
fn snapshot(&self, path: &PathBuf) -> Result<()>; fn snapshot(&self, path: &PathBuf) -> Result<()>;
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>; fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn approximate_len(&self, tree: usize) -> Result<usize>; fn len(&self, tree: usize) -> Result<usize>;
fn is_empty(&self, tree: usize) -> Result<bool>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>; fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<()>; fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;

View file

@ -1,8 +1,8 @@
use core::ops::Bound; use core::ops::Bound;
use core::ptr::NonNull;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto; use std::convert::TryInto;
use std::marker::PhantomPinned;
use std::path::PathBuf; use std::path::PathBuf;
use std::pin::Pin; use std::pin::Pin;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@ -11,55 +11,12 @@ use heed::types::ByteSlice;
use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database}; use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};
use crate::{ use crate::{
open::{Engine, OpenOpt},
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter, TxResult, TxValueIter, Value, ValueIter,
}; };
pub use heed; pub use heed;
// ---- top-level open function
pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
info!("Opening LMDB database at: {}", path.display());
if let Err(e) = std::fs::create_dir_all(&path) {
return Err(Error(
format!("Unable to create LMDB data directory: {}", e).into(),
));
}
let map_size = match opt.lmdb_map_size {
None => recommended_map_size(),
Some(v) => v - (v % 4096),
};
let mut env_builder = heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);
}
}
match env_builder.open(&path) {
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
return Err(Error(
"OutOfMemory error while trying to open LMDB database. This can happen \
if your operating system is not allowing you to use sufficient virtual \
memory address space. Please check that no limit is set (ulimit -v). \
You may also try to set a smaller `lmdb_map_size` configuration parameter. \
On 32-bit machines, you should probably switch to another database engine."
.into(),
))
}
Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
Ok(db) => Ok(LmdbDb::init(db)),
}
}
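A hedged illustration of the map-size rounding above: any configured value is truncated down to a multiple of the 4096-byte page size.

```rust
let v: usize = 10_000_000;
assert_eq!(v - (v % 4096), 9_998_336); // 2441 * 4096
```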
// -- err // -- err
impl From<heed::Error> for Error { impl From<heed::Error> for Error {
@ -147,11 +104,12 @@ impl IDb for LmdbDb {
Ok(ret2) Ok(ret2)
} }
fn snapshot(&self, base_path: &PathBuf) -> Result<()> { fn snapshot(&self, to: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?; std::fs::create_dir_all(to)?;
let path = Engine::Lmdb.db_path(base_path); let mut path = to.clone();
path.push("data.mdb");
self.db self.db
.copy_to_path(path, heed::CompactionOption::Enabled)?; .copy_to_path(path, heed::CompactionOption::Disabled)?;
Ok(()) Ok(())
} }
@ -168,16 +126,11 @@ impl IDb for LmdbDb {
} }
} }
fn approximate_len(&self, tree: usize) -> Result<usize> { fn len(&self, tree: usize) -> Result<usize> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?; let tx = self.db.read_txn()?;
Ok(tree.len(&tx)?.try_into().unwrap()) Ok(tree.len(&tx)?.try_into().unwrap())
} }
fn is_empty(&self, tree: usize) -> Result<bool> {
let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?;
Ok(tree.is_empty(&tx)?)
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> { fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
@ -206,15 +159,13 @@ impl IDb for LmdbDb {
fn iter(&self, tree: usize) -> Result<ValueIter<'_>> { fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?; let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere, TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?))
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.iter(tx)?)) }
} }
fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> { fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?; let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere, TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?))
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_iter(tx)?)) }
} }
fn range<'r>( fn range<'r>(
@ -225,8 +176,7 @@ impl IDb for LmdbDb {
) -> Result<ValueIter<'_>> { ) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?; let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere, TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?))
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.range(tx, &(low, high))?)) }
} }
fn range_rev<'r>( fn range_rev<'r>(
&self, &self,
@ -236,8 +186,7 @@ impl IDb for LmdbDb {
) -> Result<ValueIter<'_>> { ) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let tx = self.db.read_txn()?; let tx = self.db.read_txn()?;
// Safety: the closure does not store its argument anywhere, TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?))
unsafe { TxAndIterator::make(tx, |tx| Ok(tree.rev_range(tx, &(low, high))?)) }
} }
// ---- // ----
@ -367,41 +316,28 @@ where
{ {
tx: RoTxn<'a>, tx: RoTxn<'a>,
iter: Option<I>, iter: Option<I>,
_pin: PhantomPinned,
} }
impl<'a, I> TxAndIterator<'a, I> impl<'a, I> TxAndIterator<'a, I>
where where
I: Iterator<Item = IteratorItem<'a>> + 'a, I: Iterator<Item = IteratorItem<'a>> + 'a,
{ {
fn iter(self: Pin<&mut Self>) -> &mut Option<I> { fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
// Safety: iter is not structural
unsafe { &mut self.get_unchecked_mut().iter }
}
/// Safety: iterfun must not store its argument anywhere but in its result.
unsafe fn make<F>(tx: RoTxn<'a>, iterfun: F) -> Result<ValueIter<'a>>
where where
F: FnOnce(&'a RoTxn<'a>) -> Result<I>, F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
{ {
let res = TxAndIterator { let res = TxAndIterator { tx, iter: None };
tx,
iter: None,
_pin: PhantomPinned,
};
let mut boxed = Box::pin(res); let mut boxed = Box::pin(res);
let tx_lifetime_overextended: &'a RoTxn<'a> = { // This unsafe allows us to bypass lifetime checks
let tx = &boxed.tx; let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
// Safety: Artificially extending the lifetime because let iter = iterfun(tx)?;
// this reference will only be stored and accessed from the
// returned ValueIter which guarantees that it is destroyed
// before the tx it is pointing to.
unsafe { &*&raw const *tx }
};
let iter = iterfun(&tx_lifetime_overextended)?;
*boxed.as_mut().iter() = Some(iter); let mut_ref = Pin::as_mut(&mut boxed);
// This unsafe allows us to write in a field of the pinned struct
unsafe {
Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
}
Ok(Box::new(TxAndIteratorPin(boxed))) Ok(Box::new(TxAndIteratorPin(boxed)))
} }
@ -412,10 +348,8 @@ where
I: Iterator<Item = IteratorItem<'a>> + 'a, I: Iterator<Item = IteratorItem<'a>> + 'a,
{ {
fn drop(&mut self) { fn drop(&mut self) {
// Safety: `new_unchecked` is okay because we know this value is never // ensure the iterator is dropped before the RoTxn it references
// used again after being dropped. drop(self.iter.take());
let this = unsafe { Pin::new_unchecked(self) };
drop(this.iter().take());
} }
} }
@ -431,12 +365,13 @@ where
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
let mut_ref = Pin::as_mut(&mut self.0); let mut_ref = Pin::as_mut(&mut self.0);
let next = mut_ref.iter().as_mut()?.next()?; // This unsafe allows us to mutably access the iterator field
let res = match next { let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
Err(e) => Err(e.into()), match next {
Ok((k, v)) => Ok((k.to_vec(), v.to_vec())), None => None,
}; Some(Err(e)) => Some(Err(e.into())),
Some(res) Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
}
} }
} }

View file

@ -11,7 +11,6 @@ use crate::{Db, Error, Result};
pub enum Engine { pub enum Engine {
Lmdb, Lmdb,
Sqlite, Sqlite,
Fjall,
} }
impl Engine { impl Engine {
@ -20,26 +19,8 @@ impl Engine {
match self { match self {
Self::Lmdb => "lmdb", Self::Lmdb => "lmdb",
Self::Sqlite => "sqlite", Self::Sqlite => "sqlite",
Self::Fjall => "fjall",
} }
} }
/// Return engine-specific DB path from base path
pub fn db_path(&self, base_path: &PathBuf) -> PathBuf {
let mut ret = base_path.clone();
match self {
Self::Lmdb => {
ret.push("db.lmdb");
}
Self::Sqlite => {
ret.push("db.sqlite");
}
Self::Fjall => {
ret.push("db.fjall");
}
}
ret
}
} }
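A hedged example of the per-engine snapshot layout implied by `db_path` (base path illustrative):

```rust
use std::path::PathBuf;

let base = PathBuf::from("/var/lib/garage/meta/snapshots/2025-02-18");
assert_eq!(
    Engine::Lmdb.db_path(&base),
    PathBuf::from("/var/lib/garage/meta/snapshots/2025-02-18/db.lmdb"),
);
```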
impl std::fmt::Display for Engine { impl std::fmt::Display for Engine {
@ -55,11 +36,10 @@ impl std::str::FromStr for Engine {
match text { match text {
"lmdb" | "heed" => Ok(Self::Lmdb), "lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite), "sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"fjall" => Ok(Self::Fjall),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())), "sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
kind => Err(Error( kind => Err(Error(
format!( format!(
"Invalid DB engine: {} (options are: lmdb, sqlite, fjall)", "Invalid DB engine: {} (options are: lmdb, sqlite)",
kind kind
) )
.into(), .into(),
@ -71,7 +51,6 @@ impl std::str::FromStr for Engine {
pub struct OpenOpt { pub struct OpenOpt {
pub fsync: bool, pub fsync: bool,
pub lmdb_map_size: Option<usize>, pub lmdb_map_size: Option<usize>,
pub fjall_block_cache_size: Option<usize>,
} }
impl Default for OpenOpt { impl Default for OpenOpt {
@ -79,7 +58,6 @@ impl Default for OpenOpt {
Self { Self {
fsync: false, fsync: false,
lmdb_map_size: None, lmdb_map_size: None,
fjall_block_cache_size: None,
} }
} }
} }
@ -88,15 +66,53 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
match engine { match engine {
// ---- Sqlite DB ---- // ---- Sqlite DB ----
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
Engine::Sqlite => crate::sqlite_adapter::open_db(path, opt), Engine::Sqlite => {
info!("Opening Sqlite database at: {}", path.display());
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
}
// ---- LMDB DB ---- // ---- LMDB DB ----
#[cfg(feature = "lmdb")] #[cfg(feature = "lmdb")]
Engine::Lmdb => crate::lmdb_adapter::open_db(path, opt), Engine::Lmdb => {
info!("Opening LMDB database at: {}", path.display());
if let Err(e) = std::fs::create_dir_all(&path) {
return Err(Error(
format!("Unable to create LMDB data directory: {}", e).into(),
));
}
// ---- Fjall DB ---- let map_size = match opt.lmdb_map_size {
#[cfg(feature = "fjall")] None => crate::lmdb_adapter::recommended_map_size(),
Engine::Fjall => crate::fjall_adapter::open_db(path, opt), Some(v) => v - (v % 4096),
};
let mut env_builder = heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);
}
}
match env_builder.open(&path) {
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
return Err(Error(
"OutOfMemory error while trying to open LMDB database. This can happen \
if your operating system is not allowing you to use sufficient virtual \
memory address space. Please check that no limit is set (ulimit -v). \
You may also try to set a smaller `lmdb_map_size` configuration parameter. \
On 32-bit machines, you should probably switch to another database engine."
.into(),
))
}
Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
Ok(db) => Ok(crate::lmdb_adapter::LmdbDb::init(db)),
}
}
// Pattern is unreachable when all supported DB engines are compiled into the binary. The allow // Pattern is unreachable when all supported DB engines are compiled into the binary. The allow
// attribute is added so that we won't have to change this match in case we stop building // attribute is added so that we won't have to change this match in case we stop building

View file

@ -11,23 +11,12 @@ use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::{params, Rows, Statement, Transaction}; use rusqlite::{params, Rows, Statement, Transaction};
use crate::{ use crate::{
open::{Engine, OpenOpt},
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter, TxResult, TxValueIter, Value, ValueIter,
}; };
pub use rusqlite; pub use rusqlite;
// ---- top-level open function
pub(crate) fn open_db(path: &PathBuf, opt: &OpenOpt) -> Result<Db> {
info!("Opening Sqlite database at: {}", path.display());
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
Ok(SqliteDb::new(manager, opt.fsync)?)
}
// ----
type Connection = r2d2::PooledConnection<SqliteConnectionManager>; type Connection = r2d2::PooledConnection<SqliteConnectionManager>;
// --- err // --- err
@ -150,18 +139,17 @@ impl IDb for SqliteDb {
Ok(trees) Ok(trees)
} }
fn snapshot(&self, base_path: &PathBuf) -> Result<()> { fn snapshot(&self, to: &PathBuf) -> Result<()> {
std::fs::create_dir_all(base_path)?; fn progress(p: rusqlite::backup::Progress) {
let path = Engine::Sqlite let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
.db_path(&base_path) info!("Sqlite snapshot progress: {}%", percent);
.into_os_string() }
.into_string() std::fs::create_dir_all(to)?;
.map_err(|_| Error("invalid sqlite path string".into()))?; let mut path = to.clone();
path.push("db.sqlite");
info!("Start sqlite VACUUM INTO `{}`", path); self.db
self.db.get()?.execute("VACUUM INTO ?1", params![path])?; .get()?
info!("Finished sqlite VACUUM INTO `{}`", path); .backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
Ok(()) Ok(())
} }
@ -172,7 +160,7 @@ impl IDb for SqliteDb {
self.internal_get(&self.db.get()?, &tree, key) self.internal_get(&self.db.get()?, &tree, key)
} }
fn approximate_len(&self, tree: usize) -> Result<usize> { fn len(&self, tree: usize) -> Result<usize> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let db = self.db.get()?; let db = self.db.get()?;
@ -184,10 +172,6 @@ impl IDb for SqliteDb {
} }
} }
fn is_empty(&self, tree: usize) -> Result<bool> {
Ok(self.approximate_len(tree)? == 0)
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> { fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?; let tree = self.get_tree(tree)?;
let db = self.db.get()?; let db = self.db.get()?;

View file

@ -1,7 +1,7 @@
use crate::*; use crate::*;
fn test_suite(db: Db) { fn test_suite(db: Db) {
let tree = db.open_tree("tree:this_is_a_tree").unwrap(); let tree = db.open_tree("tree").unwrap();
let ka: &[u8] = &b"test"[..]; let ka: &[u8] = &b"test"[..];
let kb: &[u8] = &b"zwello"[..]; let kb: &[u8] = &b"zwello"[..];
@ -14,7 +14,7 @@ fn test_suite(db: Db) {
assert!(tree.insert(ka, va).is_ok()); assert!(tree.insert(ka, va).is_ok());
assert_eq!(tree.get(ka).unwrap().unwrap(), va); assert_eq!(tree.get(ka).unwrap().unwrap(), va);
assert_eq!(tree.iter().unwrap().count(), 1); assert_eq!(tree.len().unwrap(), 1);
// ---- test transaction logic ---- // ---- test transaction logic ----
@ -148,15 +148,3 @@ fn test_sqlite_db() {
let db = SqliteDb::new(manager, false).unwrap(); let db = SqliteDb::new(manager, false).unwrap();
test_suite(db); test_suite(db);
} }
#[test]
#[cfg(feature = "fjall")]
fn test_fjall_db() {
use crate::fjall_adapter::{fjall, FjallDb};
let path = mktemp::Temp::new_dir().unwrap();
let config = fjall::Config::new(path).temporary(true);
let keyspace = config.open_transactional().unwrap();
let db = FjallDb::init(keyspace);
test_suite(db);
}

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage" name = "garage"
version = "1.3.1" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -57,13 +57,11 @@ opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true } opentelemetry-prometheus = { workspace = true, optional = true }
opentelemetry-otlp = { workspace = true, optional = true } opentelemetry-otlp = { workspace = true, optional = true }
syslog-tracing = { workspace = true, optional = true } syslog-tracing = { workspace = true, optional = true }
tracing-journald = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
garage_api_common.workspace = true garage_api_common.workspace = true
aws-sdk-s3.workspace = true aws-sdk-s3.workspace = true
aws-smithy-runtime.workspace = true
chrono.workspace = true chrono.workspace = true
http.workspace = true http.workspace = true
hmac.workspace = true hmac.workspace = true
@ -73,12 +71,10 @@ hyper-util.workspace = true
mktemp.workspace = true mktemp.workspace = true
sha2.workspace = true sha2.workspace = true
static_init.workspace = true static_init.workspace = true
assert-json-diff.workspace = true assert-json-diff.workspace = true
serde_json.workspace = true serde_json.workspace = true
base64.workspace = true base64.workspace = true
crc32fast.workspace = true
k2v-client.workspace = true k2v-client.workspace = true
@ -91,7 +87,6 @@ k2v = [ "garage_util/k2v", "garage_api_k2v" ]
# Database engines # Database engines
lmdb = [ "garage_model/lmdb" ] lmdb = [ "garage_model/lmdb" ]
sqlite = [ "garage_model/sqlite" ] sqlite = [ "garage_model/sqlite" ]
fjall = [ "garage_model/fjall" ]
# Automatic registration and discovery via Consul API # Automatic registration and discovery via Consul API
consul-discovery = [ "garage_rpc/consul-discovery" ] consul-discovery = [ "garage_rpc/consul-discovery" ]
@ -103,8 +98,6 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
telemetry-otlp = [ "opentelemetry-otlp" ] telemetry-otlp = [ "opentelemetry-otlp" ]
# Logging to syslog # Logging to syslog
syslog = [ "syslog-tracing" ] syslog = [ "syslog-tracing" ]
# Logging to journald
journald = [ "tracing-journald" ]
# NOTE: bundled-libs and system-libs should be treated as mutually exclusive; # NOTE: bundled-libs and system-libs should be treated as mutually exclusive;
# exactly one of them should be enabled. # exactly one of them should be enabled.

View file

@ -101,7 +101,6 @@ impl AdminRpcHandler {
let mut obj_dels = 0; let mut obj_dels = 0;
let mut mpu_dels = 0; let mut mpu_dels = 0;
let mut ver_dels = 0; let mut ver_dels = 0;
let mut br_dels = 0;
for hash in blocks { for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
@ -132,19 +131,12 @@ impl AdminRpcHandler {
ver_dels += 1; ver_dels += 1;
} }
} }
if !br.deleted.get() {
let mut br = br;
br.deleted.set();
self.garage.block_ref_table.insert(&br).await?;
br_dels += 1;
}
} }
} }
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"Purged {} blocks: marked {} block refs, {} versions, {} objects and {} multipart uploads as deleted", "Purged {} blocks, {} versions, {} objects, {} multipart uploads",
blocks.len(), blocks.len(),
br_dels,
ver_dels, ver_dels,
obj_dels, obj_dels,
mpu_dels, mpu_dels,

View file

@ -126,7 +126,7 @@ impl AdminRpcHandler {
#[allow(clippy::ptr_arg)] #[allow(clippy::ptr_arg)]
async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> { async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
if !is_valid_bucket_name(name, self.garage.config.allow_punycode) { if !is_valid_bucket_name(name) {
return Err(Error::BadRequest(format!( return Err(Error::BadRequest(format!(
"{}: {}", "{}: {}",
name, INVALID_BUCKET_NAME_MESSAGE name, INVALID_BUCKET_NAME_MESSAGE
@ -390,9 +390,15 @@ impl AdminRpcHandler {
} }
let website = if query.allow { let website = if query.allow {
let (redirect_all, routing_rules) = match bucket_state.website_config.get() {
Some(wc) => (wc.redirect_all.clone(), wc.routing_rules.clone()),
None => (None, Vec::new()),
};
Some(WebsiteConfig { Some(WebsiteConfig {
index_document: query.index_document.clone(), index_document: query.index_document.clone(),
error_document: query.error_document.clone(), error_document: query.error_document.clone(),
redirect_all,
routing_rules,
}) })
} else { } else {
None None

View file

@ -219,7 +219,7 @@ impl AdminRpcHandler {
// Gather block manager statistics // Gather block manager statistics
writeln!(&mut ret, "\nBlock manager stats:").unwrap(); writeln!(&mut ret, "\nBlock manager stats:").unwrap();
let rc_len = self.garage.block_manager.rc_approximate_len()?.to_string(); let rc_len = self.garage.block_manager.rc_len()?.to_string();
writeln!( writeln!(
&mut ret, &mut ret,
@ -230,13 +230,13 @@ impl AdminRpcHandler {
writeln!( writeln!(
&mut ret, &mut ret,
" resync queue length: {}", " resync queue length: {}",
self.garage.block_manager.resync.queue_approximate_len()? self.garage.block_manager.resync.queue_len()?
) )
.unwrap(); .unwrap();
writeln!( writeln!(
&mut ret, &mut ret,
" blocks with resync errors: {}", " blocks with resync errors: {}",
self.garage.block_manager.resync.errors_approximate_len()? self.garage.block_manager.resync.errors_len()?
) )
.unwrap(); .unwrap();
@ -346,21 +346,16 @@ impl AdminRpcHandler {
F: TableSchema + 'static, F: TableSchema + 'static,
R: TableReplication + 'static, R: TableReplication + 'static,
{ {
let data_len = t let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
.data let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();
.store
.approximate_len()
.map_err(GarageError::from)?
.to_string();
let mkl_len = t.merkle_updater.merkle_tree_approximate_len()?.to_string();
Ok(format!( Ok(format!(
" {}\t{}\t{}\t{}\t{}", " {}\t{}\t{}\t{}\t{}",
F::TABLE_NAME, F::TABLE_NAME,
data_len, data_len,
mkl_len, mkl_len,
t.merkle_updater.todo_approximate_len()?, t.merkle_updater.todo_len()?,
t.data.gc_todo_approximate_len()? t.data.gc_todo_len()?
)) ))
} }

View file

@ -466,10 +466,6 @@ pub enum RepairWhat {
/// Repair (resync/rebalance) the set of stored blocks in the cluster /// Repair (resync/rebalance) the set of stored blocks in the cluster
#[structopt(name = "blocks", version = garage_version())] #[structopt(name = "blocks", version = garage_version())]
Blocks, Blocks,
/// Clear the block resync queue. The list of blocks in errored state
/// is cleared as well. You MUST run `garage repair blocks` after invoking this.
#[structopt(name = "clear-resync-queue", version = garage_version())]
ClearResyncQueue,
/// Repropagate object deletions to the version table /// Repropagate object deletions to the version table
#[structopt(name = "versions", version = garage_version())] #[structopt(name = "versions", version = garage_version())]
Versions, Versions,
@ -482,9 +478,6 @@ pub enum RepairWhat {
/// Recalculate block reference counters /// Recalculate block reference counters
#[structopt(name = "block-rc", version = garage_version())] #[structopt(name = "block-rc", version = garage_version())]
BlockRc, BlockRc,
/// Fix inconsistency in bucket aliases (WARNING: EXPERIMENTAL)
#[structopt(name = "aliases", version = garage_version())]
Aliases,
/// Verify integrity of all blocks on disc /// Verify integrity of all blocks on disc
#[structopt(name = "scrub", version = garage_version())] #[structopt(name = "scrub", version = garage_version())]
Scrub { Scrub {

View file

@ -208,43 +208,6 @@ fn init_logging(opt: &Opt) {
} }
} }
if std::env::var("GARAGE_LOG_TO_JOURNALD")
.map(|x| x == "1" || x == "true")
.unwrap_or(false)
{
#[cfg(feature = "journald")]
{
use tracing_journald::{Priority, PriorityMappings};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let registry = tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_writer(std::io::sink))
.with(env_filter);
match tracing_journald::layer() {
Ok(layer) => {
registry
.with(layer.with_priority_mappings(PriorityMappings {
info: Priority::Informational,
debug: Priority::Debug,
..PriorityMappings::new()
}))
.init();
}
Err(e) => {
eprintln!("Couldn't connect to journald: {}.", e);
std::process::exit(1);
}
}
return;
}
#[cfg(not(feature = "journald"))]
{
eprintln!("Journald support is not enabled in this build.");
std::process::exit(1);
}
}
tracing_subscriber::fmt() tracing_subscriber::fmt()
.with_writer(std::io::stderr) .with_writer(std::io::stderr)
.with_env_filter(env_filter) .with_env_filter(env_filter)

View file

@ -88,15 +88,6 @@ pub async fn launch_online_repair(
garage.block_manager.clone(), garage.block_manager.clone(),
)); ));
} }
RepairWhat::Aliases => {
info!("Repairing bucket aliases (foreground)");
garage.locked_helper().await.repair_aliases().await?;
}
RepairWhat::ClearResyncQueue => {
let garage = garage.clone();
tokio::task::spawn_blocking(move || garage.block_manager.resync.clear_resync_queue())
.await??
}
} }
Ok(()) Ok(())
} }

View file

@ -183,21 +183,10 @@ fn watch_shutdown_signal() -> watch::Receiver<bool> {
let mut sigterm = let mut sigterm =
signal(SignalKind::terminate()).expect("Failed to install SIGTERM handler"); signal(SignalKind::terminate()).expect("Failed to install SIGTERM handler");
let mut sighup = signal(SignalKind::hangup()).expect("Failed to install SIGHUP handler"); let mut sighup = signal(SignalKind::hangup()).expect("Failed to install SIGHUP handler");
loop { tokio::select! {
tokio::select! { _ = sigint.recv() => info!("Received SIGINT, shutting down."),
_ = sigint.recv() => { _ = sigterm.recv() => info!("Received SIGTERM, shutting down."),
info!("Received SIGINT, shutting down."); _ = sighup.recv() => info!("Received SIGHUP, shutting down."),
break
}
_ = sigterm.recv() => {
info!("Received SIGTERM, shutting down.");
break
}
_ = sighup.recv() => {
info!("Received SIGHUP, reload not supported.");
continue
}
}
} }
send_cancel.send(true).unwrap(); send_cancel.send(true).unwrap();
}); });

View file

@ -12,7 +12,7 @@ pub fn build_client(key: &Key) -> Client {
.endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT)) .endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT))
.region(super::REGION) .region(super::REGION)
.credentials_provider(credentials) .credentials_provider(credentials)
.behavior_version(BehaviorVersion::v2024_03_28()) .behavior_version(BehaviorVersion::v2023_11_09())
.build(); .build();
Client::from_conf(config) Client::from_conf(config)

View file

@ -192,13 +192,16 @@ impl<'a> RequestBuilder<'a> {
.collect::<HeaderMap>(); .collect::<HeaderMap>();
let date = now.format(signature::LONG_DATETIME).to_string(); let date = now.format(signature::LONG_DATETIME).to_string();
all_headers.insert(signature::X_AMZ_DATE, HeaderValue::from_str(&date).unwrap()); all_headers.insert(
signature::payload::X_AMZ_DATE,
HeaderValue::from_str(&date).unwrap(),
);
all_headers.insert(HOST, HeaderValue::from_str(&host).unwrap()); all_headers.insert(HOST, HeaderValue::from_str(&host).unwrap());
let body_sha = match &self.body_signature { let body_sha = match self.body_signature {
BodySignature::Unsigned => "UNSIGNED-PAYLOAD".to_owned(), BodySignature::Unsigned => "UNSIGNED-PAYLOAD".to_owned(),
BodySignature::Classic => hex::encode(garage_util::data::sha256sum(&self.body)), BodySignature::Classic => hex::encode(garage_util::data::sha256sum(&self.body)),
BodySignature::Streaming { chunk_size } => { BodySignature::Streaming(size) => {
all_headers.insert( all_headers.insert(
CONTENT_ENCODING, CONTENT_ENCODING,
HeaderValue::from_str("aws-chunked").unwrap(), HeaderValue::from_str("aws-chunked").unwrap(),
@ -213,59 +216,18 @@ impl<'a> RequestBuilder<'a> {
// code. // code.
all_headers.insert( all_headers.insert(
CONTENT_LENGTH, CONTENT_LENGTH,
to_streaming_body( to_streaming_body(&self.body, size, String::new(), signer.clone(), now, "")
&self.body, .len()
*chunk_size, .to_string()
String::new(), .try_into()
signer.clone(), .unwrap(),
now,
"",
)
.len()
.to_string()
.try_into()
.unwrap(),
); );
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD".to_owned() "STREAMING-AWS4-HMAC-SHA256-PAYLOAD".to_owned()
} }
BodySignature::StreamingUnsignedTrailer {
chunk_size,
trailer_algorithm,
trailer_value,
} => {
all_headers.insert(
CONTENT_ENCODING,
HeaderValue::from_str("aws-chunked").unwrap(),
);
all_headers.insert(
HeaderName::from_static("x-amz-decoded-content-length"),
HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
);
all_headers.insert(
HeaderName::from_static("x-amz-trailer"),
HeaderValue::from_str(&trailer_algorithm).unwrap(),
);
all_headers.insert(
CONTENT_LENGTH,
to_streaming_unsigned_trailer_body(
&self.body,
*chunk_size,
&trailer_algorithm,
&trailer_value,
)
.len()
.to_string()
.try_into()
.unwrap(),
);
"STREAMING-UNSIGNED-PAYLOAD-TRAILER".to_owned()
}
}; };
all_headers.insert( all_headers.insert(
signature::X_AMZ_CONTENT_SHA256, signature::payload::X_AMZ_CONTENT_SH256,
HeaderValue::from_str(&body_sha).unwrap(), HeaderValue::from_str(&body_sha).unwrap(),
); );
@ -314,26 +276,10 @@ impl<'a> RequestBuilder<'a> {
let mut request = Request::builder(); let mut request = Request::builder();
*request.headers_mut().unwrap() = all_headers; *request.headers_mut().unwrap() = all_headers;
let body = match &self.body_signature { let body = if let BodySignature::Streaming(size) = self.body_signature {
BodySignature::Streaming { chunk_size } => to_streaming_body( to_streaming_body(&self.body, size, signature, streaming_signer, now, &scope)
&self.body, } else {
*chunk_size, self.body.clone()
signature,
streaming_signer,
now,
&scope,
),
BodySignature::StreamingUnsignedTrailer {
chunk_size,
trailer_algorithm,
trailer_value,
} => to_streaming_unsigned_trailer_body(
&self.body,
*chunk_size,
&trailer_algorithm,
&trailer_value,
),
_ => self.body.clone(),
}; };
let request = request let request = request
.uri(uri) .uri(uri)
@ -362,14 +308,7 @@ impl<'a> RequestBuilder<'a> {
pub enum BodySignature { pub enum BodySignature {
Unsigned, Unsigned,
Classic, Classic,
Streaming { Streaming(usize),
chunk_size: usize,
},
StreamingUnsignedTrailer {
chunk_size: usize,
trailer_algorithm: String,
trailer_value: String,
},
} }
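For reference, the struct-like variants spell each parameter out by name; a sketch of how the tests further down construct them, with values borrowed from those tests:

let signed = BodySignature::Streaming { chunk_size: 16 };
let with_trailer = BodySignature::StreamingUnsignedTrailer {
    chunk_size: 16,
    trailer_algorithm: "x-amz-checksum-crc32".to_string(),
    trailer_value: "2Yp9Yw==".to_string(),
};

The tuple form on the other side of the diff packs only the chunk size: BodySignature::Streaming(16).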
fn query_param_to_string(params: &HashMap<String, Option<String>>) -> String { fn query_param_to_string(params: &HashMap<String, Option<String>>) -> String {
@ -424,26 +363,3 @@ fn to_streaming_body(
res res
} }
fn to_streaming_unsigned_trailer_body(
body: &[u8],
chunk_size: usize,
trailer_algorithm: &str,
trailer_value: &str,
) -> Vec<u8> {
let mut res = Vec::with_capacity(body.len());
for chunk in body.chunks(chunk_size) {
let header = format!("{:x}\r\n", chunk.len());
res.extend_from_slice(header.as_bytes());
res.extend_from_slice(chunk);
res.extend_from_slice(b"\r\n");
}
res.extend_from_slice(b"0\r\n");
res.extend_from_slice(trailer_algorithm.as_bytes());
res.extend_from_slice(b":");
res.extend_from_slice(trailer_value.as_bytes());
res.extend_from_slice(b"\n\r\n\r\n");
res
}
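The helper above produces an aws-chunked body terminated by an unsigned trailer. A sketch of the wire format for a small body (the CRC-32 value here is computed by hand, so treat it as illustrative):

// For an 11-byte body and a 16-byte chunk size there is a single chunk,
// whose length is written in hex ("b"), followed by the zero-length chunk
// and the checksum trailer line.
let body = to_streaming_unsigned_trailer_body(
    b"hello world",
    16,
    "x-amz-checksum-crc32",
    "DUoRhQ==", // base64 of crc32("hello world") = 0x0d4a1185
);
assert_eq!(
    body,
    b"b\r\nhello world\r\n0\r\nx-amz-checksum-crc32:DUoRhQ==\n\r\n\r\n".to_vec()
);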


@ -63,8 +63,6 @@ rpc_bind_addr = "127.0.0.1:{rpc_port}"
rpc_public_addr = "127.0.0.1:{rpc_port}" rpc_public_addr = "127.0.0.1:{rpc_port}"
rpc_secret = "{secret}" rpc_secret = "{secret}"
allow_punycode = true
[s3_api] [s3_api]
s3_region = "{region}" s3_region = "{region}"
api_bind_addr = "127.0.0.1:{s3_port}" api_bind_addr = "127.0.0.1:{s3_port}"
@ -101,10 +99,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
.arg("server") .arg("server")
.stdout(stdout) .stdout(stdout)
.stderr(stderr) .stderr(stderr)
.env( .env("RUST_LOG", "garage=debug,garage_api=trace")
"RUST_LOG",
"garage=debug,garage_api_common=trace,garage_api_s3=trace",
)
.spawn() .spawn()
.expect("Could not start garage"); .expect("Could not start garage");


@ -1,6 +1,5 @@
use crate::common; use crate::common;
use aws_sdk_s3::error::SdkError; use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::primitives::{ByteStream, DateTime};
use aws_sdk_s3::types::{Delete, ObjectIdentifier}; use aws_sdk_s3::types::{Delete, ObjectIdentifier};
const STD_KEY: &str = "hello world"; const STD_KEY: &str = "hello world";
@ -126,153 +125,6 @@ async fn test_putobject() {
} }
} }
#[tokio::test]
async fn test_precondition() {
let ctx = common::context();
let bucket = ctx.create_bucket("precondition");
let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
let etag2 = "\"ae4984b984cd984fe98d4efa954dce98\"";
let data = ByteStream::from_static(BODY);
let r = ctx
.client
.put_object()
.bucket(&bucket)
.key(STD_KEY)
.body(data)
.send()
.await
.unwrap();
assert_eq!(r.e_tag.unwrap().as_str(), etag);
let last_modified;
{
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_match(etag)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
last_modified = o.last_modified.unwrap();
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_match(etag2)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
);
}
{
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_none_match(etag2)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_none_match(etag)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);
}
let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
let same_date = DateTime::from_secs_f64(last_modified.as_secs_f64());
let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
{
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_modified_since(newer_date)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_modified_since(same_date)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
);
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_modified_since(older_date)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
}
{
let err = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_unmodified_since(older_date)
.send()
.await;
assert!(
matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
);
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_unmodified_since(same_date)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.if_unmodified_since(newer_date)
.send()
.await
.unwrap();
assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
}
}
#[tokio::test] #[tokio::test]
async fn test_getobject() { async fn test_getobject() {
let ctx = common::context(); let ctx = common::context();
@ -337,14 +189,12 @@ async fn test_getobject() {
#[tokio::test] #[tokio::test]
async fn test_metadata() { async fn test_metadata() {
use aws_sdk_s3::primitives::{DateTime, DateTimeFormat};
let ctx = common::context(); let ctx = common::context();
let bucket = ctx.create_bucket("testmetadata"); let bucket = ctx.create_bucket("testmetadata");
let etag = "\"46cf18a9b447991b450cad3facf5937e\""; let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
let exp = DateTime::from_secs(10000000000); let exp = aws_sdk_s3::primitives::DateTime::from_secs(10000000000);
let exp2 = DateTime::from_secs(10000500000); let exp2 = aws_sdk_s3::primitives::DateTime::from_secs(10000500000);
{ {
// Note. The AWS client SDK adds a Content-Type header // Note. The AWS client SDK adds a Content-Type header
@ -377,7 +227,7 @@ async fn test_metadata() {
assert_eq!(o.content_disposition, None); assert_eq!(o.content_disposition, None);
assert_eq!(o.content_encoding, None); assert_eq!(o.content_encoding, None);
assert_eq!(o.content_language, None); assert_eq!(o.content_language, None);
assert_eq!(o.expires_string, None); assert_eq!(o.expires, None);
assert_eq!(o.metadata.unwrap_or_default().len(), 0); assert_eq!(o.metadata.unwrap_or_default().len(), 0);
let o = ctx let o = ctx
@ -400,10 +250,7 @@ async fn test_metadata() {
assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy"); assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy"); assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
assert_eq!(o.content_language.unwrap().as_str(), "cldummy"); assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
assert_eq!( assert_eq!(o.expires.unwrap(), exp);
o.expires_string.unwrap(),
exp.fmt(DateTimeFormat::HttpDate).unwrap()
);
} }
{ {
@ -441,10 +288,7 @@ async fn test_metadata() {
assert_eq!(o.content_disposition.unwrap().as_str(), "cdtest"); assert_eq!(o.content_disposition.unwrap().as_str(), "cdtest");
assert_eq!(o.content_encoding.unwrap().as_str(), "cetest"); assert_eq!(o.content_encoding.unwrap().as_str(), "cetest");
assert_eq!(o.content_language.unwrap().as_str(), "cltest"); assert_eq!(o.content_language.unwrap().as_str(), "cltest");
assert_eq!( assert_eq!(o.expires.unwrap(), exp2);
o.expires_string.unwrap(),
exp2.fmt(DateTimeFormat::HttpDate).unwrap()
);
let mut meta = o.metadata.unwrap(); let mut meta = o.metadata.unwrap();
assert_eq!(meta.remove("testmeta").unwrap(), "hello people"); assert_eq!(meta.remove("testmeta").unwrap(), "hello people");
assert_eq!(meta.remove("nice-unicode-meta").unwrap(), "宅配便"); assert_eq!(meta.remove("nice-unicode-meta").unwrap(), "宅配便");
@ -470,10 +314,7 @@ async fn test_metadata() {
assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy"); assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy"); assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
assert_eq!(o.content_language.unwrap().as_str(), "cldummy"); assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
assert_eq!( assert_eq!(o.expires.unwrap(), exp);
o.expires_string.unwrap(),
exp.fmt(DateTimeFormat::HttpDate).unwrap()
);
} }
} }
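The expires assertions change because newer aws-sdk-s3 releases replace the parsed expires field with the raw expires_string header value; the typed DateTime must therefore be rendered as an HTTP date before comparing. A small sketch, using the same primitives the test imports:

use aws_sdk_s3::primitives::{DateTime, DateTimeFormat};

// Render the timestamp the way `expires_string` carries it on the wire.
let exp = DateTime::from_secs(10_000_000_000);
let http_date = exp.fmt(DateTimeFormat::HttpDate).unwrap();
println!("{http_date}"); // prints an RFC 7231 HTTP date for this timestamp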


@ -1,8 +1,5 @@
use std::collections::HashMap; use std::collections::HashMap;
use base64::prelude::*;
use crc32fast::Hasher as Crc32;
use crate::common; use crate::common;
use crate::common::ext::CommandExt; use crate::common::ext::CommandExt;
use common::custom_requester::BodySignature; use common::custom_requester::BodySignature;
@ -24,7 +21,7 @@ async fn test_putobject_streaming() {
let content_type = "text/csv"; let content_type = "text/csv";
let mut headers = HashMap::new(); let mut headers = HashMap::new();
headers.insert("content-type".to_owned(), content_type.to_owned()); headers.insert("content-type".to_owned(), content_type.to_owned());
let res = ctx let _ = ctx
.custom_request .custom_request
.builder(bucket.clone()) .builder(bucket.clone())
.method(Method::PUT) .method(Method::PUT)
@ -32,11 +29,10 @@ async fn test_putobject_streaming() {
.signed_headers(headers) .signed_headers(headers)
.vhost_style(true) .vhost_style(true)
.body(vec![]) .body(vec![])
.body_signature(BodySignature::Streaming { chunk_size: 10 }) .body_signature(BodySignature::Streaming(10))
.send() .send()
.await .await
.unwrap(); .unwrap();
assert!(res.status().is_success(), "got response: {:?}", res);
// assert_eq!(r.e_tag.unwrap().as_str(), etag); // assert_eq!(r.e_tag.unwrap().as_str(), etag);
// We return a version ID here // We return a version ID here
@ -69,14 +65,7 @@ async fn test_putobject_streaming() {
{ {
let etag = "\"46cf18a9b447991b450cad3facf5937e\""; let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
let mut crc32 = Crc32::new(); let _ = ctx
crc32.update(&BODY[..]);
let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);
let mut headers = HashMap::new();
headers.insert("x-amz-checksum-crc32".to_owned(), crc32.clone());
let res = ctx
.custom_request .custom_request
.builder(bucket.clone()) .builder(bucket.clone())
.method(Method::PUT) .method(Method::PUT)
@ -84,13 +73,11 @@ async fn test_putobject_streaming() {
//fail //fail
.path("abc".to_owned()) .path("abc".to_owned())
.vhost_style(true) .vhost_style(true)
.signed_headers(headers)
.body(BODY.to_vec()) .body(BODY.to_vec())
.body_signature(BodySignature::Streaming { chunk_size: 16 }) .body_signature(BodySignature::Streaming(16))
.send() .send()
.await .await
.unwrap(); .unwrap();
assert!(res.status().is_success(), "got response: {:?}", res);
// assert_eq!(r.e_tag.unwrap().as_str(), etag); // assert_eq!(r.e_tag.unwrap().as_str(), etag);
// assert!(r.version_id.is_some()); // assert!(r.version_id.is_some());
@ -101,7 +88,6 @@ async fn test_putobject_streaming() {
.bucket(&bucket) .bucket(&bucket)
//.key(CTRL_KEY) //.key(CTRL_KEY)
.key("abc") .key("abc")
.checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -112,142 +98,6 @@ async fn test_putobject_streaming() {
assert_eq!(o.content_length.unwrap(), 62); assert_eq!(o.content_length.unwrap(), 62);
assert_eq!(o.parts_count, None); assert_eq!(o.parts_count, None);
assert_eq!(o.tag_count, None); assert_eq!(o.tag_count, None);
assert_eq!(o.checksum_crc32.unwrap(), crc32);
}
}
#[tokio::test]
async fn test_putobject_streaming_unsigned_trailer() {
let ctx = common::context();
let bucket = ctx.create_bucket("putobject-streaming-unsigned-trailer");
{
// Send an empty object (can serve as a directory marker)
// with a content type
let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
let content_type = "text/csv";
let mut headers = HashMap::new();
headers.insert("content-type".to_owned(), content_type.to_owned());
let empty_crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(Crc32::new().finalize())[..]);
let res = ctx
.custom_request
.builder(bucket.clone())
.method(Method::PUT)
.path(STD_KEY.to_owned())
.signed_headers(headers)
.vhost_style(true)
.body(vec![])
.body_signature(BodySignature::StreamingUnsignedTrailer {
chunk_size: 10,
trailer_algorithm: "x-amz-checksum-crc32".into(),
trailer_value: empty_crc32,
})
.send()
.await
.unwrap();
assert!(res.status().is_success(), "got response: {:?}", res);
// assert_eq!(r.e_tag.unwrap().as_str(), etag);
// We return a version ID here
// We should check if Amazon is returning one when versioning is not enabled
// assert!(r.version_id.is_some());
//let _version = r.version_id.unwrap();
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key(STD_KEY)
.send()
.await
.unwrap();
assert_bytes_eq!(o.body, b"");
assert_eq!(o.e_tag.unwrap(), etag);
// We do not return version ID
// We should check if Amazon is returning one when versioning is not enabled
// assert_eq!(o.version_id.unwrap(), _version);
assert_eq!(o.content_type.unwrap(), content_type);
assert!(o.last_modified.is_some());
assert_eq!(o.content_length.unwrap(), 0);
assert_eq!(o.parts_count, None);
assert_eq!(o.tag_count, None);
}
{
let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
let mut crc32 = Crc32::new();
crc32.update(&BODY[..]);
let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);
// try sending with wrong crc32, check that it fails
let err_res = ctx
.custom_request
.builder(bucket.clone())
.method(Method::PUT)
//.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
//fail
.path("abc".to_owned())
.vhost_style(true)
.body(BODY.to_vec())
.body_signature(BodySignature::StreamingUnsignedTrailer {
chunk_size: 16,
trailer_algorithm: "x-amz-checksum-crc32".into(),
trailer_value: "2Yp9Yw==".into(),
})
.send()
.await
.unwrap();
assert!(
err_res.status().is_client_error(),
"got response: {:?}",
err_res
);
let res = ctx
.custom_request
.builder(bucket.clone())
.method(Method::PUT)
//.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
//fail
.path("abc".to_owned())
.vhost_style(true)
.body(BODY.to_vec())
.body_signature(BodySignature::StreamingUnsignedTrailer {
chunk_size: 16,
trailer_algorithm: "x-amz-checksum-crc32".into(),
trailer_value: crc32.clone(),
})
.send()
.await
.unwrap();
assert!(res.status().is_success(), "got response: {:?}", res);
// assert_eq!(r.e_tag.unwrap().as_str(), etag);
// assert!(r.version_id.is_some());
let o = ctx
.client
.get_object()
.bucket(&bucket)
//.key(CTRL_KEY)
.key("abc")
.checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
.send()
.await
.unwrap();
assert_bytes_eq!(o.body, BODY);
assert_eq!(o.e_tag.unwrap(), etag);
assert!(o.last_modified.is_some());
assert_eq!(o.content_length.unwrap(), 62);
assert_eq!(o.parts_count, None);
assert_eq!(o.tag_count, None);
assert_eq!(o.checksum_crc32.unwrap(), crc32);
} }
} }
@ -269,7 +119,7 @@ async fn test_create_bucket_streaming() {
.custom_request .custom_request
.builder(bucket.to_owned()) .builder(bucket.to_owned())
.method(Method::PUT) .method(Method::PUT)
.body_signature(BodySignature::Streaming { chunk_size: 10 }) .body_signature(BodySignature::Streaming(10))
.send() .send()
.await .await
.unwrap(); .unwrap();
@ -324,7 +174,7 @@ async fn test_put_website_streaming() {
.method(Method::PUT) .method(Method::PUT)
.query_params(query) .query_params(query)
.body(website_config.as_bytes().to_vec()) .body(website_config.as_bytes().to_vec())
.body_signature(BodySignature::Streaming { chunk_size: 10 }) .body_signature(BodySignature::Streaming(10))
.send() .send()
.await .await
.unwrap(); .unwrap();


@ -5,13 +5,15 @@ use crate::json_body;
use assert_json_diff::assert_json_eq; use assert_json_diff::assert_json_eq;
use aws_sdk_s3::{ use aws_sdk_s3::{
primitives::ByteStream, primitives::ByteStream,
types::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration}, types::{
Condition, CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, Protocol, Redirect,
RoutingRule, WebsiteConfiguration,
},
}; };
use http::{Request, StatusCode}; use http::{Request, StatusCode};
use http_body_util::BodyExt; use http_body_util::BodyExt;
use http_body_util::Full as FullBody; use http_body_util::Full as FullBody;
use hyper::body::Bytes; use hyper::body::Bytes;
use hyper::header::LOCATION;
use hyper_util::client::legacy::Client; use hyper_util::client::legacy::Client;
use hyper_util::rt::TokioExecutor; use hyper_util::rt::TokioExecutor;
use serde_json::json; use serde_json::json;
@ -296,33 +298,6 @@ async fn test_website_s3_api() {
); );
} }
// Test x-amz-website-redirect-location
{
ctx.client
.put_object()
.bucket(&bucket)
.key("test-redirect.html")
.website_redirect_location("https://perdu.com")
.send()
.await
.unwrap();
let req = Request::builder()
.method("GET")
.uri(format!(
"http://127.0.0.1:{}/test-redirect.html",
ctx.garage.web_port
))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap();
let resp = client.request(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::MOVED_PERMANENTLY);
assert_eq!(resp.headers().get(LOCATION).unwrap(), "https://perdu.com");
}
// Test CORS with an allowed preflight request // Test CORS with an allowed preflight request
{ {
let req = Request::builder() let req = Request::builder()
@ -535,116 +510,442 @@ async fn test_website_check_domain() {
} }
#[tokio::test] #[tokio::test]
async fn test_website_puny() { async fn test_website_redirect_full_bucket() {
const BCKT_NAME: &str = "xn--pda.eu"; const BCKT_NAME: &str = "my-redirect-full";
let ctx = common::context(); let ctx = common::context();
let bucket = ctx.create_bucket(BCKT_NAME); let bucket = ctx.create_bucket(BCKT_NAME);
let data = ByteStream::from_static(BODY); let conf = WebsiteConfiguration::builder()
.routing_rules(
RoutingRule::builder()
.condition(Condition::builder().key_prefix_equals("").build())
.redirect(
Redirect::builder()
.protocol(Protocol::Https)
.host_name("other.tld")
.replace_key_prefix_with("")
.build(),
)
.build(),
)
.build();
ctx.client
.put_bucket_website()
.bucket(&bucket)
.website_configuration(conf)
.send()
.await
.unwrap();
let req = Request::builder()
.method("GET")
.uri(format!("http://127.0.0.1:{}/my-path", ctx.garage.web_port))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new()))
.unwrap();
let client = Client::builder(TokioExecutor::new()).build_http();
let resp = client.request(req).await.unwrap();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(hyper::header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"https://other.tld/my-path"
);
}
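The expected Location follows directly from the rule: the protocol and host come from the Redirect, and replace_key_prefix_with rewrites the matched prefix in the key. A hypothetical helper (illustrative names, not Garage's actual internals) that mirrors this assembly, using the v2 Redirect struct that appears later in this diff:

fn location_for(redirect: &Redirect, key: &str, matched_prefix: &str) -> String {
    // Pick the rewritten key: a fixed replacement wins over a prefix swap.
    let key = match (&redirect.replace_key, &redirect.replace_key_prefix) {
        (Some(fixed), _) => fixed.clone(),
        (None, Some(new_prefix)) => {
            format!("{}{}", new_prefix, key.strip_prefix(matched_prefix).unwrap_or(key))
        }
        (None, None) => key.to_string(),
    };
    // With a hostname the redirect is absolute; otherwise it stays on-site.
    match &redirect.hostname {
        Some(host) => format!(
            "{}://{}/{}",
            redirect.protocol.as_deref().unwrap_or("http"),
            host,
            key
        ),
        None => format!("/{}", key),
    }
}

For the test above this yields "https://other.tld/my-path"; for the prefix rules exercised below it yields "/other-prefix/path".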
#[tokio::test]
async fn test_website_redirect() {
const BCKT_NAME: &str = "my-redirect";
let ctx = common::context();
let bucket = ctx.create_bucket(BCKT_NAME);
ctx.client ctx.client
.put_object() .put_object()
.bucket(&bucket) .bucket(&bucket)
.key("index.html") .key("index.html")
.body(data) .body(ByteStream::from_static(b"index"))
.send()
.await
.unwrap();
ctx.client
.put_object()
.bucket(&bucket)
.key("404.html")
.body(ByteStream::from_static(b"main 404"))
.send()
.await
.unwrap();
ctx.client
.put_object()
.bucket(&bucket)
.key("static-file")
.body(ByteStream::from_static(b"static file"))
.send() .send()
.await .await
.unwrap(); .unwrap();
let client = Client::builder(TokioExecutor::new()).build_http(); let mut conf = WebsiteConfiguration::builder()
.index_document(
IndexDocument::builder()
.suffix("home.html")
.build()
.unwrap(),
)
.error_document(ErrorDocument::builder().key("404.html").build().unwrap());
let req = |suffix| { for (prefix, condition) in [("unconditional", false), ("conditional", true)] {
let code = condition.then(|| "404".to_string());
conf = conf
// simple redirect
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-prefix/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("302")
.replace_key_prefix_with("other-prefix/")
.build(),
)
.build(),
)
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-prefix-307/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("307")
.replace_key_prefix_with("other-prefix/")
.build(),
)
.build(),
)
// simple redirect
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/redirect-fixed/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("302")
.replace_key_with("fixed_key")
.build(),
)
.build(),
)
// stream other file
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-fixed/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("200")
.replace_key_with("static-file")
.build(),
)
.build(),
)
// stream other file as error
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-404/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("404")
.replace_key_with("static-file")
.build(),
)
.build(),
)
// fail to stream other file
.routing_rules(
RoutingRule::builder()
.condition(
Condition::builder()
.set_http_error_code_returned_equals(code.clone())
.key_prefix_equals(format!("{prefix}/stream-missing/"))
.build(),
)
.redirect(
Redirect::builder()
.http_redirect_code("200")
.replace_key_with("missing-file")
.build(),
)
.build(),
);
}
let conf = conf.build();
ctx.client
.put_bucket_website()
.bucket(&bucket)
.website_configuration(conf.clone())
.send()
.await
.unwrap();
let stored_cfg = ctx
.client
.get_bucket_website()
.bucket(&bucket)
.send()
.await
.unwrap();
assert_eq!(stored_cfg.index_document, conf.index_document);
assert_eq!(stored_cfg.error_document, conf.error_document);
assert_eq!(stored_cfg.routing_rules, conf.routing_rules);
let req = |path| {
Request::builder() Request::builder()
.method("GET") .method("GET")
.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port)) .uri(format!(
.header("Host", format!("{}{}", BCKT_NAME, suffix)) "http://127.0.0.1:{}/{}/path",
ctx.garage.web_port, path
))
.header("Host", format!("{}.web.garage", BCKT_NAME))
.body(Body::new(Bytes::new())) .body(Body::new(Bytes::new()))
.unwrap() .unwrap()
}; };
ctx.garage test_redirect_helper("unconditional", true, &req).await;
.command() test_redirect_helper("conditional", true, &req).await;
.args(["bucket", "website", "--allow", BCKT_NAME]) for prefix in ["unconditional", "conditional"] {
.quiet() for rule_path in [
.expect_success_status("Could not allow website on bucket"); "redirect-prefix",
"redirect-prefix-307",
"redirect-fixed",
"stream-fixed",
"stream-404",
"stream-missing",
] {
ctx.client
.put_object()
.bucket(&bucket)
.key(format!("{prefix}/{rule_path}/path"))
.body(ByteStream::from_static(b"i exist"))
.send()
.await
.unwrap();
}
}
test_redirect_helper("unconditional", true, &req).await;
test_redirect_helper("conditional", false, &req).await;
}
let mut resp = client.request(req("")).await.unwrap(); async fn test_redirect_helper(
assert_eq!(resp.status(), StatusCode::OK); prefix: &str,
assert_eq!( should_see_redirect: bool,
resp.into_body().collect().await.unwrap().to_bytes(), req: impl Fn(String) -> Request<http_body_util::Full<Bytes>>,
BODY.as_ref() ) {
); use http::header;
let client = Client::builder(TokioExecutor::new()).build_http();
let expected_body = b"i exist".as_ref();
resp = client.request(req(".web.garage")).await.unwrap(); let resp = client
assert_eq!(resp.status(), StatusCode::OK); .request(req(format!("{prefix}/redirect-prefix")))
assert_eq!( .await
resp.into_body().collect().await.unwrap().to_bytes(), .unwrap();
BODY.as_ref() if should_see_redirect {
); assert_eq!(resp.status(), StatusCode::FOUND);
for bname in [
BCKT_NAME.to_string(),
format!("{BCKT_NAME}.web.garage"),
format!("{BCKT_NAME}.s3.garage"),
] {
let admin_req = || {
Request::builder()
.method("GET")
.uri(format!(
"http://127.0.0.1:{0}/check?domain={1}",
ctx.garage.admin_port, bname
))
.body(Body::new(Bytes::new()))
.unwrap()
};
let admin_resp = client.request(admin_req()).await.unwrap();
assert_eq!(admin_resp.status(), StatusCode::OK);
assert_eq!( assert_eq!(
admin_resp.into_body().collect().await.unwrap().to_bytes(), resp.headers()
format!("Domain '{bname}' is managed by Garage").as_bytes() .get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/other-prefix/path"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/redirect-prefix-307")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::TEMPORARY_REDIRECT);
assert_eq!(
resp.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/other-prefix/path"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/redirect-fixed")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(
resp.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap(),
"/fixed_key"
);
assert!(resp
.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.is_empty());
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/stream-fixed")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
b"static file".as_ref(),
);
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
let resp = client
.request(req(format!("{prefix}/stream-404")))
.await
.unwrap();
if should_see_redirect {
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
b"static file".as_ref(),
);
} else {
assert_eq!(resp.status(), StatusCode::OK);
assert!(resp.headers().get(header::LOCATION).is_none());
assert_eq!(
resp.into_body().collect().await.unwrap().to_bytes(),
expected_body,
);
}
);
}
} }
#[tokio::test] #[tokio::test]
async fn test_website_object_not_found() { async fn test_website_invalid_redirect() {
const BCKT_NAME: &str = "not-found"; const BCKT_NAME: &str = "my-invalid-redirect";
let ctx = common::context(); let ctx = common::context();
let _bucket = ctx.create_bucket(BCKT_NAME); let bucket = ctx.create_bucket(BCKT_NAME);
let client = Client::builder(TokioExecutor::new()).build_http(); let conf = WebsiteConfiguration::builder()
.routing_rules(
RoutingRule::builder()
.condition(Condition::builder().key_prefix_equals("").build())
.redirect(
Redirect::builder()
.protocol(Protocol::Https)
.host_name("other.tld")
.replace_key_prefix_with("")
// we don't allow 200 with hostname
.http_redirect_code("200")
.build(),
)
.build(),
)
.build();
let req = |suffix| { ctx.client
Request::builder() .put_bucket_website()
.method("GET") .bucket(&bucket)
.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port)) .website_configuration(conf)
.header("Host", format!("{}{}", BCKT_NAME, suffix)) .send()
.body(Body::new(Bytes::new())) .await
.unwrap() .unwrap_err();
};
ctx.garage
.command()
.args(["bucket", "website", "--allow", BCKT_NAME])
.quiet()
.expect_success_status("Could not allow website on bucket");
let resp = client.request(req("")).await.unwrap();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
// the error we return by default are *not* xml
assert_eq!(
resp.headers().get(http::header::CONTENT_TYPE).unwrap(),
"text/html; charset=utf-8"
);
let result = String::from_utf8(
resp.into_body()
.collect()
.await
.unwrap()
.to_bytes()
.to_vec(),
)
.unwrap();
assert!(result.contains("not found"));
} }


@ -72,16 +72,6 @@ impl K2vClient {
.enable_http2() .enable_http2()
.build(); .build();
let client = HttpClient::builder(TokioExecutor::new()).build(connector); let client = HttpClient::builder(TokioExecutor::new()).build(connector);
Self::new_with_client(config, client)
}
/// Create a new K2V client with an external client.
/// Useful for example if you plan on creating many clients but you want to mutualize the
/// underlying thread pools & co.
pub fn new_with_client(
config: K2vClientConfig,
client: HttpClient<HttpsConnector<HttpConnector>, Body>,
) -> Result<Self, Error> {
let user_agent: std::borrow::Cow<str> = match &config.user_agent { let user_agent: std::borrow::Cow<str> = match &config.user_agent {
Some(ua) => ua.into(), Some(ua) => ua.into(),
None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(), None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(),


@ -1,6 +1,6 @@
[package] [package]
name = "garage_model" name = "garage_model"
version = "1.3.1" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -24,7 +24,7 @@ garage_net.workspace = true
async-trait.workspace = true async-trait.workspace = true
blake2.workspace = true blake2.workspace = true
chrono.workspace = true chrono.workspace = true
thiserror.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
http.workspace = true http.workspace = true
base64.workspace = true base64.workspace = true
@ -44,4 +44,3 @@ default = [ "lmdb", "sqlite" ]
k2v = [ "garage_util/k2v" ] k2v = [ "garage_util/k2v" ]
lmdb = [ "garage_db/lmdb" ] lmdb = [ "garage_db/lmdb" ]
sqlite = [ "garage_db/sqlite" ] sqlite = [ "garage_db/sqlite" ]
fjall = [ "garage_db/fjall" ]


@ -22,10 +22,14 @@ mod v08 {
pub use v08::*; pub use v08::*;
impl BucketAlias { impl BucketAlias {
pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Self { pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
BucketAlias { if !is_valid_bucket_name(&name) {
name, None
state: crdt::Lww::raw(ts, bucket_id), } else {
Some(BucketAlias {
name,
state: crdt::Lww::raw(ts, bucket_id),
})
} }
} }
@ -76,7 +80,7 @@ impl TableSchema for BucketAliasTable {
/// In the case of Garage, bucket names must not be hex-encoded /// In the case of Garage, bucket names must not be hex-encoded
/// 32 byte string, which is excluded thanks to the /// 32 byte string, which is excluded thanks to the
/// maximum length of 63 bytes given in the spec. /// maximum length of 63 bytes given in the spec.
pub fn is_valid_bucket_name(n: &str, puny: bool) -> bool { pub fn is_valid_bucket_name(n: &str) -> bool {
// Bucket names must be between 3 and 63 characters // Bucket names must be between 3 and 63 characters
n.len() >= 3 && n.len() <= 63 n.len() >= 3 && n.len() <= 63
// Bucket names must be composed of lowercase letters, numbers, // Bucket names must be composed of lowercase letters, numbers,
@ -88,9 +92,7 @@ pub fn is_valid_bucket_name(n: &str, puny: bool) -> bool {
// Bucket names must not be formatted as an IP address // Bucket names must not be formatted as an IP address
&& n.parse::<std::net::IpAddr>().is_err() && n.parse::<std::net::IpAddr>().is_err()
// Bucket names must not start with "xn--" // Bucket names must not start with "xn--"
&& (!n.starts_with("xn--") || puny) && !n.starts_with("xn--")
// We are a bit stricter, to properly restrict punycode in all labels
&& (!n.contains(".xn--") || puny)
// Bucket names must not end with "-s3alias" // Bucket names must not end with "-s3alias"
&& !n.ends_with("-s3alias") && !n.ends_with("-s3alias")
} }
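With the two-argument form, the puny flag only gates punycode ("xn--") labels; every other S3 naming rule still applies. A few illustrative checks:

assert!(is_valid_bucket_name("my-bucket", false));
assert!(!is_valid_bucket_name("xn--pda.eu", false)); // punycode refused unless allowed
assert!(is_valid_bucket_name("xn--pda.eu", true));
assert!(!is_valid_bucket_name("192.168.1.1", true)); // IP-shaped names always refused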


@ -119,7 +119,122 @@ mod v08 {
impl garage_util::migrate::InitialFormat for Bucket {} impl garage_util::migrate::InitialFormat for Bucket {}
} }
pub use v08::*; mod v2 {
use crate::permission::BucketKeyPerm;
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use super::v08;
pub use v08::{BucketQuotas, CorsRule, LifecycleExpiration, LifecycleFilter, LifecycleRule};
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket {
/// ID of the bucket
pub id: Uuid,
/// State, and configuration if not deleted, of the bucket
pub state: crdt::Deletable<BucketParams>,
}
/// Configuration for a bucket
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams {
/// Bucket's creation date
pub creation_date: u64,
/// Map of key with access to the bucket, and what kind of access they give
pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
/// Map of aliases that are or have been given to this bucket
/// in the global namespace
/// (not authoritative: this is just used as an indication to
/// map back to aliases when doing ListBuckets)
pub aliases: crdt::LwwMap<String, bool>,
/// Map of aliases that are or have been given to this bucket
/// in namespaces local to keys
/// key = (access key id, alias name)
pub local_aliases: crdt::LwwMap<(String, String), bool>,
/// Whether this bucket is allowed for website access
/// (under all of its global alias names),
/// and if so, the website configuration XML document
pub website_config: crdt::Lww<Option<WebsiteConfig>>,
/// CORS rules
pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
/// Lifecycle configuration
pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>,
/// Bucket quotas
pub quotas: crdt::Lww<BucketQuotas>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfig {
pub index_document: String,
pub error_document: Option<String>,
// this field is currently unused, but present so adding it in the future doesn't
// need a new migration
pub redirect_all: Option<RedirectAll>,
pub routing_rules: Vec<RoutingRule>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RedirectAll {
pub hostname: String,
pub protocol: String,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RoutingRule {
pub condition: Option<RedirectCondition>,
pub redirect: Redirect,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct RedirectCondition {
pub http_error_code: Option<u16>,
pub prefix: Option<String>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Redirect {
pub hostname: Option<String>,
pub http_redirect_code: u16,
pub protocol: Option<String>,
pub replace_key_prefix: Option<String>,
pub replace_key: Option<String>,
}
impl garage_util::migrate::Migrate for Bucket {
const VERSION_MARKER: &'static [u8] = b"G2bkt";
type Previous = v08::Bucket;
fn migrate(old: v08::Bucket) -> Bucket {
Bucket {
id: old.id,
state: old.state.map(|x| BucketParams {
creation_date: x.creation_date,
authorized_keys: x.authorized_keys,
aliases: x.aliases,
local_aliases: x.local_aliases,
website_config: x.website_config.map(|wc_opt| {
wc_opt.map(|wc| WebsiteConfig {
index_document: wc.index_document,
error_document: wc.error_document,
redirect_all: None,
routing_rules: vec![],
})
}),
cors_config: x.cors_config,
lifecycle_config: x.lifecycle_config,
quotas: x.quotas,
}),
}
}
}
}
pub use v2::*;
impl AutoCrdt for BucketQuotas { impl AutoCrdt for BucketQuotas {
const WARN_IF_DIFFERENT: bool = true; const WARN_IF_DIFFERENT: bool = true;

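Put together, the new v2 schema lets a website configuration embed routing rules directly. A minimal sketch of one conditional rule, with field values borrowed from the tests earlier in this diff:

let config = WebsiteConfig {
    index_document: "index.html".to_string(),
    error_document: Some("404.html".to_string()),
    redirect_all: None, // reserved for a future migration, per the comment above
    routing_rules: vec![RoutingRule {
        condition: Some(RedirectCondition {
            http_error_code: Some(404),
            prefix: Some("conditional/redirect-prefix/".to_string()),
        }),
        redirect: Redirect {
            hostname: None,
            http_redirect_code: 302,
            protocol: None,
            replace_key_prefix: Some("other-prefix/".to_string()),
            replace_key: None,
        },
    }],
};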

@ -116,17 +116,21 @@ impl Garage {
info!("Opening database..."); info!("Opening database...");
let db_engine = db::Engine::from_str(&config.db_engine) let db_engine = db::Engine::from_str(&config.db_engine)
.ok_or_message("Invalid `db_engine` value in configuration file")?; .ok_or_message("Invalid `db_engine` value in configuration file")?;
let db_path = db_engine.db_path(&config.metadata_dir); let mut db_path = config.metadata_dir.clone();
match db_engine {
db::Engine::Sqlite => {
db_path.push("db.sqlite");
}
db::Engine::Lmdb => {
db_path.push("db.lmdb");
}
}
let db_opt = db::OpenOpt { let db_opt = db::OpenOpt {
fsync: config.metadata_fsync, fsync: config.metadata_fsync,
lmdb_map_size: match config.lmdb_map_size { lmdb_map_size: match config.lmdb_map_size {
v if v == usize::default() => None, v if v == usize::default() => None,
v => Some(v), v => Some(v),
}, },
fjall_block_cache_size: match config.fjall_block_cache_size {
v if v == usize::default() => None,
v => Some(v),
},
}; };
let db = db::open_db(&db_path, db_engine, &db_opt) let db = db::open_db(&db_path, db_engine, &db_opt)
.ok_or_message("Unable to open metadata db")?; .ok_or_message("Unable to open metadata db")?;
@ -315,15 +319,15 @@ impl Garage {
Ok(()) Ok(())
} }
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper<'_> { pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
helper::bucket::BucketHelper(self) helper::bucket::BucketHelper(self)
} }
pub fn key_helper(&self) -> helper::key::KeyHelper<'_> { pub fn key_helper(&self) -> helper::key::KeyHelper {
helper::key::KeyHelper(self) helper::key::KeyHelper(self)
} }
pub async fn locked_helper(&self) -> helper::locked::LockedHelper<'_> { pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
let lock = self.bucket_lock.lock().await; let lock = self.bucket_lock.lock().await;
helper::locked::LockedHelper(self, Some(lock)) helper::locked::LockedHelper(self, Some(lock))
} }


@ -1,24 +1,24 @@
use err_derive::Error;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use thiserror::Error;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
#[derive(Debug, Error, Serialize, Deserialize)] #[derive(Debug, Error, Serialize, Deserialize)]
pub enum Error { pub enum Error {
#[error("Internal error: {0}")] #[error(display = "Internal error: {}", _0)]
Internal(#[from] GarageError), Internal(#[error(source)] GarageError),
#[error("Bad request: {0}")] #[error(display = "Bad request: {}", _0)]
BadRequest(String), BadRequest(String),
/// Bucket name is not valid according to AWS S3 specs /// Bucket name is not valid according to AWS S3 specs
#[error("Invalid bucket name: {0}")] #[error(display = "Invalid bucket name: {}", _0)]
InvalidBucketName(String), InvalidBucketName(String),
#[error("Access key not found: {0}")] #[error(display = "Access key not found: {}", _0)]
NoSuchAccessKey(String), NoSuchAccessKey(String),
#[error("Bucket not found: {0}")] #[error(display = "Bucket not found: {}", _0)]
NoSuchBucket(String), NoSuchBucket(String),
} }
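Both derive crates produce the same Display strings; with the thiserror form, for instance:

// The attribute #[error("Bucket not found: {0}")] drives Display:
let err = Error::NoSuchBucket("my-bucket".to_string());
assert_eq!(err.to_string(), "Bucket not found: my-bucket");
// And #[from] provides From<GarageError>, so `?` converts automatically.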

Some files were not shown because too many files have changed in this diff.