Compare commits

..

1 commit

Author: Daniel García
SHA1: c28eab74dd
Message: Cached config operations
Date: 2025-12-28 23:44:50 +01:00
95 changed files with 8954 additions and 8295 deletions


@ -372,22 +372,16 @@
## Note that clients cache the /api/config endpoint for about 1 hour and it could take some time before they are enabled or disabled!
##
## The following flags are available:
## - "pm-5594-safari-account-switching": Enable account switching in Safari. (Safari >= 2026.2.0)
## - "ssh-agent": Enable SSH agent support on Desktop. (Desktop >= 2024.12.0)
## - "ssh-agent-v2": Enable newer SSH agent support. (Desktop >= 2026.2.1)
## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Clients >= 2024.12.0)
## - "pm-25373-windows-biometrics-v2": Enable the new implementation of biometrics on Windows. (Desktop >= 2025.11.0)
## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Android >= 2025.3.0, iOS >= 2025.4.0)
## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Android >= 2025.3.0, iOS >= 2025.4.0)
## - "mutual-tls": Enable the use of mutual TLS on Android (Clients >= 2025.2.0)
## - "cxp-import-mobile": Enable the import via CXP on iOS (Clients >= 2025.9.2)
## - "cxp-export-mobile": Enable the export via CXP on iOS (Clients >= 2025.9.2)
## - "pm-30529-webauthn-related-origins":
## - "desktop-ui-migration-milestone-1": Special feature flag for desktop UI (Desktop >= 2026.2.0)
## - "desktop-ui-migration-milestone-2": Special feature flag for desktop UI (Desktop >= 2026.2.0)
## - "desktop-ui-migration-milestone-3": Special feature flag for desktop UI (Desktop >= 2026.2.0)
## - "desktop-ui-migration-milestone-4": Special feature flag for desktop UI (Desktop >= 2026.2.0)
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=
## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
## - "inline-menu-totp": Enable the use of inline menu TOTP codes in the browser extension.
## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
## - "pm-25373-windows-biometrics-v2": Enable the new implementation of biometrics on Windows. (Needs desktop >= 2025.11.0)
## - "export-attachments": Enable support for exporting attachments (Clients >=2025.4.0)
## - "anon-addy-self-host-alias": Enable configuring self-hosted Anon Addy alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
## - "simple-login-self-host-alias": Enable configuring self-hosted Simple Login alias generator. (Needs Android >=2025.3.0, iOS >=2025.4.0)
## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0)
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
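Referring back to the EXPERIMENTAL_CLIENT_FEATURE_FLAGS list above: a minimal, hypothetical sketch of enabling a couple of the listed flags in the .env file (flag names are taken from the list; which ones apply depends on the clients in use, and the selection shown is not a recommendation):

## Hypothetical example, not part of this diff
EXPERIMENTAL_CLIENT_FEATURE_FLAGS=ssh-agent,ssh-key-vault-item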

.gitattributes (vendored, 1 changed line)

@ -1,2 +1,3 @@
# Ignore vendored scripts in GitHub stats
src/static/scripts/* linguist-vendored


@ -1,10 +1,6 @@
name: Build
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on:
push:
paths:
@ -34,10 +30,6 @@ on:
- "docker/DockerSettings.yaml"
- "macros/**"
defaults:
run:
shell: bash
jobs:
build:
name: Build and Test ${{ matrix.channel }}
@ -62,7 +54,7 @@ jobs:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
fetch-depth: 0
@ -71,6 +63,7 @@ jobs:
# Determine rust-toolchain version
- name: Init Variables
id: toolchain
shell: bash
env:
CHANNEL: ${{ matrix.channel }}
run: |
@ -85,23 +78,32 @@ jobs:
# End Determine rust-toolchain version
- name: "Install toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @ Dec 16, 2025, 6:11 PM GMT+1
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
components: clippy, rustfmt
# End Uses the rust-toolchain file to determine version
# Install any other channel to be used, for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @ Dec 16, 2025, 6:11 PM GMT+1
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
# End Install the MSRV channel to be used
# Set the current matrix toolchain version as default
- name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
env:
CHANNEL: ${{ matrix.channel }}
RUST_TOOLCHAIN: ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
run: |
# Remove the rust-toolchain.toml
rm rust-toolchain.toml
# Install the correct toolchain version
rustup toolchain install "${RUST_TOOLCHAIN}" --profile minimal --no-self-update
# If this matrix is the `rust-toolchain` flow, also install rustfmt and clippy
if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then
rustup component add --toolchain "${RUST_TOOLCHAIN}" rustfmt clippy
fi
# Set as the default toolchain
# Set the default
rustup default "${RUST_TOOLCHAIN}"
# Show environment
@ -113,7 +115,7 @@ jobs:
# Enable Rust Caching
- name: Rust Caching
uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1
uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
with:
# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
# Like changing the build host from Ubuntu 20.04 to 22.04 for example.


@ -1,16 +1,8 @@
name: Check templates
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on: [ push, pull_request ]
defaults:
run:
shell: bash
jobs:
docker-templates:
name: Validate docker templates
@ -20,7 +12,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
# End Checkout the repo


@ -1,15 +1,8 @@
name: Hadolint
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on: [ push, pull_request ]
permissions: {}
defaults:
run:
shell: bash
jobs:
hadolint:
@ -20,7 +13,7 @@ jobs:
steps:
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:
@ -32,6 +25,7 @@ jobs:
# Download hadolint - https://github.com/hadolint/hadolint/releases
- name: Download hadolint
shell: bash
run: |
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
@ -40,18 +34,20 @@ jobs:
# End Download hadolint
# Checkout the repo
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
# End Checkout the repo
# Test Dockerfiles with hadolint
- name: Run hadolint
shell: bash
run: hadolint docker/Dockerfile.{debian,alpine}
# End Test Dockerfiles with hadolint
# Test Dockerfiles with docker build checks
- name: Run docker build check
shell: bash
run: |
echo "Checking docker/Dockerfile.debian"
docker build --check . -f docker/Dockerfile.debian


@ -1,12 +1,6 @@
name: Release
permissions: {}
concurrency:
# Apply concurrency control only on the upstream repo
group: ${{ github.repository == 'dani-garcia/vaultwarden' && format('{0}-{1}', github.workflow, github.ref) || github.run_id }}
# Don't cancel other runs when creating a tag
cancel-in-progress: ${{ github.ref_type == 'branch' }}
on:
push:
branches:
@ -16,31 +10,33 @@ on:
# https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet
- '[1-2].[0-9]+.[0-9]+'
concurrency:
# Apply concurrency control only on the upstream repo
group: ${{ github.repository == 'dani-garcia/vaultwarden' && format('{0}-{1}', github.workflow, github.ref) || github.run_id }}
# Don't cancel other runs when creating a tag
cancel-in-progress: ${{ github.ref_type == 'branch' }}
defaults:
run:
shell: bash
# A "release" environment must be created in the repository settings
# (Settings > Environments > New environment) with the following
# variables and secrets configured as needed.
#
# Variables (only set the ones for registries you want to push to):
# DOCKERHUB_REPO: 'index.docker.io/<user>/<repo>'
# QUAY_REPO: 'quay.io/<user>/<repo>'
# GHCR_REPO: 'ghcr.io/<user>/<repo>'
#
# Secrets (only required when the corresponding *_REPO variable is set):
# DOCKERHUB_REPO => DOCKERHUB_USERNAME, DOCKERHUB_TOKEN
# QUAY_REPO => QUAY_USERNAME, QUAY_TOKEN
# GITHUB_TOKEN is provided automatically
env:
# The *_REPO variables need to be configured as repository variables
# Append `/settings/variables/actions` to your repo url
# DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
# Check for Docker hub credentials in secrets
HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
# GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
# Check for Github credentials in secrets
HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
# QUAY_REPO needs to be 'quay.io/<user>/<repo>'
# Check for Quay.io credentials in secrets
HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
jobs:
docker-build:
name: Build Vaultwarden containers
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
environment:
name: release
deployment: false
permissions:
packages: write # Needed to upload packages and artifacts
contents: read
@ -58,13 +54,13 @@ jobs:
steps:
- name: Initialize QEMU binfmt support
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
with:
platforms: "arm64,arm"
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:
@ -77,7 +73,7 @@ jobs:
# Checkout the repo
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
# We need fetch-depth of 0 so we also get all the tag metadata
with:
persist-credentials: false
@ -106,14 +102,14 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
if: ${{ vars.DOCKERHUB_REPO != '' }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
- name: Add registry for DockerHub
if: ${{ vars.DOCKERHUB_REPO != '' }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
env:
DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
run: |
@ -121,15 +117,15 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: ${{ vars.GHCR_REPO != '' }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
- name: Add registry for ghcr.io
if: ${{ vars.GHCR_REPO != '' }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
env:
GHCR_REPO: ${{ vars.GHCR_REPO }}
run: |
@ -137,15 +133,15 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_TOKEN }}
if: ${{ vars.QUAY_REPO != '' }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
- name: Add registry for Quay.io
if: ${{ vars.QUAY_REPO != '' }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
env:
QUAY_REPO: ${{ vars.QUAY_REPO }}
run: |
@ -159,7 +155,7 @@ jobs:
run: |
#
# Check if there is a GitHub Container Registry Login and use it for caching
if [[ -n "${GHCR_REPO}" ]]; then
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}"
echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
else
@ -185,7 +181,7 @@ jobs:
- name: Bake ${{ matrix.base_image }} containers
id: bake_vw
uses: docker/bake-action@a66e1c87e2eca0503c343edf1d208c716d54b8a8 # v7.1.0
uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0
env:
BASE_TAGS: "${{ steps.determine-version.outputs.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@ -222,7 +218,7 @@ jobs:
touch "${RUNNER_TEMP}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: digests-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
path: ${{ runner.temp }}/digests/*
@ -237,12 +233,12 @@ jobs:
# Upload artifacts to Github Actions and Attest the binaries
- name: Attest binaries
uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
with:
subject-path: vaultwarden-${{ env.NORMALIZED_ARCH }}
- name: Upload binaries as artifacts
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
path: vaultwarden-${{ env.NORMALIZED_ARCH }}
@ -251,9 +247,6 @@ jobs:
name: Merge manifests
runs-on: ubuntu-latest
needs: docker-build
environment:
name: release
deployment: false
permissions:
packages: write # Needed to upload packages and artifacts
attestations: write # Needed to generate an artifact attestation for a build
@ -264,7 +257,7 @@ jobs:
steps:
- name: Download digests
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
path: ${{ runner.temp }}/digests
pattern: digests-*-${{ matrix.base_image }}
@ -272,14 +265,14 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
if: ${{ vars.DOCKERHUB_REPO != '' }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
- name: Add registry for DockerHub
if: ${{ vars.DOCKERHUB_REPO != '' }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
env:
DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
run: |
@ -287,15 +280,15 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
if: ${{ vars.GHCR_REPO != '' }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
- name: Add registry for ghcr.io
if: ${{ vars.GHCR_REPO != '' }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
env:
GHCR_REPO: ${{ vars.GHCR_REPO }}
run: |
@ -303,15 +296,15 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_TOKEN }}
if: ${{ vars.QUAY_REPO != '' }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
- name: Add registry for Quay.io
if: ${{ vars.QUAY_REPO != '' }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
env:
QUAY_REPO: ${{ vars.QUAY_REPO }}
run: |
@ -320,43 +313,45 @@ jobs:
# Determine Base Tags
- name: Determine Base Tags
env:
BASE_IMAGE_TAG: "${{ matrix.base_image != 'debian' && format('-{0}', matrix.base_image) || '' }}"
REF_TYPE: ${{ github.ref_type }}
run: |
# Check which main tag we are going to build determined by ref_type
if [[ "${REF_TYPE}" == "tag" ]]; then
echo "BASE_TAGS=latest${BASE_IMAGE_TAG},${GITHUB_REF#refs/*/}${BASE_IMAGE_TAG}${BASE_IMAGE_TAG//-/,}" | tee -a "${GITHUB_ENV}"
echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
elif [[ "${REF_TYPE}" == "branch" ]]; then
echo "BASE_TAGS=testing${BASE_IMAGE_TAG}" | tee -a "${GITHUB_ENV}"
echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
fi
- name: Create manifest list, push it and extract digest SHA
working-directory: ${{ runner.temp }}/digests
env:
BASE_IMAGE_TAG: "${{ matrix.base_image != 'debian' && format('-{0}', matrix.base_image) || '' }}"
BASE_TAGS: "${{ env.BASE_TAGS }}"
CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
run: |
set +e
IFS=',' read -ra IMAGES <<< "${CONTAINER_REGISTRIES}"
IFS=',' read -ra TAGS <<< "${BASE_TAGS}"
TAG_ARGS=()
for img in "${IMAGES[@]}"; do
for tag in "${TAGS[@]}"; do
TAG_ARGS+=("-t" "${img}:${tag}")
done
done
echo "Creating manifest for ${img}:${tag}${BASE_IMAGE_TAG}"
echo "Creating manifest"
if ! OUTPUT=$(docker buildx imagetools create \
"${TAG_ARGS[@]}" \
$(printf "${IMAGES[0]}@sha256:%s " *) 2>&1); then
echo "Manifest creation failed"
OUTPUT=$(docker buildx imagetools create \
-t "${img}:${tag}${BASE_IMAGE_TAG}" \
$(printf "${img}@sha256:%s " *) 2>&1)
STATUS=$?
if [ ${STATUS} -ne 0 ]; then
echo "Manifest creation failed for ${img}:${tag}${BASE_IMAGE_TAG}"
echo "${OUTPUT}"
exit 1
exit ${STATUS}
fi
echo "Manifest created successfully"
echo "Manifest created for ${img}:${tag}${BASE_IMAGE_TAG}"
echo "${OUTPUT}"
done
done
set -e
# Extract digest SHA for subsequent steps
GET_DIGEST_SHA="$(echo "${OUTPUT}" | grep -oE 'sha256:[a-f0-9]{64}' | tail -1)"
@ -364,24 +359,24 @@ jobs:
# Attest container images
- name: Attest - docker.io - ${{ matrix.base_image }}
if: ${{ vars.DOCKERHUB_REPO != '' && env.DIGEST_SHA != ''}}
uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && env.DIGEST_SHA != ''}}
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
with:
subject-name: ${{ vars.DOCKERHUB_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
push-to-registry: true
- name: Attest - ghcr.io - ${{ matrix.base_image }}
if: ${{ vars.GHCR_REPO != '' && env.DIGEST_SHA != ''}}
uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
if: ${{ env.HAVE_GHCR_LOGIN == 'true' && env.DIGEST_SHA != ''}}
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
with:
subject-name: ${{ vars.GHCR_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
push-to-registry: true
- name: Attest - quay.io - ${{ matrix.base_image }}
if: ${{ vars.QUAY_REPO != '' && env.DIGEST_SHA != ''}}
uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
if: ${{ env.HAVE_QUAY_LOGIN == 'true' && env.DIGEST_SHA != ''}}
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
with:
subject-name: ${{ vars.QUAY_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
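For context on the manifest-list step earlier in this workflow diff: with two registries and two tags configured, the TAG_ARGS loop expands to a single imagetools call per base image, roughly like the following (registry names, tags and digests are illustrative, not taken from this diff):

docker buildx imagetools create \
  -t ghcr.io/example/vaultwarden:latest \
  -t ghcr.io/example/vaultwarden:1.35.0 \
  -t quay.io/example/vaultwarden:latest \
  -t quay.io/example/vaultwarden:1.35.0 \
  ghcr.io/example/vaultwarden@sha256:<digest-amd64> \
  ghcr.io/example/vaultwarden@sha256:<digest-arm64>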


@ -1,10 +1,6 @@
name: Cleanup
permissions: {}
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: false
on:
workflow_dispatch:
inputs:


@ -1,10 +1,6 @@
name: Trivy
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on:
push:
branches:
@ -33,12 +29,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@ed142fd0673e97e23eac54620cfb913e5ce36c25 # v0.36.0
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
env:
TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1
@ -50,6 +46,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
with:
sarif_file: 'trivy-results.sarif'


@ -1,11 +1,7 @@
name: Code Spell Checking
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on: [ push, pull_request ]
permissions: {}
jobs:
typos:
@ -16,11 +12,11 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
# End Checkout the repo
# When this version is updated, do not forget to update this in `.pre-commit-config.yaml` too
- name: Spell Check Repo
uses: crate-ci/typos@7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
uses: crate-ci/typos@2d0ce569feab1f8752f1dde43cc2f2aa53236e06 # v1.40.0


@ -1,9 +1,4 @@
name: Security Analysis with zizmor
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
on:
push:
@ -11,6 +6,8 @@ on:
pull_request:
branches: ["**"]
permissions: {}
jobs:
zizmor:
name: Run zizmor
@ -19,12 +16,12 @@ jobs:
security-events: write # To write the security report
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
with:
persist-credentials: false
- name: Run zizmor
uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3
uses: zizmorcore/zizmor-action@e639db99335bc9038abc0e066dfcd72e23d26fb4 # v0.3.0
with:
# intentionally not scanning the entire repository,
# since it contains integration tests.


@ -15,13 +15,6 @@ repos:
- id: detect-private-key
- id: check-symlinks
- id: forbid-submodules
# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
- repo: https://github.com/crate-ci/typos
rev: 7c572958218557a3272c2d6719629443b5cc26fd # v1.45.2
hooks:
- id: typos
- repo: local
hooks:
- id: fmt
@ -58,3 +51,8 @@ repos:
args:
- "-c"
- "cd docker && make"
# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too
- repo: https://github.com/crate-ci/typos
rev: 2d0ce569feab1f8752f1dde43cc2f2aa53236e06 # v1.40.0
hooks:
- id: typos


@ -23,6 +23,4 @@ extend-ignore-re = [
# https://github.com/bitwarden/server/blob/dff9f1cf538198819911cf2c20f8cda3307701c5/src/Notifications/HubHelpers.cs#L86
# https://github.com/bitwarden/clients/blob/9612a4ac45063e372a6fbe87eb253c7cb3c588fb/libs/common/src/auth/services/anonymous-hub.service.ts#L45
"AuthRequestResponseRecieved",
# Ignore Punycode/IDN tests
"xn--.+"
]

Cargo.lock (generated, 1817 changed lines)

File diff suppressed because it is too large


@ -1,6 +1,6 @@
[workspace.package]
edition = "2021"
rust-version = "1.93.0"
rust-version = "1.90.0"
license = "AGPL-3.0-only"
repository = "https://github.com/dani-garcia/vaultwarden"
publish = false
@ -23,17 +23,15 @@ publish.workspace = true
[features]
default = [
# "sqlite" or "sqlite_system",
# "sqlite",
# "mysql",
# "postgresql",
]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
# Please enable at least one of these DB backends.
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite_system = ["diesel/sqlite", "diesel_migrations/sqlite"]
sqlite = ["sqlite_system", "libsqlite3-sys/bundled"] # Alternative to the above, statically linked SQLite into the binary instead of dynamically.
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
@ -67,59 +65,59 @@ dotenvy = { version = "0.15.7", default-features = false }
# Numerical libraries
num-traits = "0.2.19"
num-derive = "0.4.2"
bigdecimal = "0.4.10"
bigdecimal = "0.4.9"
# Web framework
rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
rocket_ws = { version ="0.1.1" }
# WebSockets libraries
rmpv = "1.3.1" # MessagePack library
rmpv = "1.3.0" # MessagePack library
# Concurrent HashMap used for WebSocket messaging and favicons
dashmap = "6.1.0"
# Async futures
futures = "0.3.32"
tokio = { version = "1.52.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio-util = { version = "0.7.18", features = ["compat"]}
futures = "0.3.31"
tokio = { version = "1.48.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio-util = { version = "0.7.17", features = ["compat"]}
# A generic serialization/deserialization framework
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
serde_json = "1.0.145"
# A safe, extensible ORM and Query builder
# Currently pinned diesel to v2.3.3 as newer versions break MySQL/MariaDB compatibility
diesel = { version = "2.3.9", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.3.2"
diesel = { version = "2.3.5", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.3.1"
derive_more = { version = "2.1.1", features = ["from", "into", "as_ref", "deref", "display"] }
diesel-derive-newtype = "2.1.2"
# SQLite, statically bundled unless the `sqlite_system` feature is enabled
libsqlite3-sys = { version = "0.37.0", optional = true }
# Bundled/Static SQLite
libsqlite3-sys = { version = "0.35.0", features = ["bundled"], optional = true }
# Crypto-related libraries
rand = "0.10.1"
rand = "0.9.2"
ring = "0.17.14"
subtle = "2.6.1"
# UUID generation
uuid = { version = "1.23.1", features = ["v4"] }
uuid = { version = "1.19.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.44", features = ["clock", "serde"], default-features = false }
chrono = { version = "0.4.42", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.10.4"
time = "0.3.47"
time = "0.3.44"
# Job scheduler
job_scheduler_ng = "2.4.0"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.11.0"
data-encoding = "2.9.0"
# JWT library
jsonwebtoken = { version = "10.3.0", features = ["use_pem", "rust_crypto"], default-features = false }
jsonwebtoken = { version = "10.2.0", features = ["use_pem", "rust_crypto"], default-features = false }
# TOTP library
totp-lite = "2.0.1"
@ -130,67 +128,67 @@ yubico = { package = "yubico_ng", version = "0.14.1", features = ["online-tokio"
# WebAuthn libraries
# danger-allow-state-serialisation is needed to save the state in the db
# danger-credential-internals is needed to support U2F to Webauthn migration
webauthn-rs = { version = "0.5.5", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
webauthn-rs-proto = "0.5.5"
webauthn-rs-core = "0.5.5"
webauthn-rs = { version = "0.5.4", features = ["danger-allow-state-serialisation", "danger-credential-internals"] }
webauthn-rs-proto = "0.5.4"
webauthn-rs-core = "0.5.4"
# Handling of URL's for WebAuthn and favicons
url = "2.5.8"
url = "2.5.7"
# Email libraries
lettre = { version = "0.11.21", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
lettre = { version = "0.11.19", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "hostname", "tracing", "tokio1-rustls", "ring", "rustls-native-certs"], default-features = false }
percent-encoding = "2.3.2" # URL encoding library used for URL's in the emails
email_address = "0.2.9"
# HTML Template library
handlebars = { version = "6.4.0", features = ["dir_source"] }
handlebars = { version = "6.3.2", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.28", features = ["rustls-tls", "rustls-tls-native-roots", "stream", "json", "deflate", "gzip", "brotli", "zstd", "socks", "cookies", "charset", "http2", "system-proxy"], default-features = false}
hickory-resolver = "0.26.1"
hickory-resolver = "0.25.2"
# Favicon extraction libraries
html5gum = "0.8.3"
regex = { version = "1.12.3", features = ["std", "perf", "unicode-perl"], default-features = false }
regex = { version = "1.12.2", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.2"
bytes = "1.11.1"
svg-hush = "0.9.6"
bytes = "1.11.0"
svg-hush = "0.9.5"
# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.59.0", features = ["async"] }
cached = { version = "0.56.0", features = ["async"] }
# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
cookie_store = "0.22.1"
cookie_store = "0.22.0"
# Used by U2F, JWT and PostgreSQL
openssl = "0.10.78"
openssl = "0.10.75"
# CLI argument parsing
pico-args = "0.5.0"
# Macro ident concatenation
pastey = "0.2.2"
pastey = "0.2.1"
governor = "0.10.4"
# OIDC for SSO
openidconnect = { version = "4.0.1", features = ["reqwest", "rustls-tls"] }
moka = { version = "0.12.15", features = ["future"] }
openidconnect = { version = "4.0.1", features = ["reqwest", "native-tls"] }
mini-moka = "0.10.3"
# Check client versions for specific features.
semver = "1.0.28"
semver = "1.0.27"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.50", features = ["secure"], default-features = false, optional = true }
mimalloc = { version = "0.1.48", features = ["secure"], default-features = false, optional = true }
which = "8.0.2"
which = "8.0.0"
# Argon2 library with support for the PHC format
argon2 = "0.5.3"
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
rpassword = "7.5.1"
rpassword = "7.4.0"
# Loading a dynamic CSS Stylesheet
grass_compiler = { version = "0.13.4", default-features = false }
@ -199,10 +197,10 @@ grass_compiler = { version = "0.13.4", default-features = false }
opendal = { version = "0.55.0", features = ["services-fs"], default-features = false }
# For retrieving AWS credentials, including temporary SSO credentials
anyhow = { version = "1.0.102", optional = true }
aws-config = { version = "1.8.16", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
aws-credential-types = { version = "1.2.14", optional = true }
aws-smithy-runtime-api = { version = "1.12.0", optional = true }
anyhow = { version = "1.0.100", optional = true }
aws-config = { version = "1.8.12", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true }
aws-credential-types = { version = "1.2.11", optional = true }
aws-smithy-runtime-api = { version = "1.9.3", optional = true }
http = { version = "1.4.0", optional = true }
reqsign = { version = "0.16.5", optional = true }
@ -303,7 +301,6 @@ branches_sharing_code = "deny"
case_sensitive_file_extension_comparisons = "deny"
cast_lossless = "deny"
clone_on_ref_ptr = "deny"
duration_suboptimal_units = "deny"
equatable_if_let = "deny"
excessive_precision = "deny"
filter_map_next = "deny"
@ -325,7 +322,6 @@ needless_continue = "deny"
needless_lifetimes = "deny"
option_option = "deny"
redundant_clone = "deny"
ref_option = "deny"
string_add_assign = "deny"
unnecessary_join = "deny"
unnecessary_self_imports = "deny"
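A usage note on the [features] section shown in this Cargo.toml diff: the default feature list leaves the database backend commented out, so a build must enable at least one backend explicitly (the compile_error in build.rs points at the same requirement). A minimal sketch, assuming a standard cargo setup:

cargo build --release --features sqlite      # bundled SQLite
cargo build --release --features postgresql  # or: mysql
# One side of this hunk also defines a separate sqlite_system feature for a dynamically linked system SQLite.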

View file

@ -59,9 +59,8 @@ A nearly complete implementation of the Bitwarden Client API is provided, includ
## Usage
> [!IMPORTANT]
> The web-vault requires the use of HTTPS and a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API). <br>
> That means it will only work if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS). <br>
> We also suggest using a [reverse proxy](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples).
> The web-vault requires the use of a secure context for the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API).
> That means it will only work via `http://localhost:8000` (using the port from the example below) or if you [enable HTTPS](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS).
The recommended way to install and use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).
See [which container image to use](https://github.com/dani-garcia/vaultwarden/wiki/Which-container-image-to-use) for an explanation of the provided tags.
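As a usage sketch only (the port mapping mirrors the localhost:8000 example the README text refers to; the domain, volume path and image tag are illustrative and not taken from this diff):

docker run --detach --name vaultwarden \
  --env DOMAIN="https://vw.example.tld" \
  --volume /vw-data/:/data/ \
  --restart unless-stopped \
  --publish 127.0.0.1:8000:80 \
  ghcr.io/dani-garcia/vaultwarden:latest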


@ -2,21 +2,21 @@ use std::env;
use std::process::Command;
fn main() {
// These allow using e.g. #[cfg(mysql)] instead of #[cfg(feature = "mysql")], which helps when trying to add them through macros
#[cfg(feature = "sqlite_system")] // The `sqlite` feature implies this one.
// This allows using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
#[cfg(feature = "sqlite")]
println!("cargo:rustc-cfg=sqlite");
#[cfg(feature = "mysql")]
println!("cargo:rustc-cfg=mysql");
#[cfg(feature = "postgresql")]
println!("cargo:rustc-cfg=postgresql");
#[cfg(not(any(feature = "sqlite_system", feature = "mysql", feature = "postgresql")))]
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
compile_error!(
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
);
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
// Use check-cfg to let cargo know which cfg's we define,
// and avoid warnings when they are used in the code.
println!("cargo::rustc-check-cfg=cfg(sqlite)");


@ -1,11 +1,11 @@
---
vault_version: "v2026.4.1"
vault_image_digest: "sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe"
vault_version: "v2025.12.0"
vault_image_digest: "sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613"
# Cross Compile Docker Helper Scripts v1.9.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
xx_image_digest: "sha256:c64defb9ed5a91eacb37f96ccc3d4cd72521c4bd18d5442905b95e2226b0e707"
rust_version: 1.95.0 # Rust version to be used
rust_version: 1.92.0 # Rust version to be used
debian_version: trixie # Debian release name to be used
alpine_version: "3.23" # Alpine version to be used
# For which platforms/architectures will we try to build images


@ -19,23 +19,23 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
# [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
# $ docker pull docker.io/vaultwarden/web-vault:v2025.12.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.12.0
# [docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
# [docker.io/vaultwarden/web-vault:v2026.4.1]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613
# [docker.io/vaultwarden/web-vault:v2025.12.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613 AS vault
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64 and linux/arm64
## And for Alpine we define all build images here; they will only be loaded when actually used
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.95.0 AS build_amd64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.95.0 AS build_arm64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.95.0 AS build_armv7
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.95.0 AS build_armv6
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.92.0 AS build_amd64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.92.0 AS build_arm64
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.92.0 AS build_armv7
FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.92.0 AS build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006


@ -19,15 +19,15 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2026.4.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2026.4.1
# [docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe]
# $ docker pull docker.io/vaultwarden/web-vault:v2025.12.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.12.0
# [docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe
# [docker.io/vaultwarden/web-vault:v2026.4.1]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613
# [docker.io/vaultwarden/web-vault:v2025.12.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:ca2a4251c4e63c9ad428262b4dd452789a1b9f6fce71da351e93dceed0d2edbe AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:bb7303efafdb7e2b41bee2c772e14f67676ae2c9047bd7bba80d3544d4162613 AS vault
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c64defb9ed5a91eacb37f
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.95.0-slim-trixie AS build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.92.0-slim-trixie AS build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT


@ -19,13 +19,13 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}
# $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }}
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }}
# [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
# [docker.io/vaultwarden/web-vault:{{ vault_version | replace('+', '_') }}]
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault


@ -13,8 +13,8 @@ path = "src/lib.rs"
proc-macro = true
[dependencies]
quote = "1.0.45"
syn = "2.0.117"
quote = "1.0.42"
syn = "2.0.111"
[lints]
workspace = true


@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;


@ -1,10 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL,
cipher_uuid CHAR(36) NOT NULL,
archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (user_uuid, cipher_uuid),
FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE,
FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE
);


@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;


@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;


@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;


@ -1,8 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
archived_at TIMESTAMP NOT NULL DEFAULT now(),
PRIMARY KEY (user_uuid, cipher_uuid)
);


@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;


@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;


@ -1 +0,0 @@
DROP TABLE IF EXISTS archives;


@ -1,8 +0,0 @@
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (user_uuid, cipher_uuid)
);


@ -1 +0,0 @@
ALTER TABLE sso_auth DROP COLUMN binding_hash;


@ -1 +0,0 @@
ALTER TABLE sso_auth ADD COLUMN binding_hash TEXT;


@ -1,4 +1,4 @@
[toolchain]
channel = "1.95.0"
channel = "1.92.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"


@ -30,10 +30,9 @@ use crate::{
error::{Error, MapResult},
http_client::make_http_request,
mail,
sso::FAKE_SSO_IDENTIFIER,
util::{
container_base_image, format_naive_datetime_local, get_active_web_release, get_display_size,
is_running_in_container, parse_experimental_client_feature_flags, FeatureFlagFilter, NumberOrString,
container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
is_running_in_container, NumberOrString,
},
CONFIG, VERSION,
};
@ -316,11 +315,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -
async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
if CONFIG.mail_enabled() {
let org_id: OrganizationId = if CONFIG.sso_enabled() {
FAKE_SSO_IDENTIFIER.into()
} else {
FAKE_ADMIN_UUID.into()
};
let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
} else {
@ -469,7 +464,7 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
if CONFIG.push_enabled() {
for device in Device::find_push_devices_by_user(&user.uuid, &conn).await {
match unregister_push_device(device.push_uuid.as_ref()).await {
match unregister_push_device(&device.push_uuid).await {
Ok(r) => r,
Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"),
};
@ -477,7 +472,7 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
}
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp(&conn).await?;
user.reset_security_stamp();
user.save(&conn).await
}
@ -485,15 +480,14 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Noti
#[post("/users/<user_id>/disable", format = "application/json")]
async fn disable_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
let mut user = get_user_or_404(&user_id, &conn).await?;
user.reset_security_stamp(&conn).await?;
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();
user.enabled = false;
let save_result = user.save(&conn).await;
nt.send_logout(&user, None, &conn).await;
Device::delete_all_by_user(&user.uuid, &conn).await?;
save_result
}
@ -523,11 +517,7 @@ async fn resend_user_invite(user_id: UserId, _token: AdminToken, conn: DbConn) -
}
if CONFIG.mail_enabled() {
let org_id: OrganizationId = if CONFIG.sso_enabled() {
FAKE_SSO_IDENTIFIER.into()
} else {
FAKE_ADMIN_UUID.into()
};
let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
} else {
@ -647,6 +637,7 @@ use cached::proc_macro::cached;
/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already
/// It will cache this function for 600 seconds (10 minutes) which should prevent the exhaustion of the rate limit
/// Any cache will be lost if Vaultwarden is restarted
use std::time::Duration; // Needed for cached
#[cached(time = 600, sync_writes = "default")]
async fn get_release_info(has_http_access: bool) -> (String, String, String) {
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
@ -698,26 +689,6 @@ async fn get_ntp_time(has_http_access: bool) -> String {
String::from("Unable to fetch NTP time.")
}
fn web_vault_compare(active: &str, latest: &str) -> i8 {
use semver::Version;
use std::cmp::Ordering;
let active_semver = Version::parse(active).unwrap_or_else(|e| {
warn!("Unable to parse active web-vault version '{active}': {e}");
Version::parse("2025.1.1").unwrap()
});
let latest_semver = Version::parse(latest).unwrap_or_else(|e| {
warn!("Unable to parse latest web-vault version '{latest}': {e}");
Version::parse("2025.1.1").unwrap()
});
match active_semver.cmp(&latest_semver) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
}
}
#[get("/diagnostics")]
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
use chrono::prelude::*;
@ -737,28 +708,32 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
_ => "Unable to resolve domain name.".to_string(),
};
let (latest_vw_release, latest_vw_commit, latest_web_release) = get_release_info(has_http_access).await;
let active_web_release = get_active_web_release();
let web_vault_compare = web_vault_compare(&active_web_release, &latest_web_release);
let (latest_release, latest_commit, latest_web_build) = get_release_info(has_http_access).await;
let ip_header_name = &ip_header.0.unwrap_or_default();
let invalid_feature_flags: Vec<String> = parse_experimental_client_feature_flags(
&CONFIG.experimental_client_feature_flags(),
FeatureFlagFilter::InvalidOnly,
// Get current running versions
let web_vault_version = get_web_vault_version();
// Check if the running version is newer than the latest stable released version
let web_vault_pre_release = if let Ok(web_ver_match) = semver::VersionReq::parse(&format!(">{latest_web_build}")) {
web_ver_match.matches(
&semver::Version::parse(&web_vault_version).unwrap_or_else(|_| semver::Version::parse("2025.1.1").unwrap()),
)
.into_keys()
.collect();
} else {
error!("Unable to parse latest_web_build: '{latest_web_build}'");
false
};
let diagnostics_json = json!({
"dns_resolved": dns_resolved,
"current_release": VERSION,
"latest_release": latest_vw_release,
"latest_commit": latest_vw_commit,
"latest_release": latest_release,
"latest_commit": latest_commit,
"web_vault_enabled": &CONFIG.web_vault_enabled(),
"active_web_release": active_web_release,
"latest_web_release": latest_web_release,
"web_vault_compare": web_vault_compare,
"web_vault_version": web_vault_version,
"latest_web_build": latest_web_build,
"web_vault_pre_release": web_vault_pre_release,
"running_within_container": running_within_container,
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
"has_http_access": has_http_access,
@ -772,7 +747,6 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> A
"db_version": get_sql_server_version(&conn).await,
"admin_url": format!("{}/diagnostics", admin_url()),
"overrides": &CONFIG.get_overrides().join(", "),
"invalid_feature_flags": invalid_feature_flags,
"host_arch": env::consts::ARCH,
"host_os": env::consts::OS,
"tz_env": env::var("TZ").unwrap_or_default(),
@ -870,32 +844,3 @@ impl<'r> FromRequest<'r> for AdminToken {
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn validate_web_vault_compare() {
// web_vault_compare(active, latest)
// Test normal versions
assert!(web_vault_compare("2025.12.0", "2025.12.1") == -1);
assert!(web_vault_compare("2025.12.1", "2025.12.1") == 0);
assert!(web_vault_compare("2025.12.2", "2025.12.1") == 1);
// Test patched/+build.n versions
// Newer latest version
assert!(web_vault_compare("2025.12.0+build.1", "2025.12.1") == -1);
assert!(web_vault_compare("2025.12.1", "2025.12.1+build.1") == -1);
assert!(web_vault_compare("2025.12.0+build.1", "2025.12.1+build.1") == -1);
assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1+build.2") == -1);
// Equal versions
assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1+build.1") == 0);
assert!(web_vault_compare("2025.12.2+build.2", "2025.12.2+build.2") == 0);
// Newer active version
assert!(web_vault_compare("2025.12.1+build.1", "2025.12.1") == 1);
assert!(web_vault_compare("2025.12.2", "2025.12.1+build.1") == 1);
assert!(web_vault_compare("2025.12.2+build.1", "2025.12.1+build.1") == 1);
assert!(web_vault_compare("2025.12.1+build.3", "2025.12.1+build.2") == 1);
}
}


@ -22,7 +22,7 @@ use crate::{
DbConn,
},
mail,
util::{deser_opt_nonempty_str, format_date, NumberOrString},
util::{format_date, NumberOrString},
CONFIG,
};
@ -33,6 +33,7 @@ use rocket::{
pub fn routes() -> Vec<rocket::Route> {
routes![
register,
profile,
put_profile,
post_profile,
@ -106,6 +107,7 @@ pub struct RegisterData {
name: Option<String>,
#[allow(dead_code)]
organization_user_id: Option<MembershipId>,
// Used only from the register/finish endpoint
@ -137,7 +139,7 @@ struct KeysData {
}
/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
match password_hint {
None => None,
Some(h) => match h.trim() {
@ -147,7 +149,7 @@ fn clean_password_hint(password_hint: Option<&String>) -> Option<String> {
}
}
fn enforce_password_hint_setting(password_hint: Option<&String>) -> EmptyResult {
fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
if password_hint.is_some() && !CONFIG.password_hints_allowed() {
err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
}
@ -166,6 +168,11 @@ async fn is_email_2fa_required(member_id: Option<MembershipId>, conn: &DbConn) -
false
}
#[post("/accounts/register", data = "<data>")]
async fn register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
_register(data, false, conn).await
}
pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn: DbConn) -> JsonResult {
let mut data: RegisterData = data.into_inner();
let email = data.email.to_lowercase();
@ -245,8 +252,8 @@ pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn:
// Check against the password hint setting here so if it fails, the user
// can retry without losing their invitation below.
let password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(password_hint.as_ref())?;
let password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&password_hint)?;
let mut user = match User::find_by_mail(&email, &conn).await {
Some(user) => {
@ -295,7 +302,7 @@ pub async fn _register(data: Json<RegisterData>, email_verification: bool, conn:
set_kdf_data(&mut user, &data.kdf)?;
user.set_password(&data.master_password_hash, Some(data.key), true, None, &conn).await?;
user.set_password(&data.master_password_hash, Some(data.key), true, None);
user.password_hint = password_hint;
// Add extra fields if present
@ -353,8 +360,8 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
// Check against the password hint setting here so if it fails,
// the user can retry without losing their invitation below.
let password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(password_hint.as_ref())?;
let password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&password_hint)?;
set_kdf_data(&mut user, &data.kdf)?;
@ -363,9 +370,7 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
Some(data.key),
false,
Some(vec![String::from("revision_date")]), // We need to allow revision-date to use the old security_timestamp
&conn,
)
.await?;
);
user.password_hint = password_hint;
if let Some(keys) = data.keys {
@ -374,13 +379,15 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, conn:
}
if let Some(identifier) = data.org_identifier {
if identifier != crate::sso::FAKE_SSO_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
let Some(org) = Organization::find_by_uuid(&identifier.into(), &conn).await else {
err!("Failed to retrieve the associated organization")
if identifier != crate::sso::FAKE_IDENTIFIER && identifier != crate::api::admin::FAKE_ADMIN_UUID {
let org = match Organization::find_by_uuid(&identifier.into(), &conn).await {
None => err!("Failed to retrieve the associated organization"),
Some(org) => org,
};
let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org.uuid, &conn).await else {
err!("Failed to retrieve the invitation")
let membership = match Membership::find_by_user_and_org(&user.uuid, &org.uuid, &conn).await {
None => err!("Failed to retrieve the invitation"),
Some(org) => org,
};
accept_org_invite(&user, membership, None, &conn).await?;
@ -515,8 +522,8 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, conn: DbCon
err!("Invalid password")
}
user.password_hint = clean_password_hint(data.master_password_hint.as_ref());
enforce_password_hint_setting(user.password_hint.as_ref())?;
user.password_hint = clean_password_hint(&data.master_password_hint);
enforce_password_hint_setting(&user.password_hint)?;
log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn)
.await;
@ -531,16 +538,14 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, conn: DbCon
String::from("get_public_keys"),
String::from("get_api_webauthn"),
]),
&conn,
)
.await?;
);
let save_result = user.save(&conn).await;
// Prevent logging out the client where the user requested this endpoint from.
// If you do log out the user it will cause issues on the client side.
// Adding the device uuid will prevent this.
nt.send_logout(&user, Some(&headers.device), &conn).await;
nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
save_result
}
@ -580,6 +585,7 @@ fn set_kdf_data(user: &mut User, data: &KDFData) -> EmptyResult {
Ok(())
}
#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct AuthenticationData {
@ -588,6 +594,7 @@ struct AuthenticationData {
master_password_authentication_hash: String,
}
#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UnlockData {
@ -596,12 +603,11 @@ struct UnlockData {
master_key_wrapped_user_key: String,
}
#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ChangeKdfData {
#[allow(dead_code)]
new_master_password_hash: String,
#[allow(dead_code)]
key: String,
authentication_data: AuthenticationData,
unlock_data: UnlockData,
@ -633,12 +639,10 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, conn: DbConn, nt:
Some(data.unlock_data.master_key_wrapped_user_key),
true,
None,
&conn,
)
.await?;
);
let save_result = user.save(&conn).await;
nt.send_logout(&user, Some(&headers.device), &conn).await;
nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
save_result
}
@ -649,7 +653,6 @@ struct UpdateFolderData {
// There is a bug in 2024.3.x which adds a `null` item.
// To bypass this we allow an Option here, but skip it during the updates
// See: https://github.com/bitwarden/clients/issues/8453
#[serde(default, deserialize_with = "deser_opt_nonempty_str")]
id: Option<FolderId>,
name: String,
}
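The deser_opt_nonempty_str helper used for the id field is not part of this diff; a minimal sketch of what such a serde helper could look like (treating null or an empty string as None is an assumption based on its name):

use serde::{Deserialize, Deserializer};

// Assumed behavior: a null or empty-string value deserializes to None
// (a missing field is handled by the #[serde(default)] on the struct field).
pub fn deser_opt_nonempty_str<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
where
    D: Deserializer<'de>,
    T: From<String>,
{
    let value: Option<String> = Option::deserialize(deserializer)?;
    Ok(value.filter(|s| !s.is_empty()).map(T::from))
}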
@ -903,16 +906,14 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, conn: DbConn, nt:
Some(data.account_unlock_data.master_password_unlock_data.master_key_encrypted_user_key),
true,
None,
&conn,
)
.await?;
);
let save_result = user.save(&conn).await;
// Prevent logging out the client where the user requested this endpoint from.
// If you do log out the user it will cause issues on the client side.
// Adding the device uuid will prevent this.
nt.send_logout(&user, Some(&headers.device), &conn).await;
nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await;
save_result
}
@ -924,13 +925,12 @@ async fn post_sstamp(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbCo
data.validate(&user, true, &conn).await?;
user.reset_security_stamp(&conn).await?;
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();
let save_result = user.save(&conn).await;
nt.send_logout(&user, None, &conn).await;
Device::delete_all_by_user(&user.uuid, &conn).await?;
save_result
}
@ -1048,7 +1048,7 @@ async fn post_email(data: Json<ChangeEmailData>, headers: Headers, conn: DbConn,
user.email_new = None;
user.email_new_token = None;
user.set_password(&data.new_master_password_hash, Some(data.key), true, None, &conn).await?;
user.set_password(&data.new_master_password_hash, Some(data.key), true, None);
let save_result = user.save(&conn).await;
@ -1199,9 +1199,10 @@ async fn password_hint(data: Json<PasswordHintData>, conn: DbConn) -> EmptyResul
// There is still a timing side channel here in that the code
// paths that send mail take noticeably longer than ones that
// don't. Add a randomized sleep to mitigate this somewhat.
use rand::{rngs::SmallRng, RngExt};
let mut rng: SmallRng = rand::make_rng();
let sleep_ms = rng.random_range(900..=1100) as u64;
use rand::{rngs::SmallRng, Rng, SeedableRng};
let mut rng = SmallRng::from_os_rng();
let delta: i32 = 100;
let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
Ok(())
} else {
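Both variants of the snippet above implement the same mitigation: pad the "no mail sent" path with roughly one second of randomized delay so its duration is indistinguishable from the path that actually sends mail. A standalone sketch, assuming the rand 0.9 and tokio APIs already used in this file:

use rand::{rngs::SmallRng, Rng, SeedableRng};

// Sleep for ~1s +/- 100ms so callers cannot tell whether a hint mail was sent.
async fn jittered_delay() {
    let mut rng = SmallRng::from_os_rng();
    let sleep_ms: u64 = rng.random_range(900..=1100);
    tokio::time::sleep(std::time::Duration::from_millis(sleep_ms)).await;
}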
@ -1260,7 +1261,7 @@ struct SecretVerificationRequest {
pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &DbConn) -> ApiResult<()> {
if user.password_iterations < CONFIG.password_iterations() {
user.password_iterations = CONFIG.password_iterations();
user.set_password(pwd_hash, None, false, None, conn).await?;
user.set_password(pwd_hash, None, false, None);
if let Err(e) = user.save(conn).await {
error!("Error updating user: {e:#?}");
@ -1334,11 +1335,6 @@ impl<'r> FromRequest<'r> for KnownDevice {
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
// Bitwarden seems to send padded Base64 strings since 2026.2.1
// Since these values are not streamed and Headers are always split by newlines
// we can safely ignore padding here and remove any '=' appended.
let email_b64 = email_b64.trim_end_matches('=');
let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) else {
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
};
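Since newer clients may send padded base64url while older ones do not, trimming trailing '=' before decoding with the no-pad alphabet accepts both forms. A small sketch of that normalization, using the data-encoding call shown above:

// Accept both padded and unpadded base64url values by stripping trailing '='.
fn decode_email_header(value: &str) -> Option<String> {
    let trimmed = value.trim_end_matches('=');
    let bytes = data_encoding::BASE64URL_NOPAD.decode(trimmed.as_bytes()).ok()?;
    String::from_utf8(bytes).ok()
}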
@ -1438,7 +1434,7 @@ async fn put_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResul
if let Some(device) = Device::find_by_uuid(&device_id, &conn).await {
Device::clear_push_token_by_uuid(&device_id, &conn).await?;
unregister_push_device(device.push_uuid.as_ref()).await?;
unregister_push_device(&device.push_uuid).await?;
}
Ok(())
@ -1708,6 +1704,6 @@ pub async fn purge_auth_requests(pool: DbPool) {
if let Ok(conn) = pool.get().await {
AuthRequest::purge_expired_auth_requests(&conn).await;
} else {
error!("Failed to get DB connection while purging auth requests")
error!("Failed to get DB connection while purging trashed ciphers")
}
}

View file

@ -11,17 +11,17 @@ use rocket::{
use serde_json::Value;
use crate::auth::ClientVersion;
use crate::util::{deser_opt_nonempty_str, save_temp_file, NumberOrString};
use crate::util::{save_temp_file, NumberOrString};
use crate::{
api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
auth::{Headers, OrgIdGuard, OwnerHeaders},
auth::Headers,
config::PathType,
crypto,
db::{
models::{
Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup,
CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership,
MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId,
CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType,
OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
},
DbConn, DbPool,
},
@ -86,8 +86,7 @@ pub fn routes() -> Vec<Route> {
restore_cipher_put_admin,
restore_cipher_selected,
restore_cipher_selected_admin,
purge_org_vault,
purge_personal_vault,
delete_all,
move_cipher_selected,
move_cipher_selected_put,
put_collections2_update,
@ -96,10 +95,6 @@ pub fn routes() -> Vec<Route> {
post_collections_update,
post_collections_admin,
put_collections_admin,
archive_cipher_put,
archive_cipher_selected,
unarchive_cipher_put,
unarchive_cipher_selected,
]
}
@ -252,7 +247,6 @@ pub struct CipherData {
// Id is optional as it is included only in bulk share
pub id: Option<CipherId>,
// Folder id is not included in import
#[serde(default, deserialize_with = "deser_opt_nonempty_str")]
pub folder_id: Option<FolderId>,
// TODO: Some of these might appear all the time, no need for Option
#[serde(alias = "organizationID")]
@ -297,13 +291,11 @@ pub struct CipherData {
// when using older client versions, or if the operation doesn't involve
// updating an existing cipher.
last_known_revision_date: Option<String>,
archived_date: Option<String>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PartialCipherData {
#[serde(default, deserialize_with = "deser_opt_nonempty_str")]
folder_id: Option<FolderId>,
favorite: bool,
}
@ -433,7 +425,7 @@ pub async fn update_cipher_from_data(
let transfer_cipher = cipher.organization_uuid.is_none() && data.organization_id.is_some();
if let Some(org_id) = data.organization_id {
match Membership::find_confirmed_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
None => err!("You don't have permission to add item to organization"),
Some(member) => {
if shared_to_collections.is_some()
@ -539,13 +531,6 @@ pub async fn update_cipher_from_data(
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
if let Some(dt_str) = data.archived_date {
match NaiveDateTime::parse_from_str(&dt_str, "%+") {
Ok(dt) => cipher.set_archived_at(dt, &headers.user.uuid, conn).await?,
Err(err) => warn!("Error parsing ArchivedDate '{dt_str}': {err}"),
}
}
if ut != UpdateType::None {
// Only log events for organizational ciphers
if let Some(org_id) = &cipher.organization_uuid {
@ -642,7 +627,7 @@ async fn post_ciphers_import(data: Json<ImportData>, headers: Headers, conn: DbC
let mut user = headers.user;
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@ -730,13 +715,9 @@ async fn put_cipher_partial(
let data: PartialCipherData = data.into_inner();
let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else {
err!("Cipher does not exist")
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await {
err!("Cipher does not exist", "Cipher is not accessible for the current user")
}
if let Some(ref folder_id) = data.folder_id {
if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &conn).await.is_none() {
err!("Invalid folder", "Folder does not exist or belongs to another user");
@ -814,16 +795,12 @@ async fn post_collections_update(
err!("Collection cannot be changed")
}
let Some(ref org_uuid) = cipher.organization_uuid else {
err!("Cipher is not owned by an organization")
};
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
let current_collections =
HashSet::<CollectionId>::from_iter(cipher.get_collections(headers.user.uuid.clone(), &conn).await);
for collection in posted_collections.symmetric_difference(&current_collections) {
match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@ -854,7 +831,7 @@ async fn post_collections_update(
log_event(
EventType::CipherUpdatedCollections as i32,
&cipher.uuid,
org_uuid,
&cipher.organization_uuid.clone().unwrap(),
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
@ -894,16 +871,12 @@ async fn post_collections_admin(
err!("Collection cannot be changed")
}
let Some(ref org_uuid) = cipher.organization_uuid else {
err!("Cipher is not owned by an organization")
};
let posted_collections = HashSet::<CollectionId>::from_iter(data.collection_ids);
let current_collections =
HashSet::<CollectionId>::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &conn).await);
for collection in posted_collections.symmetric_difference(&current_collections) {
match Collection::find_by_uuid_and_org(collection, org_uuid, &conn).await {
match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, &conn).await {
@ -934,7 +907,7 @@ async fn post_collections_admin(
log_event(
EventType::CipherUpdatedCollections as i32,
&cipher.uuid,
org_uuid,
&cipher.organization_uuid.unwrap(),
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
@ -1025,7 +998,7 @@ async fn put_cipher_share_selected(
}
// Multi share actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@ -1591,7 +1564,6 @@ async fn restore_cipher_selected(
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct MoveCipherData {
#[serde(default, deserialize_with = "deser_opt_nonempty_str")]
folder_id: Option<FolderId>,
ids: Vec<CipherId>,
}
@ -1638,7 +1610,7 @@ async fn move_cipher_selected(
.await;
} else {
// Multi move actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
}
if cipher_count != accessible_ciphers_count {
@ -1666,51 +1638,9 @@ struct OrganizationIdData {
org_id: OrganizationId,
}
// Use the OrgIdGuard here to ensure there is an organization id present.
// If there is no organization id present, it should be forwarded to purge_personal_vault.
// This guard needs to be the first argument, else OwnerHeaders will be triggered which will logout the user.
#[post("/ciphers/purge?<organization..>", data = "<data>")]
async fn purge_org_vault(
_org_id_guard: OrgIdGuard,
organization: OrganizationIdData,
data: Json<PasswordOrOtpData>,
headers: OwnerHeaders,
conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
if organization.org_id != headers.org_id {
err!("Organization not found", "Organization id's do not match");
}
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, true, &conn).await?;
match Membership::find_confirmed_by_user_and_org(&user.uuid, &organization.org_id, &conn).await {
Some(member) if member.atype == MembershipType::Owner => {
Cipher::delete_all_by_organization(&organization.org_id, &conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
log_event(
EventType::OrganizationPurgedVault as i32,
&organization.org_id,
&organization.org_id,
&user.uuid,
headers.device.atype,
&headers.ip.ip,
&conn,
)
.await;
Ok(())
}
_ => err!("You don't have permission to purge the organization vault"),
}
}
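The OrgIdGuard used above is introduced elsewhere in this change set and is not shown here; conceptually it only has to succeed when an organization id query parameter is present, so that requests without one fall through to purge_personal_vault. A hypothetical minimal sketch of such a Rocket request guard (the parameter name, error type, and forward status are assumptions):

use rocket::{
    http::Status,
    request::{FromRequest, Outcome, Request},
};

// Hypothetical guard: succeeds only when an organizationId query parameter is present,
// otherwise forwards so Rocket can try the next matching route instead of this one.
pub struct OrgIdGuard;

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgIdGuard {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.query_value::<String>("organizationId") {
            Some(Ok(_)) => Outcome::Success(OrgIdGuard),
            _ => Outcome::Forward(Status::NotFound),
        }
    }
}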
#[post("/ciphers/purge", data = "<data>")]
async fn purge_personal_vault(
async fn delete_all(
organization: Option<OrganizationIdData>,
data: Json<PasswordOrOtpData>,
headers: Headers,
conn: DbConn,
@ -1721,48 +1651,52 @@ async fn purge_personal_vault(
data.validate(&user, true, &conn).await?;
match organization {
Some(org_data) => {
// Organization ID in query params, purging organization vault
match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await {
None => err!("You don't have permission to purge the organization vault"),
Some(member) => {
if member.atype == MembershipType::Owner {
Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
log_event(
EventType::OrganizationPurgedVault as i32,
&org_data.org_id,
&org_data.org_id,
&user.uuid,
headers.device.atype,
&headers.ip.ip,
&conn,
)
.await;
Ok(())
} else {
err!("You don't have permission to purge the organization vault");
}
}
}
}
None => {
// No organization ID in query params, purging user vault
// Delete ciphers and their attachments
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
cipher.delete(&conn).await?;
}
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn).await {
f.delete(&conn).await?;
}
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await;
Ok(())
}
#[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
archive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}
#[put("/ciphers/archive", data = "<data>")]
async fn archive_cipher_selected(
data: Json<CipherIdsData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
archive_multiple_ciphers(data, &headers, &conn, &nt).await
}
#[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
unarchive_cipher(&cipher_id, &headers, false, &conn, &nt).await
}
#[put("/ciphers/unarchive", data = "<data>")]
async fn unarchive_cipher_selected(
data: Json<CipherIdsData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
unarchive_multiple_ciphers(data, &headers, &conn, &nt).await
}
#[derive(PartialEq)]
@ -1855,7 +1789,7 @@ async fn _delete_multiple_ciphers(
}
// Multi delete actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await;
Ok(())
}
@ -1923,7 +1857,7 @@ async fn _restore_multiple_ciphers(
}
// Multi move actions do not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
Ok(Json(json!({
"data": ciphers,
@ -1983,122 +1917,6 @@ async fn _delete_cipher_attachment_by_id(
Ok(Json(json!({"cipher":cipher_json})))
}
async fn archive_cipher(
cipher_id: &CipherId,
headers: &Headers,
multi_archive: bool,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
err!("Cipher is not accessible for the current user")
}
cipher.set_archived_at(Utc::now().naive_utc(), &headers.user.uuid, conn).await?;
if !multi_archive {
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
&cipher.update_users_revision(conn).await,
&headers.device,
None,
conn,
)
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn unarchive_cipher(
cipher_id: &CipherId,
headers: &Headers,
multi_unarchive: bool,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
err!("Cipher is not accessible for the current user")
}
cipher.unarchive(&headers.user.uuid, conn).await?;
if !multi_unarchive {
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
&cipher.update_users_revision(conn).await,
&headers.device,
None,
conn,
)
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn archive_multiple_ciphers(
data: Json<CipherIdsData>,
headers: &Headers,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
let mut ciphers: Vec<Value> = Vec::new();
for cipher_id in data.ids {
match archive_cipher(&cipher_id, headers, true, conn, nt).await {
Ok(json) => ciphers.push(json.into_inner()),
err => return err,
}
}
// Multi archive does not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
Ok(Json(json!({
"data": ciphers,
"object": "list",
"continuationToken": null
})))
}
async fn unarchive_multiple_ciphers(
data: Json<CipherIdsData>,
headers: &Headers,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
let mut ciphers: Vec<Value> = Vec::new();
for cipher_id in data.ids {
match unarchive_cipher(&cipher_id, headers, true, conn, nt).await {
Ok(json) => ciphers.push(json.into_inner()),
err => return err,
}
}
// Multi unarchive does not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, headers.device.push_uuid.as_ref(), conn).await;
Ok(Json(json!({
"data": ciphers,
"object": "list",
"continuationToken": null
})))
}
/// This will hold all the necessary data to improve a full sync of all the ciphers
/// It can be used during the `Cipher::to_json()` call.
/// It will prevent the so-called N+1 SQL issue by running just a few queries which will hold all the data needed.
@ -2108,7 +1926,6 @@ pub struct CipherSyncData {
pub cipher_folders: HashMap<CipherId, FolderId>,
pub cipher_favorites: HashSet<CipherId>,
pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
pub cipher_archives: HashMap<CipherId, NaiveDateTime>,
pub members: HashMap<OrganizationId, Membership>,
pub user_collections: HashMap<CollectionId, CollectionUser>,
pub user_collections_groups: HashMap<CollectionId, CollectionGroup>,
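The doc comment on CipherSyncData above captures the idea: instead of issuing one query per cipher during a full sync, each related table is fetched once up front and indexed by cipher or collection id. A generic illustration of that prefetch pattern, with hypothetical types standing in for the real models:

use std::collections::HashMap;

// Hypothetical illustration: one query per relation, indexed once, instead of one query per cipher.
struct PrefetchedSync {
    folders_by_cipher: HashMap<String, String>, // cipher id -> folder id
}

impl PrefetchedSync {
    fn new(folder_links: Vec<(String, String)>) -> Self {
        // `folder_links` would be the result of a single "all folder links for this user" query.
        Self {
            folders_by_cipher: folder_links.into_iter().collect(),
        }
    }

    fn folder_for(&self, cipher_id: &str) -> Option<&str> {
        self.folders_by_cipher.get(cipher_id).map(String::as_str)
    }
}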
@ -2125,25 +1942,20 @@ impl CipherSyncData {
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
let cipher_folders: HashMap<CipherId, FolderId>;
let cipher_favorites: HashSet<CipherId>;
let cipher_archives: HashMap<CipherId, NaiveDateTime>;
match sync_type {
// User Sync supports Folders, Favorites, and Archives
// User Sync supports Folders and Favorites
CipherSyncType::User => {
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect();
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect();
// Generate a HashMap with the Cipher UUID as key and the archived date time as value
cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
}
// Organization Sync does not support Folders, Favorites, or Archives.
// Organization Sync does not support Folders and Favorites.
// If these are set, it will cause issues in the web-vault.
CipherSyncType::Organization => {
cipher_folders = HashMap::with_capacity(0);
cipher_favorites = HashSet::with_capacity(0);
cipher_archives = HashMap::with_capacity(0);
}
}
@ -2164,11 +1976,8 @@ impl CipherSyncData {
}
// Generate a HashMap with the Organization UUID as key and the Membership record
let members: HashMap<OrganizationId, Membership> = Membership::find_confirmed_by_user(user_id, conn)
.await
.into_iter()
.map(|m| (m.org_uuid.clone(), m))
.collect();
let members: HashMap<OrganizationId, Membership> =
Membership::find_by_user(user_id, conn).await.into_iter().map(|m| (m.org_uuid.clone(), m)).collect();
// Generate a HashMap with the User_Collections UUID as key and the CollectionUser record
let user_collections: HashMap<CollectionId, CollectionUser> = CollectionUser::find_by_user(user_id, conn)
@ -2206,7 +2015,6 @@ impl CipherSyncData {
};
Self {
cipher_archives,
cipher_attachments,
cipher_folders,
cipher_favorites,

View file

@ -653,7 +653,7 @@ async fn password_emergency_access(
};
// change grantor_user password
grantor_user.set_password(new_master_password_hash, Some(data.key), true, None, &conn).await?;
grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
grantor_user.save(&conn).await?;
// Disable TwoFactor providers since they will otherwise block logins

View file

@ -240,7 +240,7 @@ async fn _log_user_event(
ip: &IpAddr,
conn: &DbConn,
) {
let memberships = Membership::find_confirmed_by_user(user_id, conn).await;
let memberships = Membership::find_by_user(user_id, conn).await;
let mut events: Vec<Event> = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org
// Upstream saves the event also without any org_id.

View file

@ -8,7 +8,6 @@ use crate::{
models::{Folder, FolderId},
DbConn,
},
util::deser_opt_nonempty_str,
};
pub fn routes() -> Vec<rocket::Route> {
@ -39,7 +38,6 @@ async fn get_folder(folder_id: FolderId, headers: Headers, conn: DbConn) -> Json
#[serde(rename_all = "camelCase")]
pub struct FolderData {
pub name: String,
#[serde(default, deserialize_with = "deser_opt_nonempty_str")]
pub id: Option<FolderId>,
}

View file

@ -59,8 +59,7 @@ use crate::{
error::Error,
http_client::make_http_request,
mail,
util::{parse_experimental_client_feature_flags, FeatureFlagFilter},
CONFIG,
util::parse_experimental_client_feature_flags,
};
#[derive(Debug, Serialize, Deserialize)]
@ -124,7 +123,7 @@ async fn post_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: Db
user.save(&conn).await?;
nt.send_user_update(UpdateType::SyncSettings, &user, headers.device.push_uuid.as_ref(), &conn).await;
nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &conn).await;
Ok(Json(json!({})))
}
@ -137,7 +136,7 @@ async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbC
#[get("/hibp/breach?<username>")]
async fn hibp_breach(username: &str, _headers: Headers) -> JsonResult {
let username: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
if let Some(api_key) = CONFIG.hibp_api_key() {
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
let url = format!(
"https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
);
@ -198,17 +197,19 @@ fn get_api_webauthn(_headers: Headers) -> Json<Value> {
#[get("/config")]
fn config() -> Json<Value> {
let domain = CONFIG.domain();
let domain = crate::CONFIG.domain();
// Officially available feature flags can be found here:
// Server (v2026.2.1): https://github.com/bitwarden/server/blob/0e42725d0837bd1c0dabd864ff621a579959744b/src/Core/Constants.cs#L135
// Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
// iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
let mut feature_states = parse_experimental_client_feature_flags(
&CONFIG.experimental_client_feature_flags(),
FeatureFlagFilter::ValidOnly,
);
feature_states.insert("pm-19148-innovation-archive".to_string(), true);
// Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
// Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
// iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
let mut feature_states =
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
feature_states.insert("duo-redirect".to_string(), true);
feature_states.insert("email-verification".to_string(), true);
feature_states.insert("unauth-ui-refresh".to_string(), true);
feature_states.insert("enable-pm-flight-recorder".to_string(), true);
feature_states.insert("mobile-error-reporting".to_string(), true);
Json(json!({
// Note: The clients use this version to handle backwards compatibility concerns
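As a side note on the flag handling above: parse_experimental_client_feature_flags turns the configured comma-separated feature flag list into a map of flag name to true, which the handler then extends with a handful of always-on flags. A minimal sketch of that parsing step (the FeatureFlagFilter validation used in the newer variant is omitted, and the function name here is illustrative):

use std::collections::HashMap;

// Illustrative only: "flag-a, flag-b" -> {"flag-a": true, "flag-b": true}
fn parse_flags(raw: &str) -> HashMap<String, bool> {
    raw.split(',')
        .map(str::trim)
        .filter(|flag| !flag.is_empty())
        .map(|flag| (flag.to_string(), true))
        .collect()
}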
@ -224,7 +225,7 @@ fn config() -> Json<Value> {
"url": "https://github.com/dani-garcia/vaultwarden"
},
"settings": {
"disableUserRegistration": CONFIG.is_signup_disabled()
"disableUserRegistration": crate::CONFIG.is_signup_disabled()
},
"environment": {
"vault": domain,
@ -277,7 +278,7 @@ async fn accept_org_invite(
member.save(conn).await?;
if CONFIG.mail_enabled() {
if crate::CONFIG.mail_enabled() {
let org = match Organization::find_by_uuid(&member.org_uuid, conn).await {
Some(org) => org,
None => err!("Organization not found."),

File diff suppressed because it is too large

View file

@ -156,7 +156,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, conn: DbConn
}
};
GroupUser::delete_all_by_group(&group_uuid, &org_id, &conn).await?;
GroupUser::delete_all_by_group(&group_uuid, &conn).await?;
for ext_id in &group_data.member_external_ids {
if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &conn).await {

View file

@ -574,7 +574,7 @@ async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Re
Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
} else {
Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_mins(5)).await?.uri().to_string())
Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}

View file

@ -7,10 +7,10 @@ use crate::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::{ClientHeaders, Headers},
auth::Headers,
crypto,
db::{
models::{AuthRequest, AuthRequestId, DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId},
models::{DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId},
DbConn,
},
error::{Error, MapResult},
@ -30,64 +30,36 @@ struct SendEmailLoginData {
email: Option<String>,
#[serde(alias = "MasterPasswordHash")]
master_password_hash: Option<String>,
auth_request_id: Option<AuthRequestId>,
auth_request_access_code: Option<String>,
}
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
async fn send_email_login(data: Json<SendEmailLoginData>, client_headers: ClientHeaders, conn: DbConn) -> EmptyResult {
async fn send_email_login(data: Json<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner();
if !CONFIG._enable_email_2fa() {
err!("Email 2FA is disabled")
}
// Ratelimit the login
crate::ratelimit::check_limit_login(&client_headers.ip.ip)?;
// Get the user
let email = match &data.email {
Some(email) if !email.is_empty() => Some(email),
_ => None,
};
let master_password_hash = match &data.master_password_hash {
Some(password_hash) if !password_hash.is_empty() => Some(password_hash),
_ => None,
};
let auth_request_id = match &data.auth_request_id {
Some(auth_request_id) if !auth_request_id.is_empty() => Some(auth_request_id),
_ => None,
let user = if let Some(email) = email {
let Some(master_password_hash) = &data.master_password_hash else {
err!("No password hash has been submitted.")
};
let user = if let Some(email) = email {
let Some(user) = User::find_by_mail(email, &conn).await else {
err!("Username or password is incorrect. Try again.")
};
if let Some(master_password_hash) = master_password_hash {
// Check password
if !user.check_valid_password(master_password_hash) {
err!("Username or password is incorrect. Try again.")
}
} else if let Some(auth_request_id) = auth_request_id {
let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_id, &conn).await else {
err!("AuthRequest doesn't exist", "User not found")
};
let Some(code) = &data.auth_request_access_code else {
err!("no auth request access code")
};
if auth_request.device_type != client_headers.device_type
|| auth_request.request_ip != client_headers.ip.ip.to_string()
|| !auth_request.check_access_code(code)
{
err!("AuthRequest doesn't exist", "Invalid device, IP or code")
}
} else {
err!("No password hash has been submitted.")
}
user
} else {

View file

@ -1,9 +1,7 @@
use chrono::{TimeDelta, Utc};
use data_encoding::BASE32;
use num_traits::FromPrimitive;
use rocket::serde::json::Json;
use rocket::Route;
use serde::Deserialize;
use serde_json::Value;
use crate::{
@ -11,12 +9,12 @@ use crate::{
core::{log_event, log_user_event},
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::Headers,
auth::{ClientHeaders, Headers},
crypto,
db::{
models::{
DeviceType, EventType, Membership, MembershipType, OrgPolicyType, Organization, OrganizationId, TwoFactor,
TwoFactorIncomplete, TwoFactorType, User, UserId,
TwoFactorIncomplete, User, UserId,
},
DbConn, DbPool,
},
@ -33,47 +31,11 @@ pub mod protected_actions;
pub mod webauthn;
pub mod yubikey;
fn has_global_duo_credentials() -> bool {
CONFIG._enable_duo() && CONFIG.duo_host().is_some() && CONFIG.duo_ikey().is_some() && CONFIG.duo_skey().is_some()
}
pub fn is_twofactor_provider_usable(provider_type: TwoFactorType, provider_data: Option<&str>) -> bool {
#[derive(Deserialize)]
struct DuoProviderData {
host: String,
ik: String,
sk: String,
}
match provider_type {
TwoFactorType::Authenticator => true,
TwoFactorType::Email => CONFIG._enable_email_2fa(),
TwoFactorType::Duo | TwoFactorType::OrganizationDuo => {
provider_data
.and_then(|raw| serde_json::from_str::<DuoProviderData>(raw).ok())
.is_some_and(|duo| !duo.host.is_empty() && !duo.ik.is_empty() && !duo.sk.is_empty())
|| has_global_duo_credentials()
}
TwoFactorType::YubiKey => {
CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some()
}
TwoFactorType::Webauthn => CONFIG.is_webauthn_2fa_supported(),
TwoFactorType::Remember => !CONFIG.disable_2fa_remember(),
TwoFactorType::RecoveryCode => true,
TwoFactorType::U2f
| TwoFactorType::U2fRegisterChallenge
| TwoFactorType::U2fLoginChallenge
| TwoFactorType::EmailVerificationChallenge
| TwoFactorType::WebauthnRegisterChallenge
| TwoFactorType::WebauthnLoginChallenge
| TwoFactorType::ProtectedActions => false,
}
}
pub fn routes() -> Vec<Route> {
let mut routes = routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
get_device_verification_settings,
@ -92,13 +54,7 @@ pub fn routes() -> Vec<Route> {
#[get("/two-factor")]
async fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await;
let twofactors_json: Vec<Value> = twofactors
.iter()
.filter_map(|tf| {
let provider_type = TwoFactorType::from_i32(tf.atype)?;
is_twofactor_provider_usable(provider_type, Some(&tf.data)).then(|| TwoFactor::to_json_provider(tf))
})
.collect();
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
Json(json!({
"data": twofactors_json,
@ -120,6 +76,54 @@ async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbCo
})))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RecoverTwoFactor {
master_password_hash: String,
email: String,
recovery_code: String,
}
#[post("/two-factor/recover", data = "<data>")]
async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner();
use crate::db::models::User;
// Get the user
let Some(mut user) = User::find_by_mail(&data.email, &conn).await else {
err!("Username or password is incorrect. Try again.")
};
// Check password
if !user.check_valid_password(&data.master_password_hash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.recovery_code) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &conn).await?;
log_user_event(
EventType::UserRecovered2fa as i32,
&user.uuid,
client_headers.device_type,
&client_headers.ip.ip,
&conn,
)
.await;
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn).await?;
Ok(Json(Value::Object(serde_json::Map::new())))
}
async fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = crypto::encode_random_bytes::<20>(&BASE32);

View file

@ -38,7 +38,7 @@ static WEBAUTHN: LazyLock<Webauthn> = LazyLock::new(|| {
let webauthn = WebauthnBuilder::new(&rp_id, &rp_origin)
.expect("Creating WebauthnBuilder failed")
.rp_name(&domain)
.timeout(Duration::from_mins(1));
.timeout(Duration::from_millis(60000));
webauthn.build().expect("Building Webauthn failed")
});
@ -108,8 +108,8 @@ impl WebauthnRegistration {
#[post("/two-factor/get-webauthn", data = "<data>")]
async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.is_webauthn_2fa_supported() {
err!("Configured `DOMAIN` is not compatible with Webauthn")
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. Webauthn disabled")
}
let data: PasswordOrOtpData = data.into_inner();
@ -144,7 +144,7 @@ async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Hea
let (mut challenge, state) = WEBAUTHN.start_passkey_registration(
Uuid::from_str(&user.uuid).expect("Failed to parse UUID"), // Should never fail
&user.email,
user.display_name(),
&user.name,
Some(registrations),
)?;
@ -438,7 +438,7 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
// We need to check for and update the backup_eligible flag when needed.
// Vaultwarden did not have knowledge of this flag prior to migrating to webauthn-rs v0.5.x
// Because of this we check the flag at runtime and update the registrations and state when needed
let backup_flags_updated = check_and_update_backup_eligible(&rsp, &mut registrations, &mut state)?;
check_and_update_backup_eligible(user_id, &rsp, &mut registrations, &mut state, conn).await?;
let authentication_result = WEBAUTHN.finish_passkey_authentication(&rsp, &state)?;
@ -446,8 +446,7 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
if ct_eq(reg.credential.cred_id(), authentication_result.cred_id()) {
// If the cred id matches and the credential is updated, Some(true) is returned
// In those cases, update the record, else leave it alone
let credential_updated = reg.credential.update_credential(&authentication_result) == Some(true);
if credential_updated || backup_flags_updated {
if reg.credential.update_credential(&authentication_result) == Some(true) {
TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
.save(conn)
.await?;
@ -464,11 +463,13 @@ pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &Db
)
}
fn check_and_update_backup_eligible(
async fn check_and_update_backup_eligible(
user_id: &UserId,
rsp: &PublicKeyCredential,
registrations: &mut Vec<WebauthnRegistration>,
state: &mut PasskeyAuthentication,
) -> Result<bool, Error> {
conn: &DbConn,
) -> EmptyResult {
// The feature flags from the response
// For details see: https://www.w3.org/TR/webauthn-3/#sctn-authenticator-data
const FLAG_BACKUP_ELIGIBLE: u8 = 0b0000_1000;
@ -485,7 +486,16 @@ fn check_and_update_backup_eligible(
let rsp_id = rsp.raw_id.as_slice();
for reg in &mut *registrations {
if ct_eq(reg.credential.cred_id().as_slice(), rsp_id) {
// Try to update the key, and if needed also update the database, before the actual state check is done
if reg.set_backup_eligible(backup_eligible, backup_state) {
TwoFactor::new(
user_id.clone(),
TwoFactorType::Webauthn,
serde_json::to_string(&registrations)?,
)
.save(conn)
.await?;
// We also need to adjust the current state which holds the challenge used to start the authentication verification
// Because Vaultwarden supports multiple keys, we need to loop through the deserialized state and check which key to update
let mut raw_state = serde_json::to_value(&state)?;
@ -507,12 +517,11 @@ fn check_and_update_backup_eligible(
}
*state = serde_json::from_value(raw_state)?;
return Ok(true);
}
break;
}
}
}
}
Ok(false)
Ok(())
}
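The BE/BS bits handled above come from the authenticator data flags byte defined in the WebAuthn spec (bit 3 = backup eligible, bit 4 = backup state). A small sketch of extracting them from a raw flags byte, matching the constant already shown in this function:

// WebAuthn authenticator data flags byte (https://www.w3.org/TR/webauthn-3/#sctn-authenticator-data)
const FLAG_BACKUP_ELIGIBLE: u8 = 0b0000_1000; // BE, bit 3
const FLAG_BACKUP_STATE: u8 = 0b0001_0000; // BS, bit 4

// Returns (backup_eligible, backup_state) for a raw flags byte.
fn backup_flags(flags: u8) -> (bool, bool) {
    (flags & FLAG_BACKUP_ELIGIBLE != 0, flags & FLAG_BACKUP_STATE != 0)
}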

View file

@ -19,7 +19,7 @@ use svg_hush::{data_url_filter, Filter};
use crate::{
config::PathType,
error::Error,
http_client::{get_reqwest_client_builder, get_valid_host, should_block_host, CustomHttpClientError},
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
util::Cached,
CONFIG,
};
@ -81,19 +81,19 @@ static ICON_SIZE_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(?x)(\d+
// The function name `icon_external` is checked in the `on_response` function in `AppHeaders`
// It is used to prevent sending a specific header which breaks icon downloads.
// If this function needs to be renamed, also adjust the code in `util.rs`
#[get("/<host>/icon.png")]
fn icon_external(host: &str) -> Cached<Option<Redirect>> {
let Ok(host) = get_valid_host(host) else {
warn!("Invalid host: {host}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
};
if should_block_host(&host).is_err() {
warn!("Blocked address: {host}");
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Cached<Option<Redirect>> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {domain}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
}
let url = CONFIG._icon_service_url().replace("{}", &host.to_string());
if should_block_address(domain) {
warn!("Blocked address: {domain}");
return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
}
let url = CONFIG._icon_service_url().replace("{}", domain);
let redir = match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@ -107,21 +107,12 @@ fn icon_external(host: &str) -> Cached<Option<Redirect>> {
Cached::ttl(redir, CONFIG.icon_cache_ttl(), true)
}
#[get("/<host>/icon.png")]
async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
let Ok(host) = get_valid_host(host) else {
warn!("Invalid host: {host}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
};
if should_block_host(&host).is_err() {
warn!("Blocked address: {host}");
if !is_valid_domain(domain) {
warn!("Invalid domain: {domain}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
@ -129,7 +120,16 @@ async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
);
}
match get_icon(&host.to_string()).await {
if should_block_address(domain) {
warn!("Blocked address: {domain}");
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
}
match get_icon(domain).await {
Some((icon, icon_type)) => {
Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
}
@ -137,6 +137,42 @@ async fn icon_internal(host: &str) -> Cached<(ContentType, Vec<u8>)> {
}
}
/// Returns if the domain provided is valid or not.
///
/// This does some manual checks and makes use of Url to do some basic checking.
/// domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
fn is_valid_domain(domain: &str) -> bool {
const ALLOWED_CHARS: &str = "-.";
// If parsing the domain fails using Url, it will not work with reqwest.
if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {
debug!("Domain parse error: '{domain}' - {parse_error:?}");
return false;
} else if domain.is_empty()
|| domain.contains("..")
|| domain.starts_with('.')
|| domain.starts_with('-')
|| domain.ends_with('-')
{
debug!(
"Domain validation error: '{domain}' is either empty, contains '..', starts with an '.', starts or ends with a '-'"
);
return false;
} else if domain.len() > 255 {
debug!("Domain validation error: '{domain}' exceeds 255 characters");
return false;
}
for c in domain.chars() {
if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
debug!("Domain validation error: '{domain}' contains an invalid character '{c}'");
return false;
}
}
true
}
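For reference, a few example inputs and the expected outcome under these checks, written as an illustrative test:

#[test]
fn domain_validation_examples() {
    assert!(is_valid_domain("example.com"));
    assert!(is_valid_domain("sub.example.co.uk"));
    assert!(!is_valid_domain(""));             // empty
    assert!(!is_valid_domain("-example.com")); // leading '-'
    assert!(!is_valid_domain("example..com")); // double dot
}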
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
let path = format!("{domain}.png");
@ -331,7 +367,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if get_valid_host(&base_domain).is_ok() {
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
@ -342,7 +378,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
// When the domain is not an IP, and has fewer than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if get_valid_host(&www_domain).is_ok() {
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
@ -477,11 +513,13 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
if !sizes.is_empty() {
match ICON_SIZE_REGEX.captures(sizes.trim()) {
Some(dimensions) if dimensions.len() >= 3 => {
None => {}
Some(dimensions) => {
if dimensions.len() >= 3 {
width = dimensions[1].parse::<u16>().unwrap_or_default();
height = dimensions[2].parse::<u16>().unwrap_or_default();
}
_ => {}
}
}
}
@ -496,8 +534,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
use data_url::DataUrl;
let mut icons = icon_result.iconlist.iter().take(5).peekable();
while let Some(icon) = icons.next() {
for icon in icon_result.iconlist.iter().take(5) {
if icon.href.starts_with("data:image") {
let Ok(datauri) = DataUrl::process(&icon.href) else {
continue;
@ -525,23 +562,11 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
debug!("Trying {}", icon.href);
// Make sure all icons are checked before returning error
let res = match get_page_with_referer(&icon.href, &icon_result.referer).await {
Ok(r) => r,
Err(e) if icons.peek().is_none() => return Err(e),
Err(e) if CustomHttpClientError::downcast_ref(&e).is_some() => return Err(e), // If blacklisted, stop immediately instead of checking the rest of the icons. See the explanation and actual handling inside get_icon()
Err(e) => {
warn!("Unable to download icon: {e:?}");
// Continue to next icon
continue;
}
};
let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
// Check if the icon type is allowed, else try another icon from the list.
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
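The variant above that iterates with peekable() only aborts the whole lookup when there is no further candidate to try (blacklist errors still abort immediately). The same "propagate the error only for the last candidate" pattern in isolation, as a standalone sketch with a hypothetical fetch function:

// Sketch: try each candidate, keep the error only if it was the last one.
fn first_ok(candidates: &[&str]) -> Result<String, String> {
    let mut it = candidates.iter().peekable();
    while let Some(c) = it.next() {
        match fetch(c) {
            Ok(v) => return Ok(v),
            Err(e) if it.peek().is_none() => return Err(e), // last candidate: propagate
            Err(_) => continue,                             // otherwise try the next one
        }
    }
    Err("no candidates".into())
}

// Hypothetical fetch used only for this sketch.
fn fetch(c: &str) -> Result<String, String> {
    if c.starts_with("https://") {
        Ok(c.to_string())
    } else {
        Err(format!("cannot fetch {c}"))
    }
}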
@ -595,16 +620,13 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
None
}
// Some details can be found here:
// - https://www.garykessler.net/library/file_sigs_GCK_latest.html
// - https://en.wikipedia.org/wiki/List_of_file_signatures
match bytes {
[137, 80, 78, 71, 13, 10, 26, 10, ..] => Some("png"),
[0, 0, 1, 0, n1, n2, ..] if u16::from_le_bytes([*n1, *n2]) > 0 => Some("x-icon"), // https://en.wikipedia.org/wiki/ICO_(file_format)
[82, 73, 70, 70, _, _, _, _, 87, 69, 66, 80, ..] => Some("webp"), // Only match WebP Images
[255, 216, 255, b, ..] if *b >= 0xC0 => Some("jpeg"),
[71, 73, 70, 56, 55 | 57, 97, ..] => Some("gif"),
[66, 77, _, _, _, _, 0, 0, 0, 0, ..] => Some("bmp"), // https://en.wikipedia.org/wiki/BMP_file_format
[137, 80, 78, 71, ..] => Some("png"),
[0, 0, 1, 0, ..] => Some("x-icon"),
[82, 73, 70, 70, ..] => Some("webp"),
[255, 216, 255, ..] => Some("jpeg"),
[71, 73, 70, 56, ..] => Some("gif"),
[66, 77, ..] => Some("bmp"),
[60, 115, 118, 103, ..] => Some("svg+xml"), // Normal svg
[60, 63, 120, 109, 108, ..] => check_svg_after_xml_declaration(bytes), // An svg starting with <?xml
_ => None,
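As an illustration of the stricter signature matching (the variant with guard clauses above), a hypothetical test sketch:

#[test]
fn icon_type_signature_check() {
    // Full 8-byte PNG signature followed by the start of the IHDR length field.
    let png_header = [137u8, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13];
    assert_eq!(get_icon_type(&png_header), Some("png"));
    // Under the stricter pattern a truncated signature no longer matches.
    assert_eq!(get_icon_type(&[137, 80, 78, 71]), None);
}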

View file

@ -2,7 +2,7 @@ use chrono::Utc;
use num_traits::FromPrimitive;
use rocket::{
form::{Form, FromForm},
http::{Cookie, CookieJar, SameSite},
http::Status,
response::Redirect,
serde::json::Json,
Route,
@ -12,20 +12,16 @@ use serde_json::Value;
use crate::{
api::{
core::{
accounts::{_prelogin, _register, kdf_upgrade, PreloginData, RegisterData},
accounts::{PreloginData, RegisterData, _prelogin, _register, kdf_upgrade},
log_user_event,
two_factor::{
authenticator, duo, duo_oidc, email, enforce_2fa_policy, is_twofactor_provider_usable, webauthn,
yubikey,
},
two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
},
master_password_policy,
push::register_push_device,
ApiResult, EmptyResult, JsonResult,
},
auth,
auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion, Secure},
crypto,
auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion},
db::{
models::{
AuthRequest, AuthRequestId, Device, DeviceId, EventType, Invitation, OIDCCodeWrapper, OrganizationApiKey,
@ -43,7 +39,6 @@ pub fn routes() -> Vec<Route> {
routes![
login,
prelogin,
prelogin_password,
identity_register,
register_verification_email,
register_finish,
@ -67,43 +62,43 @@ async fn login(
let login_result = match data.grant_type.as_ref() {
"refresh_token" => {
_check_is_some(data.refresh_token.as_ref(), "refresh_token cannot be blank")?;
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
_refresh_login(data, &conn, &client_header.ip).await
}
"password" if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO sign-in is required"),
"password" => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.password.as_ref(), "password cannot be blank")?;
_check_is_some(data.scope.as_ref(), "scope cannot be blank")?;
_check_is_some(data.username.as_ref(), "username cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.password, "password cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_check_is_some(&data.username, "username cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_password_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
_password_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
}
"client_credentials" => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.client_secret.as_ref(), "client_secret cannot be blank")?;
_check_is_some(data.scope.as_ref(), "scope cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_api_key_login(data, &mut user_id, &conn, &client_header.ip).await
}
"authorization_code" if CONFIG.sso_enabled() => {
_check_is_some(data.client_id.as_ref(), "client_id cannot be blank")?;
_check_is_some(data.code.as_ref(), "code cannot be blank")?;
_check_is_some(data.code_verifier.as_ref(), "code verifier cannot be blank")?;
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.code, "code cannot be blank")?;
_check_is_some(&data.code_verifier, "code verifier cannot be blank")?;
_check_is_some(data.device_identifier.as_ref(), "device_identifier cannot be blank")?;
_check_is_some(data.device_name.as_ref(), "device_name cannot be blank")?;
_check_is_some(data.device_type.as_ref(), "device_type cannot be blank")?;
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
_sso_login(data, &mut user_id, &conn, &client_header.ip, client_version.as_ref()).await
_sso_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await
}
"authorization_code" => err!("SSO sign-in is not available"),
t => err!("Invalid type", t),
@ -133,14 +128,12 @@ async fn login(
login_result
}
// Return Status::Unauthorized to trigger logout
async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> JsonResult {
// When a refresh token is invalid or missing we need to respond with an HTTP BadRequest (400)
// It also needs to return a json which holds at least a key `error` with the value `invalid_grant`
// See the link below for details
// https://github.com/bitwarden/clients/blob/2ee158e720a5e7dbe3641caf80b569e97a1dd91b/libs/common/src/services/api.service.ts#L1786-L1797
let Some(refresh_token) = data.refresh_token else {
err_json!(json!({"error": "invalid_grant"}), "Missing refresh_token")
// Extract token
let refresh_token = match data.refresh_token {
Some(token) => token,
None => err_code!("Missing refresh_token", Status::Unauthorized.code),
};
// ---
@ -151,10 +144,7 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> Json
// let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
match auth::refresh_tokens(ip, &refresh_token, data.client_id, conn).await {
Err(err) => {
err_json!(
json!({"error": "invalid_grant"}),
format!("Unable to refresh login credentials: {}", err.message())
)
err_code!(format!("Unable to refresh login credentials: {}", err.message()), Status::Unauthorized.code)
}
Ok((mut device, auth_tokens)) => {
// Save to update `device.updated_at` to track usage and toggle new status
@ -179,7 +169,7 @@ async fn _sso_login(
user_id: &mut Option<UserId>,
conn: &DbConn,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
) -> JsonResult {
AuthMethod::Sso.check_scope(data.scope.as_ref())?;
@ -230,33 +220,7 @@ async fn _sso_login(
}
)
}
Some((user, None)) => match user_infos.email_verified {
None if !CONFIG.sso_allow_unknown_email_verification() => {
error!(
"Login failure ({}), existing non SSO user ({}) with same email ({}) and email verification status is unknown",
user_infos.identifier, user.uuid, user.email
);
err_silent!(
"Email verification status is unknown",
ErrorEvent {
event: EventType::UserFailedLogIn
}
)
}
Some(false) => {
error!(
"Login failure ({}), existing non SSO user ({}) with same email ({}) and email is not verified",
user_infos.identifier, user.uuid, user.email
);
err_silent!(
"Email is not verified by the SSO provider",
ErrorEvent {
event: EventType::UserFailedLogIn
}
)
}
_ => Some((user, None)),
},
Some((user, None)) => Some((user, None)),
},
Some((user, sso_user)) => Some((user, Some(sso_user))),
};
@ -302,7 +266,7 @@ async fn _sso_login(
Some((user, _)) if !user.enabled => {
err!(
"This user has been disabled",
format!("IP: {}. Username: {}.", ip.ip, user.display_name()),
format!("IP: {}. Username: {}.", ip.ip, user.name),
ErrorEvent {
event: EventType::UserFailedLogIn
}
@ -348,7 +312,7 @@ async fn _password_login(
user_id: &mut Option<UserId>,
conn: &DbConn,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
) -> JsonResult {
// Validate scope
AuthMethod::Password.check_scope(data.scope.as_ref())?;
@ -518,18 +482,14 @@ async fn authenticated_response(
Value::Null
};
let account_keys = if user.private_key.is_some() {
json!({
let account_keys = json!({
"publicKeyEncryptionKeyPair": {
"wrappedPrivateKey": user.private_key,
"publicKey": user.public_key,
"Object": "publicKeyEncryptionKeyPair"
},
"Object": "privateKeys"
})
} else {
Value::Null
};
});
let mut result = json!({
"access_token": auth_tokens.access_token(),
@ -561,7 +521,7 @@ async fn authenticated_response(
result["TwoFactorToken"] = Value::String(token);
}
info!("User {} logged in successfully. IP: {}", user.display_name(), ip.ip);
info!("User {} logged in successfully. IP: {}", &user.name, ip.ip);
Ok(Json(result))
}
@ -650,38 +610,6 @@ async fn _user_api_key_login(
info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
let has_master_password = !user.password_hash.is_empty();
let master_password_unlock = if has_master_password {
json!({
"Kdf": {
"KdfType": user.client_kdf_type,
"Iterations": user.client_kdf_iter,
"Memory": user.client_kdf_memory,
"Parallelism": user.client_kdf_parallelism
},
// This field is named inconsistently and will be removed and replaced by the "wrapped" variant in the apps.
// https://github.com/bitwarden/android/blob/release/2025.12-rc41/network/src/main/kotlin/com/bitwarden/network/model/MasterPasswordUnlockDataJson.kt#L22-L26
"MasterKeyEncryptedUserKey": user.akey,
"MasterKeyWrappedUserKey": user.akey,
"Salt": user.email
})
} else {
Value::Null
};
let account_keys = if user.private_key.is_some() {
json!({
"publicKeyEncryptionKeyPair": {
"wrappedPrivateKey": user.private_key,
"publicKey": user.public_key,
"Object": "publicKeyEncryptionKeyPair"
},
"Object": "privateKeys"
})
} else {
Value::Null
};
// Note: No refresh_token is returned. The CLI just repeats the
// client_credentials login flow when the existing token expires.
let result = json!({
@ -696,14 +624,7 @@ async fn _user_api_key_login(
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"ForcePasswordReset": false,
"scope": AuthMethod::UserApiKey.scope(),
"AccountKeys": account_keys,
"UserDecryptionOptions": {
"HasMasterPassword": has_master_password,
"MasterPasswordUnlock": master_password_unlock,
"Object": "userDecryptionOptions"
},
});
Ok(Json(result))
@ -762,7 +683,7 @@ async fn twofactor_auth(
data: &ConnectData,
device: &mut Device,
ip: &ClientIp,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
@ -775,27 +696,8 @@ async fn twofactor_auth(
TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;
let twofactor_ids: Vec<_> = twofactors
.iter()
.filter_map(|tf| {
let provider_type = TwoFactorType::from_i32(tf.atype)?;
(tf.enabled && is_twofactor_provider_usable(provider_type, Some(&tf.data))).then_some(tf.atype)
})
.collect();
if twofactor_ids.is_empty() {
err!("No enabled and usable two factor providers are available for this account")
}
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
// Ignore Remember and RecoveryCode Types during this check, these are special
if ![TwoFactorType::Remember as i32, TwoFactorType::RecoveryCode as i32].contains(&selected_id)
&& !twofactor_ids.contains(&selected_id)
{
err_json!(
_json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
"Invalid two factor provider"
)
}
let twofactor_code = match data.two_factor_token {
Some(ref code) => code,
@ -812,6 +714,7 @@ async fn twofactor_auth(
use crate::crypto::ct_eq;
let selected_data = _selected_data(selected_twofactor);
let mut remember = data.two_factor_remember.unwrap_or(0);
match TwoFactorType::from_i32(selected_id) {
Some(TwoFactorType::Authenticator) => {
@ -843,23 +746,13 @@ async fn twofactor_auth(
}
Some(TwoFactorType::Remember) => {
match device.twofactor_remember {
// When a 2FA Remember token is used, check and validate this JWT token; if it is valid, just continue.
// If it is invalid, we need to trigger the 2FA Login prompt.
Some(ref token)
if !CONFIG.disable_2fa_remember()
&& (ct_eq(token, twofactor_code)
&& auth::decode_2fa_remember(twofactor_code)
.is_ok_and(|t| t.sub == device.uuid && t.user_uuid == user.uuid)) => {}
_ => {
// Always delete the current twofactor remember token here if it exists
if device.twofactor_remember.is_some() {
device.delete_twofactor_remember();
// We need to save here, since we send an err_json!() which prevents saving `device` at a later stage
device.save(true, conn).await?;
Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => {
remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
}
_ => {
err_json!(
_json_err_twofactor(&twofactor_ids, &user.uuid, data, client_version, conn).await?,
"2FA Remember token not provided or expired"
"2FA Remember token not provided"
)
}
}
@ -890,10 +783,10 @@ async fn twofactor_auth(
TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;
let remember = data.two_factor_remember.unwrap_or(0);
let two_factor = if !CONFIG.disable_2fa_remember() && remember == 1 {
Some(device.refresh_twofactor_remember())
} else {
device.delete_twofactor_remember();
None
};
Ok(two_factor)
@ -907,7 +800,7 @@ async fn _json_err_twofactor(
providers: &[i32],
user_id: &UserId,
data: &ConnectData,
client_version: Option<&ClientVersion>,
client_version: &Option<ClientVersion>,
conn: &DbConn,
) -> ApiResult<Value> {
let mut result = json!({
@ -926,7 +819,7 @@ async fn _json_err_twofactor(
match TwoFactorType::from_i32(*provider) {
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
Some(TwoFactorType::Webauthn) if CONFIG.is_webauthn_2fa_supported() => {
Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
let request = webauthn::generate_webauthn_login(user_id, conn).await?;
result["TwoFactorProviders2"][provider.to_string()] = request.0;
}
@ -1011,11 +904,6 @@ async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/prelogin/password", data = "<data>")]
async fn prelogin_password(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
_register(data, false, conn).await
@ -1031,7 +919,6 @@ struct RegisterVerificationData {
#[derive(rocket::Responder)]
enum RegisterVerificationResponse {
#[response(status = 204)]
NoContent(()),
Token(Json<String>),
}
@ -1059,11 +946,12 @@ async fn register_verification_email(
let user = User::find_by_mail(&data.email, &conn).await;
if user.filter(|u| u.private_key.is_some()).is_some() {
// There is still a timing side channel here in that the code
// paths that send mail take noticeably longer than ones that don't.
// Add a randomized sleep to mitigate this somewhat.
use rand::{rngs::SmallRng, RngExt};
let mut rng: SmallRng = rand::make_rng();
let sleep_ms = rng.random_range(900..=1100) as u64;
// paths that send mail take noticeably longer than ones that
// don't. Add a randomized sleep to mitigate this somewhat.
use rand::{rngs::SmallRng, Rng, SeedableRng};
let mut rng = SmallRng::from_os_rng();
let delta: i32 = 100;
let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
} else {
mail::send_register_verify_email(&data.email, &token).await?;
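Both sides of this hunk follow the same idea described in the comment: pad the fast path with a randomized delay so it is harder to distinguish, by timing alone, from the path that actually sends mail. As a standalone illustration of that pattern (not part of the diff; it assumes the rand and tokio APIs used in the code above), a helper could look like:

// Illustrative sketch only; the helper name is hypothetical, not a Vaultwarden API.
use rand::{rngs::SmallRng, Rng, SeedableRng};
use tokio::time::{sleep, Duration};

/// Sleep for roughly `base_ms` milliseconds, jittered by +/- `jitter_ms`,
/// so fast and slow code paths become harder to tell apart by timing.
async fn jittered_sleep(base_ms: u64, jitter_ms: u64) {
    let mut rng = SmallRng::from_os_rng();
    let delta = rng.random_range(0..=2 * jitter_ms);
    sleep(Duration::from_millis(base_ms.saturating_sub(jitter_ms) + delta)).await;
}

Called as `jittered_sleep(1_000, 100).await`, this reproduces the 900 to 1100 ms window used in the handler above.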
@ -1142,7 +1030,7 @@ struct ConnectData {
#[field(name = uncased("code_verifier"))]
code_verifier: Option<OIDCCodeVerifier>,
}
fn _check_is_some<T>(value: Option<&T>, msg: &str) -> EmptyResult {
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
if value.is_none() {
err!(msg)
}
@ -1161,16 +1049,13 @@ fn prevalidate() -> JsonResult {
}
}
const SSO_BINDING_COOKIE: &str = "VW_SSO_BINDING";
#[get("/connect/oidc-signin?<code>&<state>", rank = 1)]
async fn oidcsignin(code: OIDCCode, state: String, cookies: &CookieJar<'_>, mut conn: DbConn) -> ApiResult<Redirect> {
async fn oidcsignin(code: OIDCCode, state: String, mut conn: DbConn) -> ApiResult<Redirect> {
_oidcsignin_redirect(
state,
OIDCCodeWrapper::Ok {
code,
},
cookies,
&mut conn,
)
.await
@ -1183,7 +1068,6 @@ async fn oidcsignin_error(
state: String,
error: String,
error_description: Option<String>,
cookies: &CookieJar<'_>,
mut conn: DbConn,
) -> ApiResult<Redirect> {
_oidcsignin_redirect(
@ -1192,7 +1076,6 @@ async fn oidcsignin_error(
error,
error_description,
},
cookies,
&mut conn,
)
.await
@ -1204,7 +1087,6 @@ async fn oidcsignin_error(
async fn _oidcsignin_redirect(
base64_state: String,
code_response: OIDCCodeWrapper,
cookies: &CookieJar<'_>,
conn: &mut DbConn,
) -> ApiResult<Redirect> {
let state = sso::decode_state(&base64_state)?;
@ -1213,17 +1095,6 @@ async fn _oidcsignin_redirect(
None => err!(format!("Cannot retrieve sso_auth for {state}")),
Some(sso_auth) => sso_auth,
};
// Browser-binding check
// The cookie was set on /connect/authorize and must come from the same browser that initiated the flow.
let cookie_value = cookies.get(SSO_BINDING_COOKIE).map(|c| c.value().to_string());
let provided_hash = cookie_value.as_deref().map(|v| crypto::sha256_hex(v.as_bytes()));
match (sso_auth.binding_hash.as_deref(), provided_hash.as_deref()) {
(Some(expected), Some(actual)) if crypto::ct_eq(expected, actual) => {}
_ => err!(format!("SSO session binding mismatch for {state}")),
}
cookies.remove(Cookie::build(SSO_BINDING_COOKIE).path("/identity/connect/").build());
sso_auth.code_response = Some(code_response);
sso_auth.updated_at = Utc::now().naive_utc();
sso_auth.save(conn).await?;
@ -1270,7 +1141,7 @@ struct AuthorizeData {
// The `redirect_uri` will change depending of the client (web, android, ios ..)
#[get("/connect/authorize?<data..>")]
async fn authorize(data: AuthorizeData, cookies: &CookieJar<'_>, secure: Secure, conn: DbConn) -> ApiResult<Redirect> {
async fn authorize(data: AuthorizeData, conn: DbConn) -> ApiResult<Redirect> {
let AuthorizeData {
client_id,
redirect_uri,
@ -1284,23 +1155,7 @@ async fn authorize(data: AuthorizeData, cookies: &CookieJar<'_>, secure: Secure,
err!("Unsupported code challenge method");
}
// Generate browser-binding token. Stored hashed in DB; raw value handed to the browser as a cookie.
// Validated on /connect/oidc-signin
let binding_token = data_encoding::BASE64URL_NOPAD.encode(&crypto::get_random_bytes::<32>());
let binding_hash = crypto::sha256_hex(binding_token.as_bytes());
let auth_url =
sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, Some(binding_hash), conn).await?;
cookies.add(
Cookie::build((SSO_BINDING_COOKIE, binding_token))
.path("/identity/connect/")
.max_age(time::Duration::seconds(sso::SSO_AUTH_EXPIRATION.num_seconds()))
.same_site(SameSite::Lax) // Lax is needed because the IdP runs on a different FQDN
.http_only(true)
.secure(secure.https)
.build(),
);
let auth_url = sso::authorize_url(state, code_challenge, &client_id, &redirect_uri, conn).await?;
Ok(Redirect::temporary(String::from(auth_url)))
}
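The hunks above cover a browser-binding check in the SSO flow: a random token travels to the browser as a cookie while only its SHA-256 hash is persisted with the pending auth, and the /connect/oidc-signin callback compares the two in constant time before accepting the authorization code. A self-contained sketch of that bind/verify pair (illustrative only; it assumes the ring, subtle and data_encoding crates already used by this codebase, and the function names are hypothetical):

// Illustrative sketch only; not part of the diff or the Vaultwarden API.
use data_encoding::{BASE64URL_NOPAD, HEXLOWER};
use ring::digest;
use ring::rand::{SecureRandom, SystemRandom};
use subtle::ConstantTimeEq;

/// Generate the raw token for the browser cookie plus the hash to persist server-side.
fn new_binding_token() -> (String, String) {
    let mut bytes = [0u8; 32];
    SystemRandom::new().fill(&mut bytes).expect("CSPRNG failure");
    let token = BASE64URL_NOPAD.encode(&bytes);
    let hash = HEXLOWER.encode(digest::digest(&digest::SHA256, token.as_bytes()).as_ref());
    (token, hash)
}

/// Verify the cookie value presented on the callback against the stored hash, in constant time.
fn verify_binding(stored_hash: &str, cookie_value: &str) -> bool {
    let presented = HEXLOWER.encode(digest::digest(&digest::SHA256, cookie_value.as_bytes()).as_ref());
    stored_hash.as_bytes().ct_eq(presented.as_bytes()).into()
}

Storing only the hash means a database leak by itself is not enough to replay the binding cookie.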

View file

@ -47,7 +47,6 @@ pub type EmptyResult = ApiResult<()>;
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct PasswordOrOtpData {
#[serde(alias = "MasterPasswordHash")]
master_password_hash: Option<String>,
otp: Option<String>,
}

View file

@ -338,7 +338,7 @@ impl WebSocketUsers {
}
// NOTE: The last modified date needs to be updated before calling these methods
pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
@ -358,16 +358,15 @@ impl WebSocketUsers {
}
}
pub async fn send_logout(&self, user: &User, acting_device: Option<&Device>, conn: &DbConn) {
pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>, conn: &DbConn) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let acting_device_id = acting_device.map(|d| d.uuid.clone());
let data = create_update(
vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
UpdateType::LogOut,
acting_device_id,
acting_device_id.clone(),
);
if CONFIG.enable_websocket() {
@ -375,7 +374,7 @@ impl WebSocketUsers {
}
if CONFIG.push_enabled() {
push_logout(user, acting_device, conn).await;
push_logout(user, acting_device_id.clone(), conn).await;
}
}

View file

@ -13,7 +13,7 @@ use tokio::sync::RwLock;
use crate::{
api::{ApiResult, EmptyResult, UpdateType},
db::{
models::{AuthRequestId, Cipher, Device, Folder, PushId, Send, User, UserId},
models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId},
DbConn,
},
http_client::make_http_request,
@ -135,7 +135,7 @@ pub async fn register_push_device(device: &mut Device, conn: &DbConn) -> EmptyRe
Ok(())
}
pub async fn unregister_push_device(push_id: Option<&PushId>) -> EmptyResult {
pub async fn unregister_push_device(push_id: &Option<PushId>) -> EmptyResult {
if !CONFIG.push_enabled() || push_id.is_none() {
return Ok(());
}
@ -188,13 +188,15 @@ pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device
}
}
pub async fn push_logout(user: &User, acting_device: Option<&Device>, conn: &DbConn) {
pub async fn push_logout(user: &User, acting_device_id: Option<DeviceId>, conn: &DbConn) {
let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);
if Device::check_user_has_push_device(&user.uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
"organizationId": (),
"deviceId": acting_device.and_then(|d| d.push_uuid.as_ref()),
"identifier": acting_device.map(|d| &d.uuid),
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": UpdateType::LogOut as i32,
"payload": {
"userId": user.uuid,
@ -206,7 +208,7 @@ pub async fn push_logout(user: &User, acting_device: Option<&Device>, conn: &DbC
}
}
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: Option<&PushId>, conn: &DbConn) {
pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option<PushId>, conn: &DbConn) {
if Device::check_user_has_push_device(&user.uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,

View file

@ -12,6 +12,7 @@ use serde_json::Value;
use crate::{
api::{core::now, ApiResult, EmptyResult},
auth::decode_file_download,
config::CachedConfigOperation,
db::models::{AttachmentId, CipherId},
error::Error,
util::Cached,
@ -52,21 +53,18 @@ fn not_found() -> ApiResult<Html<String>> {
Ok(Html(text))
}
#[get("/css/vaultwarden.css")]
fn vaultwarden_css() -> Cached<Css<String>> {
static VAULTWARDEN_CSS_CACHE: CachedConfigOperation<String> = CachedConfigOperation::new(|config| {
let css_options = json!({
"emergency_access_allowed": CONFIG.emergency_access_allowed(),
"emergency_access_allowed": config.emergency_access_allowed(),
"load_user_scss": true,
"mail_2fa_enabled": CONFIG._enable_email_2fa(),
"mail_enabled": CONFIG.mail_enabled(),
"sends_allowed": CONFIG.sends_allowed(),
"remember_2fa_disabled": CONFIG.disable_2fa_remember(),
"password_hints_allowed": CONFIG.password_hints_allowed(),
"signup_disabled": CONFIG.is_signup_disabled(),
"sso_enabled": CONFIG.sso_enabled(),
"sso_only": CONFIG.sso_enabled() && CONFIG.sso_only(),
"webauthn_2fa_supported": CONFIG.is_webauthn_2fa_supported(),
"yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
"mail_2fa_enabled": config._enable_email_2fa(),
"mail_enabled": config.mail_enabled(),
"sends_allowed": config.sends_allowed(),
"signup_disabled": config.is_signup_disabled(),
"sso_enabled": config.sso_enabled(),
"sso_only": config.sso_enabled() && config.sso_only(),
"yubico_enabled": config._enable_yubico() && config.yubico_client_id().is_some() && config.yubico_secret_key().is_some(),
"webauthn_2fa_supported": config.is_webauthn_2fa_supported(),
});
let scss = match CONFIG.render_template("scss/vaultwarden.scss", &css_options) {
@ -80,7 +78,7 @@ fn vaultwarden_css() -> Cached<Css<String>> {
}
};
let css = match grass_compiler::from_string(
match grass_compiler::from_string(
scss,
&grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
) {
@ -99,10 +97,12 @@ fn vaultwarden_css() -> Cached<Css<String>> {
)
.expect("SCSS to compile")
}
};
}
});
// Cache for one day should be enough and not too much
Cached::ttl(Css(css), 86_400, false)
#[get("/css/vaultwarden.css")]
fn vaultwarden_css() -> Css<String> {
Css(CONFIG.cached_operation(&VAULTWARDEN_CSS_CACHE))
}
#[get("/")]
@ -240,8 +240,8 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
"jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
"datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
"datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
"jquery-4.0.0.slim.js" => {
Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-4.0.0.slim.js")))
"jquery-3.7.1.slim.js" => {
Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.1.slim.js")))
}
_ => err!(format!("Static file not found: {filename}")),
}

View file

@ -46,7 +46,6 @@ static JWT_FILE_DOWNLOAD_ISSUER: LazyLock<String> =
LazyLock::new(|| format!("{}|file_download", CONFIG.domain_origin()));
static JWT_REGISTER_VERIFY_ISSUER: LazyLock<String> =
LazyLock::new(|| format!("{}|register_verify", CONFIG.domain_origin()));
static JWT_2FA_REMEMBER_ISSUER: LazyLock<String> = LazyLock::new(|| format!("{}|2faremember", CONFIG.domain_origin()));
static PRIVATE_RSA_KEY: OnceLock<EncodingKey> = OnceLock::new();
static PUBLIC_RSA_KEY: OnceLock<DecodingKey> = OnceLock::new();
@ -161,10 +160,6 @@ pub fn decode_register_verify(token: &str) -> Result<RegisterVerifyClaims, Error
decode_jwt(token, JWT_REGISTER_VERIFY_ISSUER.to_string())
}
pub fn decode_2fa_remember(token: &str) -> Result<TwoFactorRememberClaims, Error> {
decode_jwt(token, JWT_2FA_REMEMBER_ISSUER.to_string())
}
#[derive(Debug, Serialize, Deserialize)]
pub struct LoginJwtClaims {
// Not before
@ -445,31 +440,6 @@ pub fn generate_register_verify_claims(email: String, name: Option<String>, veri
}
}
#[derive(Serialize, Deserialize)]
pub struct TwoFactorRememberClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: DeviceId,
// UserId
pub user_uuid: UserId,
}
pub fn generate_2fa_remember_claims(device_uuid: DeviceId, user_uuid: UserId) -> TwoFactorRememberClaims {
let time_now = Utc::now();
TwoFactorRememberClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_days(30).unwrap()).timestamp(),
iss: JWT_2FA_REMEMBER_ISSUER.to_string(),
sub: device_uuid,
user_uuid,
}
}
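The claims struct and generator above model a 2FA remember token as a 30-day JWT bound to one device and one user. As a rough, self-contained sketch of issuing and checking such a claim with the jsonwebtoken crate (illustrative only; it uses a hypothetical HMAC secret rather than the project's RSA keys, and the names are not Vaultwarden APIs):

// Illustrative sketch only; key handling and names are assumptions for the example.
use chrono::{TimeDelta, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct RememberClaims {
    nbf: i64,
    exp: i64,
    iss: String,
    sub: String,       // device uuid
    user_uuid: String,
}

fn issue_remember_token(device: &str, user: &str, key: &[u8]) -> String {
    let now = Utc::now();
    let claims = RememberClaims {
        nbf: now.timestamp(),
        exp: (now + TimeDelta::try_days(30).unwrap()).timestamp(),
        iss: "example|2faremember".to_string(),
        sub: device.to_string(),
        user_uuid: user.to_string(),
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(key)).expect("signing failed")
}

fn token_matches(token: &str, device: &str, user: &str, key: &[u8]) -> bool {
    decode::<RememberClaims>(token, &DecodingKey::from_secret(key), &Validation::default())
        .map(|t| t.claims.sub == device && t.claims.user_uuid == user)
        .unwrap_or(false)
}

Because the signature and expiry are validated by decode(), an expired or tampered remember token simply falls back to the normal 2FA prompt.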
#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
// Not before
@ -704,9 +674,10 @@ pub struct OrgHeaders {
impl OrgHeaders {
fn is_member(&self) -> bool {
// Only allow members that are not revoked; we cannot use the Confirmed status here,
// as some endpoints can be triggered by invited users during joining
self.membership_status != MembershipStatus::Revoked && self.membership_type >= MembershipType::User
// NOTE: we don't care about MembershipStatus at the moment because this is only used
// where an invited, accepted, or confirmed user is expected. If this ever changes, or
// if from_i32 is changed to return Some(Revoked), this check needs to be changed accordingly.
self.membership_type >= MembershipType::User
}
fn is_confirmed_and_admin(&self) -> bool {
self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Admin
@ -719,36 +690,6 @@ impl OrgHeaders {
}
}
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path; if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request<'_>) -> Option<OrganizationId> {
if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
Some(org_id)
} else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
Some(org_id)
} else {
None
}
}
// Special Guard to ensure that there is an organization id present
// If there is no org id, trigger the Outcome::Forward.
// This is useful for endpoints which work for both organization and personal vaults, like purge.
pub struct OrgIdGuard;
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgIdGuard {
type Error = &'static str;
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
match get_org_id(request) {
Some(_) => Outcome::Success(OrgIdGuard),
None => Outcome::Forward(rocket::http::Status::NotFound),
}
}
}
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
type Error = &'static str;
@ -756,8 +697,18 @@ impl<'r> FromRequest<'r> for OrgHeaders {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(Headers::from_request(request).await);
// Extract the org_id from the request
let url_org_id = get_org_id(request);
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path; if this is not a valid uuid, try the query values.
let url_org_id: Option<OrganizationId> = {
if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
Some(org_id)
} else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
Some(org_id)
} else {
None
}
};
match url_org_id {
Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => {
@ -875,7 +826,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};
if !Collection::is_coll_manageable_by_user(&col_id, &headers.membership.user_uuid, &conn).await {
if !Collection::can_access_collection(&headers.membership, &col_id, &conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@ -957,8 +908,8 @@ impl ManagerHeaders {
if uuid::Uuid::parse_str(col_id.as_ref()).is_err() {
err!("Collection Id is malformed!");
}
if !Collection::is_coll_manageable_by_user(col_id, &h.membership.user_uuid, conn).await {
err!("Collection not found", "The current user isn't a manager for this collection")
if !Collection::can_access_collection(&h.membership, col_id, conn).await {
err!("You don't have access to all collections!");
}
}
@ -1259,20 +1210,8 @@ pub async fn refresh_tokens(
) -> ApiResult<(Device, AuthTokens)> {
let refresh_claims = match decode_refresh(refresh_token) {
Err(err) => {
error!("Failed to decode {} refresh_token: {refresh_token}: {err:?}", ip.ip);
//err_silent!(format!("Impossible to read refresh_token: {}", err.message()))
// If the token failed to decode, it was probably one of the old style tokens that was just a Base64 string.
// We can generate a claim for them for backwards compatibility. Note that the password refresh claims don't
// check expiration or issuer, so they're not included here.
RefreshJwtClaims {
nbf: 0,
exp: 0,
iss: String::new(),
sub: AuthMethod::Password,
device_token: refresh_token.into(),
token: None,
}
debug!("Failed to decode {} refresh_token: {refresh_token}", ip.ip);
err_silent!(format!("Impossible to read refresh_token: {}", err.message()))
}
Ok(claims) => claims,
};

View file

@ -3,7 +3,7 @@ use std::{
fmt,
process::exit,
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicUsize, Ordering},
LazyLock, RwLock,
},
};
@ -14,10 +14,7 @@ use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};
use crate::{
error::Error,
util::{
get_active_web_release, get_env, get_env_bool, is_valid_email, parse_experimental_client_feature_flags,
FeatureFlagFilter,
},
util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags},
};
static CONFIG_FILE: LazyLock<String> = LazyLock::new(|| {
@ -106,6 +103,7 @@ macro_rules! make_config {
struct Inner {
rocket_shutdown_handle: Option<rocket::Shutdown>,
revision: usize,
templates: Handlebars<'static>,
config: ConfigItems,
@ -325,7 +323,7 @@ macro_rules! make_config {
}
#[derive(Clone, Default)]
struct ConfigItems { $($( $name: make_config! {@type $ty, $none_action}, )+)+ }
struct ConfigItems { $($( pub $name: make_config! {@type $ty, $none_action}, )+)+ }
#[derive(Serialize)]
struct ElementDoc {
@ -923,7 +921,7 @@ make_config! {
},
}
fn validate_config(cfg: &ConfigItems, on_update: bool) -> Result<(), Error> {
fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
// Validate connection URL is valid and DB feature is enabled
#[cfg(sqlite)]
{
@ -1029,17 +1027,33 @@ fn validate_config(cfg: &ConfigItems, on_update: bool) -> Result<(), Error> {
}
}
let invalid_flags =
parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags, FeatureFlagFilter::InvalidOnly);
// Server (v2025.6.2): https://github.com/bitwarden/server/blob/d094be3267f2030bd0dc62106bc6871cf82682f5/src/Core/Constants.cs#L103
// Client (web-v2025.6.1): https://github.com/bitwarden/clients/blob/747c2fd6a1c348a57a76e4a7de8128466ffd3c01/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2025.6.0): https://github.com/bitwarden/android/blob/b5b022caaad33390c31b3021b2c1205925b0e1a2/app/src/main/kotlin/com/x8bit/bitwarden/data/platform/manager/model/FlagKey.kt#L22
// iOS (v2025.6.0): https://github.com/bitwarden/ios/blob/ff06d9c6cc8da89f78f37f376495800201d7261a/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
//
// NOTE: Move deprecated flags to the utils::parse_experimental_client_feature_flags() DEPRECATED_FLAGS const!
const KNOWN_FLAGS: &[&str] = &[
// Autofill Team
"inline-menu-positioning-improvements",
"inline-menu-totp",
"ssh-agent",
// Key Management Team
"ssh-key-vault-item",
"pm-25373-windows-biometrics-v2",
// Tools
"export-attachments",
// Mobile Team
"anon-addy-self-host-alias",
"simple-login-self-host-alias",
"mutual-tls",
];
let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
if !invalid_flags.is_empty() {
let feature_flags_error = format!("Unrecognized experimental client feature flags: {:?}.\n\
err!(format!("Unrecognized experimental client feature flags: {invalid_flags:?}.\n\n\
Please ensure all feature flags are spelled correctly and that they are supported in this version.\n\
Supported flags: {:?}\n", invalid_flags, SUPPORTED_FEATURE_FLAGS);
if on_update {
err!(feature_flags_error);
} else {
println!("[WARNING] {feature_flags_error}");
}
Supported flags: {KNOWN_FLAGS:?}"));
}
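Both versions of this check compare the configured feature flags against a known list and complain about anything unrecognized. A minimal, dependency-free sketch of that idea (illustrative only; it assumes a plain comma-separated flag string instead of the project's parse_experimental_client_feature_flags helper):

// Illustrative sketch only; helper name and flag handling are assumptions for the example.
/// Split a comma-separated flag string and return the flags that are not in `known`.
fn unknown_flags<'a>(configured: &'a str, known: &[&str]) -> Vec<&'a str> {
    configured
        .split(',')
        .map(str::trim)
        .filter(|f| !f.is_empty() && !known.contains(f))
        .collect()
}

fn main() {
    let known = ["ssh-agent", "ssh-key-vault-item", "mutual-tls"];
    let configured = "ssh-agent, not-a-real-flag";
    let invalid = unknown_flags(configured, &known);
    if !invalid.is_empty() {
        eprintln!("Unrecognized experimental client feature flags: {invalid:?}");
    }
}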
const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
@ -1076,7 +1090,7 @@ fn validate_config(cfg: &ConfigItems, on_update: bool) -> Result<(), Error> {
validate_internal_sso_issuer_url(&cfg.sso_authority)?;
validate_internal_sso_redirect_url(&cfg.sso_callback_path)?;
validate_sso_master_password_policy(cfg.sso_master_password_policy.as_ref())?;
validate_sso_master_password_policy(&cfg.sso_master_password_policy)?;
}
if cfg._enable_yubico {
@ -1271,7 +1285,7 @@ fn validate_internal_sso_redirect_url(sso_callback_path: &String) -> Result<open
}
fn validate_sso_master_password_policy(
sso_master_password_policy: Option<&String>,
sso_master_password_policy: &Option<String>,
) -> Result<Option<serde_json::Value>, Error> {
let policy = sso_master_password_policy.as_ref().map(|mpp| serde_json::from_str::<serde_json::Value>(mpp));
@ -1312,16 +1326,12 @@ fn generate_smtp_img_src(embed_images: bool, domain: &str) -> String {
if embed_images {
"cid:".to_string()
} else {
// normalize base_url
let base_url = domain.trim_end_matches('/');
format!("{base_url}/vw_static/")
format!("{domain}/vw_static/")
}
}
fn generate_sso_callback_path(domain: &str) -> String {
// normalize base_url
let base_url = domain.trim_end_matches('/');
format!("{base_url}/identity/connect/oidc-signin")
format!("{domain}/identity/connect/oidc-signin")
}
/// Generate the correct URL for the icon service.
@ -1458,34 +1468,22 @@ pub enum PathType {
RsaKey,
}
// Official available feature flags can be found here:
// Server (v2026.2.1): https://github.com/bitwarden/server/blob/0e42725d0837bd1c0dabd864ff621a579959744b/src/Core/Constants.cs#L135
// Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
// Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
// iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
pub const SUPPORTED_FEATURE_FLAGS: &[&str] = &[
// Architecture
"desktop-ui-migration-milestone-1",
"desktop-ui-migration-milestone-2",
"desktop-ui-migration-milestone-3",
"desktop-ui-migration-milestone-4",
// Auth Team
"pm-5594-safari-account-switching",
// Autofill Team
"ssh-agent",
"ssh-agent-v2",
// Key Management Team
"ssh-key-vault-item",
"pm-25373-windows-biometrics-v2",
// Mobile Team
"anon-addy-self-host-alias",
"simple-login-self-host-alias",
"mutual-tls",
"cxp-import-mobile",
"cxp-export-mobile",
// Platform Team
"pm-30529-webauthn-related-origins",
];
pub struct CachedConfigOperation<T: Clone> {
generator: fn(&Config) -> T,
value_cache: RwLock<Option<T>>,
revision: AtomicUsize,
}
impl<T: Clone> CachedConfigOperation<T> {
#[allow(private_interfaces)]
pub const fn new(generator: fn(&Config) -> T) -> Self {
CachedConfigOperation {
generator,
value_cache: RwLock::new(None),
revision: AtomicUsize::new(0),
}
}
}
impl Config {
pub async fn load() -> Result<Self, Error> {
@ -1500,12 +1498,13 @@ impl Config {
// Fill any missing with defaults
let config = builder.build();
if !SKIP_CONFIG_VALIDATION.load(Ordering::Relaxed) {
validate_config(&config, false)?;
validate_config(&config)?;
}
Ok(Config {
inner: RwLock::new(Inner {
rocket_shutdown_handle: None,
revision: 1,
templates: load_templates(&config.templates_folder),
config,
_env,
@ -1536,7 +1535,7 @@ impl Config {
let env = &self.inner.read().unwrap()._env;
env.merge(&builder, false, &mut overrides).build()
};
validate_config(&config, true)?;
validate_config(&config)?;
// Save both the user and the combined config
{
@ -1544,6 +1543,7 @@ impl Config {
writer.config = config;
writer._usr = builder;
writer._overrides = overrides;
writer.revision += 1;
}
//Save to file
@ -1562,6 +1562,51 @@ impl Config {
self.update_config(builder, false).await
}
pub async fn delete_user_config(&self) -> Result<(), Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.delete(&CONFIG_FILENAME).await?;
// Empty user config
let usr = ConfigBuilder::default();
// Config now is env + defaults
let config = {
let env = &self.inner.read().unwrap()._env;
env.build()
};
// Save configs
{
let mut writer = self.inner.write().unwrap();
writer.config = config;
writer._usr = usr;
writer._overrides = Vec::new();
writer.revision += 1;
}
Ok(())
}
pub fn cached_operation<T: Clone>(&self, operation: &CachedConfigOperation<T>) -> T {
let config_revision = self.inner.read().unwrap().revision;
let cache_revision = operation.revision.load(Ordering::Relaxed);
// If the current revision matches the cached revision, return the cached value
if cache_revision == config_revision {
let reader = operation.value_cache.read().unwrap();
return reader.as_ref().unwrap().clone();
}
// Otherwise, compute the value, update the cache and revision, and return the new value
let value = (operation.generator)(&CONFIG);
{
let mut writer = operation.value_cache.write().unwrap();
*writer = Some(value.clone());
operation.revision.store(config_revision, Ordering::Relaxed);
}
value
}
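The cached_operation method above is the heart of the "Cached config operations" change: each cached value records the config revision it was computed from and is regenerated only when a config update bumps that revision. A stripped-down, standalone sketch of the same pattern (illustrative only; it uses a plain u64 in place of the real Config struct):

// Illustrative sketch only; CachedValue is a hypothetical stand-in for CachedConfigOperation.
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    RwLock,
};

/// A value derived from the configuration, recomputed only when the config revision changes.
struct CachedValue<T: Clone> {
    generator: fn(u64) -> T,
    cache: RwLock<Option<T>>,
    revision: AtomicUsize,
}

impl<T: Clone> CachedValue<T> {
    const fn new(generator: fn(u64) -> T) -> Self {
        Self {
            generator,
            cache: RwLock::new(None),
            revision: AtomicUsize::new(0),
        }
    }

    fn get(&self, config_revision: usize, config: u64) -> T {
        if self.revision.load(Ordering::Relaxed) == config_revision {
            if let Some(v) = self.cache.read().unwrap().as_ref() {
                return v.clone();
            }
        }
        // Stale or never computed: regenerate, store, and remember the revision we used.
        let value = (self.generator)(config);
        *self.cache.write().unwrap() = Some(value.clone());
        self.revision.store(config_revision, Ordering::Relaxed);
        value
    }
}

fn main() {
    static CSS: CachedValue<String> = CachedValue::new(|cfg| format!("body {{ max-width: {cfg}px }}"));
    println!("{}", CSS.get(1, 960));  // computed
    println!("{}", CSS.get(1, 960));  // served from the cache
    println!("{}", CSS.get(2, 1200)); // recomputed after a config update
}

The trade-off against a plain TTL cache (like the removed Cached::ttl on the CSS route) is that values never go stale after an admin saves new settings, yet are still only rendered once per revision.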
/// Tests whether an email's domain is allowed. A domain is allowed if it
/// is in signups_domains_whitelist, or if no whitelist is set (so there
/// are no domain restrictions in effect).
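The doc comment above describes the whitelist rule, but the function body falls outside this hunk. A hedged sketch of how such a check could look (illustrative only; the helper name and the Vec<String>-style whitelist shape are assumptions, not the project's actual implementation):

// Illustrative sketch only; not the Vaultwarden implementation.
/// A domain is allowed if the whitelist is empty (no restriction) or contains the domain.
fn is_email_domain_allowed(email: &str, whitelist: &[String]) -> bool {
    let Some(domain) = email.rsplit_once('@').map(|(_, d)| d.to_lowercase()) else {
        return false; // not a valid email address
    };
    whitelist.is_empty() || whitelist.iter().any(|d| d.eq_ignore_ascii_case(&domain))
}

fn main() {
    let whitelist = vec!["example.com".to_string()];
    assert!(is_email_domain_allowed("user@example.com", &whitelist));
    assert!(!is_email_domain_allowed("user@other.org", &whitelist));
    assert!(is_email_domain_allowed("user@other.org", &[])); // no whitelist set
}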
@ -1611,33 +1656,10 @@ impl Config {
}
}
pub async fn delete_user_config(&self) -> Result<(), Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.delete(&CONFIG_FILENAME).await?;
// Empty user config
let usr = ConfigBuilder::default();
// Config now is env + defaults
let config = {
let env = &self.inner.read().unwrap()._env;
env.build()
};
// Save configs
{
let mut writer = self.inner.write().unwrap();
writer.config = config;
writer._usr = usr;
writer._overrides = Vec::new();
}
Ok(())
}
pub fn private_rsa_key(&self) -> String {
format!("{}.pem", self.rsa_key_filename())
}
pub fn mail_enabled(&self) -> bool {
let inner = &self.inner.read().unwrap().config;
inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
@ -1725,7 +1747,7 @@ impl Config {
}
pub fn sso_master_password_policy_value(&self) -> Option<serde_json::Value> {
validate_sso_master_password_policy(self.sso_master_password_policy().as_ref()).ok().flatten()
validate_sso_master_password_policy(&self.sso_master_password_policy()).ok().flatten()
}
pub fn sso_scopes_vec(&self) -> Vec<String> {
@ -1865,7 +1887,7 @@ fn to_json<'reg, 'rc>(
// Configure the web-vault version as an integer so it can be used in smaller-than or greater-than comparisons.
// The default is based upon the version at which this feature was added.
static WEB_VAULT_VERSION: LazyLock<semver::Version> = LazyLock::new(|| {
let vault_version = get_active_web_release();
let vault_version = get_web_vault_version();
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(&vault_version)
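This hunk ends mid-expression, but the comment explains the intent: reduce the web-vault version string to something that supports ordered comparisons. A small sketch of that idea (illustrative only, assuming the regex and semver crates shown above; the helper name is hypothetical):

// Illustrative sketch only; not the actual WEB_VAULT_VERSION initializer.
use semver::Version;

/// Reduce a version string like "v2025.12.1" to a semver Version so it can be compared with < and >.
fn parse_vault_version(raw: &str) -> Option<Version> {
    let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
    let caps = re.captures(raw)?;
    Some(Version::new(caps[1].parse().ok()?, caps[2].parse().ok()?, caps[3].parse().ok()?))
}

fn main() {
    let v = parse_vault_version("v2025.12.1").unwrap();
    assert!(v > Version::new(2024, 6, 2));
}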

View file

@ -55,13 +55,13 @@ pub fn encode_random_bytes<const N: usize>(e: &Encoding) -> String {
/// Generates a random string over a specified alphabet.
pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::RngExt;
use rand::Rng;
let mut rng = rand::rng();
(0..num_chars)
.map(|_| {
let i = rng.random_range(0..alphabet.len());
char::from(alphabet[i])
alphabet[i] as char
})
.collect()
}
@ -113,10 +113,3 @@ pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
use subtle::ConstantTimeEq;
a.as_ref().ct_eq(b.as_ref()).into()
}
//
// SHA256
//
pub fn sha256_hex(data: &[u8]) -> String {
HEXLOWER.encode(digest::digest(&digest::SHA256, data).as_ref())
}

View file

@ -387,6 +387,7 @@ pub mod models;
#[cfg(sqlite)]
pub fn backup_sqlite() -> Result<String, Error> {
use diesel::Connection;
use std::{fs::File, io::Write};
let db_url = CONFIG.database_url();
if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::Sqlite).unwrap_or(false) {
@ -400,13 +401,16 @@ pub fn backup_sqlite() -> Result<String, Error> {
.to_string_lossy()
.into_owned();
diesel::sql_query("VACUUM INTO ?")
.bind::<diesel::sql_types::Text, _>(&backup_file)
.execute(&mut conn)
.map(|_| ())
.map_res("VACUUM INTO failed")?;
match File::create(backup_file.clone()) {
Ok(mut f) => {
let serialized_db = conn.serialize_database_to_buffer();
f.write_all(serialized_db.as_slice()).expect("Error writing SQLite backup");
Ok(backup_file)
}
Err(e) => {
err_silent!(format!("Unable to save SQLite backup: {e:?}"))
}
}
} else {
err_silent!("The database type is not SQLite. Backups only works for SQLite databases")
}

View file

@ -1,91 +0,0 @@
use chrono::NaiveDateTime;
use diesel::prelude::*;
use super::{CipherId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::archives;
use crate::db::DbConn;
use crate::error::MapResult;
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = archives)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Archive {
pub user_uuid: UserId,
pub cipher_uuid: CipherId,
pub archived_at: NaiveDateTime,
}
impl Archive {
// Returns the date the specified cipher was archived
pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
db_run! { conn: {
archives::table
.filter(archives::cipher_uuid.eq(cipher_uuid))
.filter(archives::user_uuid.eq(user_uuid))
.select(archives::archived_at)
.first::<NaiveDateTime>(conn).ok()
}}
}
// Saves (inserts or updates) an archive record with the provided timestamp
pub async fn save(
user_uuid: &UserId,
cipher_uuid: &CipherId,
archived_at: NaiveDateTime,
conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn:
sqlite, mysql {
diesel::replace_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.execute(conn)
.map_res("Error saving archive")
}
postgresql {
diesel::insert_into(archives::table)
.values((
archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(archived_at),
))
.on_conflict((archives::user_uuid, archives::cipher_uuid))
.do_update()
.set(archives::archived_at.eq(archived_at))
.execute(conn)
.map_res("Error saving archive")
}
}
}
// Deletes an archive record for a specific cipher
pub async fn delete_by_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn: {
diesel::delete(
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.filter(archives::cipher_uuid.eq(cipher_uuid))
)
.execute(conn)
.map_res("Error deleting archive")
}}
}
/// Return a vec with (cipher_uuid, archived_at)
/// This is used during a full sync so we only need one query for all archive matches
pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {
db_run! { conn: {
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.select((archives::cipher_uuid, archives::archived_at))
.load::<(CipherId, NaiveDateTime)>(conn)
.unwrap_or_default()
}}
}
}

View file

@ -50,7 +50,7 @@ impl Attachment {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
} else {
Ok(operator.presign_read(&self.get_file_path(), Duration::from_mins(5)).await?.uri().to_string())
Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}

View file

@ -177,9 +177,7 @@ impl AuthRequest {
}
pub async fn purge_expired_auth_requests(conn: &DbConn) {
// delete auth requests older than 15 minutes which is functionally equivalent to upstream:
// https://github.com/bitwarden/server/blob/f8ee2270409f7a13125cd414c450740af605a175/src/Sql/dbo/Auth/Stored%20Procedures/AuthRequest_DeleteIfExpired.sql
let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(15).unwrap();
let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); // after 5 minutes, clients reject the request
for auth_request in Self::find_created_before(&expiry_time, conn).await {
auth_request.delete(conn).await.ok();
}

View file

@ -10,8 +10,8 @@ use diesel::prelude::*;
use serde_json::Value;
use super::{
Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership,
MembershipStatus, MembershipType, OrganizationId, User, UserId,
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
MembershipType, OrganizationId, User, UserId,
};
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
use macros::UuidFromParam;
@ -380,11 +380,6 @@ impl Cipher {
} else {
self.is_favorite(user_uuid, conn).await
});
json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d)))
} else {
self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d)))
});
// These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.
@ -403,7 +398,7 @@ impl Cipher {
3 => "card",
4 => "identity",
5 => "sshKey",
_ => err!(format!("Cipher {} has an invalid type {}", self.uuid, self.atype)),
_ => panic!("Wrong type"),
};
json_object[key] = type_data_json;
@ -564,7 +559,7 @@ impl Cipher {
if let Some(cached_member) = cipher_sync_data.members.get(org_uuid) {
return cached_member.has_full_access();
}
} else if let Some(member) = Membership::find_confirmed_by_user_and_org(user_uuid, org_uuid, conn).await {
} else if let Some(member) = Membership::find_by_user_and_org(user_uuid, org_uuid, conn).await {
return member.has_full_access();
}
}
@ -673,12 +668,10 @@ impl Cipher {
ciphers::table
.filter(ciphers::uuid.eq(&self.uuid))
.inner_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)
))
ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
.inner_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_uuid))
))
.and(users_collections::user_uuid.eq(user_uuid))))
.select((users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
.load::<(bool, bool, bool)>(conn)
.expect("Error getting user access restrictions")
@ -704,9 +697,6 @@ impl Cipher {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.select((collections_groups::read_only, collections_groups::hide_passwords, collections_groups::manage))
.load::<(bool, bool, bool)>(conn)
@ -747,18 +737,6 @@ impl Cipher {
}
}
pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
Archive::get_archived_at(&self.uuid, user_uuid, conn).await
}
pub async fn set_archived_at(&self, archived_at: NaiveDateTime, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::save(user_uuid, &self.uuid, archived_at, conn).await
}
pub async fn unarchive(&self, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
Archive::delete_by_cipher(user_uuid, &self.uuid, conn).await
}
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {
db_run! { conn: {
folders_ciphers::table
@ -831,13 +809,13 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
// Ensure that group and membership belong to the same org
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
collections_groups::groups_uuid.eq(groups::uuid)
)
))
.filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
.or_filter(users_organizations::access_all.eq(true)) // access_all in org
@ -1008,9 +986,7 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
@ -1071,9 +1047,7 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
@ -1141,8 +1115,8 @@ impl Cipher {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(

View file

@ -191,7 +191,7 @@ impl Collection {
self.update_users_revision(conn).await;
CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?;
CollectionUser::delete_all_by_collection(&self.uuid, conn).await?;
CollectionGroup::delete_all_by_collection(&self.uuid, &self.org_uuid, conn).await?;
CollectionGroup::delete_all_by_collection(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))
@ -239,8 +239,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
@ -355,8 +355,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
@ -422,8 +422,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
@ -484,8 +484,8 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
@ -513,8 +513,7 @@ impl Collection {
}}
}
pub async fn is_coll_manageable_by_user(uuid: &CollectionId, user_uuid: &UserId, conn: &DbConn) -> bool {
let uuid = uuid.to_string();
pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool {
let user_uuid = user_uuid.to_string();
db_run! { conn: {
collections::table
@ -531,17 +530,17 @@ impl Collection {
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.left_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
collections_groups::collections_uuid.eq(collections::uuid)
)
))
.filter(collections::uuid.eq(&uuid))
.filter(collections::uuid.eq(&self.uuid))
.filter(
users_collections::collection_uuid.eq(&uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::manage.eq(true)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
)).or(
@ -559,10 +558,6 @@ impl Collection {
.unwrap_or(0) != 0
}}
}
pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool {
Self::is_coll_manageable_by_user(&self.uuid, user_uuid, conn).await
}
}
/// Database methods

View file

@ -1,6 +1,6 @@
use chrono::{NaiveDateTime, Utc};
use data_encoding::BASE64URL;
use data_encoding::{BASE64, BASE64URL};
use derive_more::{Display, From};
use serde_json::Value;
@ -25,7 +25,7 @@ pub struct Device {
pub user_uuid: UserId,
pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/8d547dcc280babab70dd4a3c94ced6a34b12dfbf/src/Core/Enums/DeviceType.cs
pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<PushId>,
pub push_token: Option<String>,
@ -49,16 +49,11 @@ impl Device {
push_uuid: Some(PushId(get_uuid())),
push_token: None,
refresh_token: Device::generate_refresh_token(),
refresh_token: crypto::encode_random_bytes::<64>(&BASE64URL),
twofactor_remember: None,
}
}
#[inline(always)]
pub fn generate_refresh_token() -> String {
crypto::encode_random_bytes::<64>(&BASE64URL)
}
pub fn to_json(&self) -> Value {
json!({
"id": self.uuid,
@ -72,13 +67,10 @@ impl Device {
}
pub fn refresh_twofactor_remember(&mut self) -> String {
use crate::auth::{encode_jwt, generate_2fa_remember_claims};
let twofactor_remember = crypto::encode_random_bytes::<180>(&BASE64);
self.twofactor_remember = Some(twofactor_remember.clone());
let two_factor_remember_claim = generate_2fa_remember_claims(self.uuid.clone(), self.user_uuid.clone());
let two_factor_remember_string = encode_jwt(&two_factor_remember_claim);
self.twofactor_remember = Some(two_factor_remember_string.clone());
two_factor_remember_string
twofactor_remember
}
pub fn delete_twofactor_remember(&mut self) {
@ -265,17 +257,6 @@ impl Device {
.unwrap_or(0) != 0
}}
}
pub async fn rotate_refresh_tokens_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
// Generate a new token per device.
// We cannot do a single UPDATE with one value because each device needs a unique token.
let devices = Self::find_by_user(user_uuid, conn).await;
for mut device in devices {
device.refresh_token = Device::generate_refresh_token();
device.save(false, conn).await?;
}
Ok(())
}
}
#[derive(Display)]
@ -332,8 +313,6 @@ pub enum DeviceType {
MacOsCLI = 24,
#[display("Linux CLI")]
LinuxCLI = 25,
#[display("DuckDuckGo")]
DuckDuckGoBrowser = 26,
}
impl DeviceType {
@ -365,7 +344,6 @@ impl DeviceType {
23 => DeviceType::WindowsCLI,
24 => DeviceType::MacOsCLI,
25 => DeviceType::LinuxCLI,
26 => DeviceType::DuckDuckGoBrowser,
_ => DeviceType::UnknownBrowser,
}
}

View file

@ -85,8 +85,7 @@ impl EmergencyAccess {
pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else {
let email = self.email.as_deref()?;
} else if let Some(email) = self.email.as_deref() {
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
@ -95,6 +94,8 @@ impl EmergencyAccess {
return None;
}
}
} else {
return None;
};
Some(json!({

View file

@ -1,6 +1,6 @@
use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::{collections, collections_groups, groups, groups_users, users_organizations};
use crate::db::schema::{collections_groups, groups, groups_users, users_organizations};
use crate::db::DbConn;
use crate::error::MapResult;
use chrono::{NaiveDateTime, Utc};
@ -81,7 +81,7 @@ impl Group {
// If both read_only and hide_passwords are false, then manage should be true
// You can't have an entry with read_only and manage, or hide_passwords and manage
// Or an entry with everything set to false
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, &self.organizations_uuid, conn)
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
.await
.iter()
.map(|entry| {
@ -191,7 +191,7 @@ impl Group {
pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
for group in Self::find_by_organization(org_uuid, conn).await {
group.delete(org_uuid, conn).await?;
group.delete(conn).await?;
}
Ok(())
}
@ -246,8 +246,8 @@ impl Group {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(groups::access_all.eq(true))
@ -276,9 +276,9 @@ impl Group {
}}
}
pub async fn delete(&self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
CollectionGroup::delete_all_by_group(&self.uuid, org_uuid, conn).await?;
GroupUser::delete_all_by_group(&self.uuid, org_uuid, conn).await?;
pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
CollectionGroup::delete_all_by_group(&self.uuid, conn).await?;
GroupUser::delete_all_by_group(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(groups::table.filter(groups::uuid.eq(&self.uuid)))
@ -306,8 +306,8 @@ impl Group {
}
impl CollectionGroup {
pub async fn save(&mut self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, org_uuid, conn).await;
pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}
@ -365,19 +365,10 @@ impl CollectionGroup {
}
}
pub async fn find_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> Vec<Self> {
pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
collections_groups::table
.inner_join(groups::table.on(
groups::uuid.eq(collections_groups::groups_uuid)
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(collections_groups::groups_uuid.eq(group_uuid))
.filter(collections::org_uuid.eq(org_uuid))
.select(collections_groups::all_columns)
.load::<Self>(conn)
.expect("Error loading collection groups")
}}
@ -392,13 +383,6 @@ impl CollectionGroup {
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(users_organizations::user_uuid.eq(user_uuid))
.select(collections_groups::all_columns)
.load::<Self>(conn)
@ -410,20 +394,14 @@ impl CollectionGroup {
db_run! { conn: {
collections_groups::table
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
))
.inner_join(groups::table.on(groups::uuid.eq(collections_groups::groups_uuid)
.and(groups::organizations_uuid.eq(collections::org_uuid))
))
.select(collections_groups::all_columns)
.load::<Self>(conn)
.expect("Error loading collection groups")
}}
}
pub async fn delete(&self, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, org_uuid, conn).await;
pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}
@ -437,8 +415,8 @@ impl CollectionGroup {
}}
}
pub async fn delete_all_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, org_uuid, conn).await;
pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}
@ -451,14 +429,10 @@ impl CollectionGroup {
}}
}
pub async fn delete_all_by_collection(
collection_uuid: &CollectionId,
org_uuid: &OrganizationId,
conn: &DbConn,
) -> EmptyResult {
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult {
let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
for collection_assigned_to_group in collection_assigned_to_groups {
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, org_uuid, conn).await;
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}
@ -520,19 +494,10 @@ impl GroupUser {
}
}
pub async fn find_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> Vec<Self> {
pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
groups_users::table
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.inner_join(users_organizations::table.on(
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
.and(users_organizations::org_uuid.eq(groups::organizations_uuid))
))
.filter(groups_users::groups_uuid.eq(group_uuid))
.filter(groups::organizations_uuid.eq(org_uuid))
.select(groups_users::all_columns)
.load::<Self>(conn)
.expect("Error loading group users")
}}
@ -557,13 +522,6 @@ impl GroupUser {
.inner_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.inner_join(collections::table.on(
collections::uuid.eq(collections_groups::collections_uuid)
.and(collections::org_uuid.eq(groups::organizations_uuid))
))
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()
@ -617,8 +575,8 @@ impl GroupUser {
}}
}
pub async fn delete_all_by_group(group_uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, org_uuid, conn).await;
pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
for group_user in group_users {
group_user.update_user_revision(conn).await;
}

View file

@ -1,4 +1,3 @@
mod archive;
mod attachment;
mod auth_request;
mod cipher;
@ -18,7 +17,6 @@ mod two_factor_duo_context;
mod two_factor_incomplete;
mod user;
pub use self::archive::Archive;
pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, CipherId, RepromptType};

View file

@ -269,7 +269,7 @@ impl OrgPolicy {
continue;
}
if let Some(user) = Membership::find_confirmed_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < MembershipType::Admin {
return true;
}
@ -332,7 +332,7 @@ impl OrgPolicy {
for policy in
OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
{
if let Some(user) = Membership::find_confirmed_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < MembershipType::Admin {
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
Ok(opts) => {

View file

@ -514,8 +514,7 @@ impl Membership {
"familySponsorshipValidUntil": null,
"familySponsorshipToDelete": null,
"accessSecretsManager": false,
// limit collection creation to managers with access_all permission to prevent issues
"limitCollectionCreation": self.atype < MembershipType::Manager || !self.access_all,
"limitCollectionCreation": self.atype < MembershipType::Manager, // If less then a manager return true, to limit collection creations
"limitCollectionDeletion": true,
"limitItemDeletion": false,
"allowAdminAccessToAllCollectionItems": true,
@ -1074,9 +1073,7 @@ impl Membership {
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)
.and(groups::organizations_uuid.eq(users_organizations::org_uuid))
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(ciphers_collections::table.on(
ciphers_collections::collection_uuid.eq(collections_groups::collections_uuid).and(ciphers_collections::cipher_uuid.eq(&cipher_uuid))

View file

@ -46,16 +46,6 @@ pub enum SendType {
File = 1,
}
enum SendAuthType {
#[allow(dead_code)]
// Send requires email OTP verification
Email = 0, // Not yet supported by Vaultwarden
// Send requires a password
Password = 1,
// Send requires no auth
None = 2,
}
impl Send {
pub fn new(atype: i32, name: String, data: String, akey: String, deletion_date: NaiveDateTime) -> Self {
let now = Utc::now().naive_utc();
@ -155,7 +145,6 @@ impl Send {
"maxAccessCount": self.max_access_count,
"accessCount": self.access_count,
"password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"authType": if self.password_hash.is_some() { SendAuthType::Password as i32 } else { SendAuthType::None as i32 },
"disabled": self.disabled,
"hideEmail": self.hide_email,

View file

@ -54,18 +54,11 @@ pub struct SsoAuth {
pub auth_response: Option<OIDCAuthenticatedUser>,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub binding_hash: Option<String>,
}
/// Local methods
impl SsoAuth {
pub fn new(
state: OIDCState,
client_challenge: OIDCCodeChallenge,
nonce: String,
redirect_uri: String,
binding_hash: Option<String>,
) -> Self {
pub fn new(state: OIDCState, client_challenge: OIDCCodeChallenge, nonce: String, redirect_uri: String) -> Self {
let now = Utc::now().naive_utc();
SsoAuth {
@ -77,7 +70,6 @@ impl SsoAuth {
updated_at: now,
code_response: None,
auth_response: None,
binding_hash,
}
}
}

View file

@ -20,6 +20,7 @@ pub struct TwoFactor {
pub last_used: i64,
}
#[allow(dead_code)]
#[derive(num_derive::FromPrimitive)]
pub enum TwoFactorType {
Authenticator = 0,

View file

@ -185,14 +185,13 @@ impl User {
/// These routes are able to use the previous stamp id for the next 2 minutes.
/// After these 2 minutes this stamp will expire.
///
pub async fn set_password(
pub fn set_password(
&mut self,
password: &str,
new_key: Option<String>,
reset_security_stamp: bool,
allow_next_route: Option<Vec<String>>,
conn: &DbConn,
) -> EmptyResult {
) {
self.password_hash = crypto::hash_password(password.as_bytes(), &self.salt, self.password_iterations as u32);
if let Some(route) = allow_next_route {
@ -204,15 +203,12 @@ impl User {
}
if reset_security_stamp {
self.reset_security_stamp(conn).await?;
self.reset_security_stamp()
}
Ok(())
}
pub async fn reset_security_stamp(&mut self, conn: &DbConn) -> EmptyResult {
pub fn reset_security_stamp(&mut self) {
self.security_stamp = get_uuid();
Device::rotate_refresh_tokens_by_user(&self.uuid, conn).await?;
Ok(())
}
/// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.
@ -235,15 +231,6 @@ impl User {
pub fn reset_stamp_exception(&mut self) {
self.stamp_exception = None;
}
pub fn display_name(&self) -> &str {
// default to email if name is empty
if !&self.name.is_empty() {
&self.name
} else {
&self.email
}
}
}
/// Database methods

View file

@ -265,7 +265,6 @@ table! {
auth_response -> Nullable<Text>,
created_at -> Timestamp,
updated_at -> Timestamp,
binding_hash -> Nullable<Text>,
}
}
@ -342,16 +341,6 @@ table! {
}
}
table! {
archives (user_uuid, cipher_uuid) {
user_uuid -> Text,
cipher_uuid -> Text,
archived_at -> Timestamp,
}
}
joinable!(archives -> users (user_uuid));
joinable!(archives -> ciphers (cipher_uuid));
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@ -383,7 +372,6 @@ joinable!(auth_requests -> users (user_uuid));
joinable!(sso_users -> users (user_uuid));
allow_tables_to_appear_in_same_query!(
archives,
attachments,
ciphers,
ciphers_collections,

View file

@ -1,11 +1,12 @@
use std::{
fmt,
net::{IpAddr, SocketAddr},
str::FromStr,
sync::{Arc, LazyLock, Mutex},
time::Duration,
};
use hickory_resolver::{net::runtime::TokioRuntimeProvider, TokioResolver};
use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver};
use regex::Regex;
use reqwest::{
dns::{Name, Resolve, Resolving},
@ -58,6 +59,16 @@ pub fn get_reqwest_client_builder() -> ClientBuilder {
.timeout(Duration::from_secs(10))
}
pub fn should_block_address(domain_or_ip: &str) -> bool {
if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
if should_block_ip(ip) {
return true;
}
}
should_block_address_regex(domain_or_ip)
}
fn should_block_ip(ip: IpAddr) -> bool {
if !CONFIG.http_request_block_non_global_ips() {
return false;
@ -89,54 +100,11 @@ fn should_block_address_regex(domain_or_ip: &str) -> bool {
is_match
}
pub fn get_valid_host(host: &str) -> Result<Host, CustomHttpClientError> {
let Ok(host) = Host::parse(host) else {
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
};
// Some extra checks to validate hosts
match host {
Host::Domain(ref domain) => {
// Host::parse() does not verify length or all possible invalid characters
// We do some extra checks here to prevent issues
if domain.len() > 253 {
debug!("Domain validation error: '{domain}' exceeds 253 characters");
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
if !domain.split('.').all(|label| {
!label.is_empty()
// Labels can't be longer than 63 chars
&& label.len() <= 63
// Labels are not allowed to start or end with a hyphen `-`
&& !label.starts_with('-')
&& !label.ends_with('-')
// Only ASCII Alphanumeric characters are allowed
// We already received a punycoded domain back, so no unicode should exist here
&& label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}) {
debug!(
"Domain validation error: '{domain}' labels contain invalid characters or exceed the maximum length"
);
return Err(CustomHttpClientError::Invalid {
domain: host.to_string(),
});
}
}
Host::Ipv4(_) | Host::Ipv6(_) => {}
}
Ok(host)
}
pub fn should_block_host<S: AsRef<str>>(host: &Host<S>) -> Result<(), CustomHttpClientError> {
fn should_block_host(host: &Host<&str>) -> Result<(), CustomHttpClientError> {
let (ip, host_str): (Option<IpAddr>, String) = match host {
Host::Ipv4(ip) => (Some(IpAddr::V4(*ip)), ip.to_string()),
Host::Ipv6(ip) => (Some(IpAddr::V6(*ip)), ip.to_string()),
Host::Domain(d) => (None, d.as_ref().to_string()),
Host::Domain(d) => (None, (*d).to_string()),
};
if let Some(ip) = ip {
@ -166,9 +134,6 @@ pub enum CustomHttpClientError {
domain: Option<String>,
ip: IpAddr,
},
Invalid {
domain: String,
},
}
impl CustomHttpClientError {
@ -190,7 +155,7 @@ impl fmt::Display for CustomHttpClientError {
match self {
Self::Blocked {
domain,
} => write!(f, "Blocked domain: '{domain}' matched HTTP_REQUEST_BLOCK_REGEX"),
} => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
Self::NonGlobalIp {
domain: Some(domain),
ip,
@ -198,10 +163,7 @@ impl fmt::Display for CustomHttpClientError {
Self::NonGlobalIp {
domain: None,
ip,
} => write!(f, "IP '{ip}' is not a global IP!"),
Self::Invalid {
domain,
} => write!(f, "Invalid host: '{domain}' contains invalid characters or exceeds the maximum length"),
} => write!(f, "IP {ip} is not a global IP!"),
}
}
}
@ -222,46 +184,40 @@ impl CustomDnsResolver {
}
fn new() -> Arc<Self> {
TokioResolver::builder(TokioRuntimeProvider::default())
.and_then(|mut builder| {
// Hickory's default since v0.26 is `Ipv6AndIpv4`, which sorts IPv6 first
// This might cause issues on IPv4 only systems or containers
// Unless someone enabled DNS_PREFER_IPV6, use Ipv4AndIpv6, which returns IPv4 first (our previous default)
if !CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6;
match TokioResolver::builder(TokioConnectionProvider::default()) {
Ok(mut builder) => {
if CONFIG.dns_prefer_ipv6() {
builder.options_mut().ip_strategy = hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4;
}
let resolver = builder.build();
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
builder.build()
})
.inspect_err(|e| warn!("Error creating Hickory resolver, falling back to default: {e:?}"))
.map(|resolver| Arc::new(Self::Hickory(Arc::new(resolver))))
.unwrap_or_else(|_| Arc::new(Self::Default()))
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Vec<SocketAddr>, BoxError> {
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let results: Vec<SocketAddr> = match self {
Self::Default() => tokio::net::lookup_host((name, 0)).await?.collect(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().map(|i| SocketAddr::new(i, 0)).collect(),
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
for addr in &results {
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(results)
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
let Ok(host) = get_valid_host(name) else {
return Err(CustomHttpClientError::Invalid {
domain: name.to_string(),
});
};
if should_block_host(&host).is_err() {
if should_block_address(name) {
return Err(CustomHttpClientError::Blocked {
domain: name.to_string(),
});
@ -286,11 +242,8 @@ impl Resolve for CustomDnsResolver {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let results = this.resolve_domain(name).await?;
if results.is_empty() {
warn!("Unable to resolve {name} to any valid IP address");
}
Ok::<reqwest::dns::Addrs, _>(Box::new(results.into_iter()))
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
@ -352,209 +305,3 @@ pub(crate) mod aws {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::util::is_global_hardcoded;
use std::net::Ipv4Addr;
use url::Host;
// ===
// IPv4 numeric-format normalization
fn parse_to_ip(s: &str) -> Option<IpAddr> {
match Host::parse(s).ok()? {
Host::Ipv4(v4) => Some(IpAddr::V4(v4)),
Host::Ipv6(v6) => Some(IpAddr::V6(v6)),
Host::Domain(_) => None,
}
}
#[test]
fn dotted_decimal_loopback_normalizes() {
let ip = parse_to_ip("127.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn single_decimal_loopback_normalizes() {
// 127.0.0.1 == 2130706433
let ip = parse_to_ip("2130706433").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn dotted_hex_loopback_normalizes() {
let ip = parse_to_ip("0x7f.0.0.1").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn octal_loopback_normalizes() {
// 017700000001 == 127.0.0.1
let ip = parse_to_ip("017700000001").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn dotted_octal_loopback_normalizes() {
let ip = parse_to_ip("0177.0.0.01").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn aws_metadata_decimal_blocked() {
// 169.254.169.254 == 2852039166 (link-local, AWS IMDS)
let ip = parse_to_ip("2852039166").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(169, 254, 169, 254)));
assert!(!is_global_hardcoded(ip));
}
#[test]
fn rfc1918_hex_blocked() {
// 10.0.0.1
let ip = parse_to_ip("0x0a000001").unwrap();
assert!(!is_global_hardcoded(ip));
}
#[test]
fn public_ip_decimal_allowed() {
// 8.8.8.8 == 134744072
let ip = parse_to_ip("134744072").unwrap();
assert_eq!(ip, IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)));
assert!(is_global_hardcoded(ip));
}
// ===
// get_valid_host integration: numeric forms become Host::Ipv4
#[test]
fn get_valid_host_normalizes_decimal_int() {
let h = get_valid_host("2130706433").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
#[test]
fn get_valid_host_normalizes_hex() {
let h = get_valid_host("0x7f000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
#[test]
fn get_valid_host_normalizes_octal() {
let h = get_valid_host("017700000001").expect("valid");
assert!(matches!(h, Host::Ipv4(ip) if ip == Ipv4Addr::new(127, 0, 0, 1)));
}
// ===
// IPv6 formats
#[test]
fn ipv6_loopback_blocked() {
let h = get_valid_host("[::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
#[test]
fn ipv4_mapped_in_ipv6_loopback_blocked() {
// ::ffff:127.0.0.1 — v4-mapped form; is_global_hardcoded blocks via ::ffff:0:0/96
let h = get_valid_host("[::ffff:127.0.0.1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
#[test]
fn ipv6_unique_local_blocked() {
let h = get_valid_host("[fc00::1]").expect("valid");
let Host::Ipv6(ip) = h else {
panic!("expected v6")
};
assert!(!is_global_hardcoded(IpAddr::V6(ip)));
}
// ===
// Punycode / IDN
#[test]
fn punycode_passthrough() {
let h = get_valid_host("xn--deadbeafcaf-lbb.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_gets_punycoded() {
let h = get_valid_host("deadbeafcafé.test").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "xn--deadbeafcaf-lbb.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_gets_punycoded_tld() {
let h = get_valid_host("deadbeaf.café").expect("valid");
match h {
Host::Domain(d) => assert_eq!(d, "deadbeaf.xn--caf-dma"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_emoji_gets_punycoded() {
let h = get_valid_host("xn--t88h.test").expect("valid"); // 🛡️.test
match h {
Host::Domain(d) => assert_eq!(d, "xn--t88h.test"),
_ => panic!("expected domain"),
}
}
#[test]
fn idn_unicode_to_punycode_roundtrip() {
let from_unicode = get_valid_host("🛡️.test").expect("valid");
let from_puny = get_valid_host("xn--t88h.test").expect("valid");
match (from_unicode, from_puny) {
(Host::Domain(a), Host::Domain(b)) => assert_eq!(a, b),
_ => panic!("expected domains"),
}
}
#[test]
fn invalid_punycode_rejected() {
// bare invalid punycode
assert!(get_valid_host("xn--").is_err());
}
#[test]
fn underscore_in_label_rejected() {
assert!(get_valid_host("dead_beaf.cafe").is_err());
}
#[test]
fn label_too_long_rejected() {
let label = "a".repeat(64);
assert!(get_valid_host(&format!("{label}.test")).is_err());
}
#[test]
fn domain_too_long_rejected() {
let big = "a.".repeat(130) + "test"; // > 253
assert!(get_valid_host(&big).is_err());
}
}
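A minimal standalone sketch of the normalize-then-classify pattern these tests exercise: the WHATWG URL host parser folds decimal, hex and octal IPv4 spellings into one canonical address before any reachability check runs. The helpers below are illustrative stand-ins, not the is_global_hardcoded()/should_block_host() pair from this changeset, and only use std plus the url crate.

use std::net::IpAddr;
use url::Host;

// Simplified stand-in for the fuller reserved-range checks in util.rs.
fn is_obviously_non_global(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => v4.is_loopback() || v4.is_private() || v4.is_link_local(),
        IpAddr::V6(v6) => v6.is_loopback() || (v6.segments()[0] & 0xfe00) == 0xfc00, // fc00::/7 unique-local
    }
}

fn blocks(raw: &str) -> bool {
    // Host::parse() follows the WHATWG rules, so "2130706433", "0x7f000001" and
    // "0177.0.0.01" all normalize to Host::Ipv4(127.0.0.1) before being classified.
    match Host::parse(raw) {
        Ok(Host::Ipv4(v4)) => is_obviously_non_global(IpAddr::V4(v4)),
        Ok(Host::Ipv6(v6)) => is_obviously_non_global(IpAddr::V6(v6)),
        _ => false, // domains go through the regex / DNS resolution path instead
    }
}

fn main() {
    assert!(blocks("2130706433"));  // decimal loopback
    assert!(blocks("[::1]"));       // IPv6 loopback
    assert!(!blocks("134744072"));  // 8.8.8.8 stays allowed
}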

View file

@ -302,10 +302,10 @@ pub async fn send_invite(
.append_pair("organizationUserId", &member_id)
.append_pair("token", &invite_token);
if CONFIG.sso_enabled() && CONFIG.sso_only() {
if CONFIG.sso_enabled() {
query_params.append_pair("orgUserHasExistingUser", "false");
query_params.append_pair("orgSsoIdentifier", &org_id);
}
if user.private_key.is_some() {
} else if user.private_key.is_some() {
query_params.append_pair("orgUserHasExistingUser", "true");
}
}

View file

@ -126,7 +126,7 @@ fn parse_args() {
exit(0);
} else if pargs.contains(["-v", "--version"]) {
config::SKIP_CONFIG_VALIDATION.store(true, Ordering::Relaxed);
let web_vault_version = util::get_active_web_release();
let web_vault_version = util::get_web_vault_version();
println!("Vaultwarden {version}");
println!("Web-Vault {web_vault_version}");
exit(0);
@ -558,12 +558,6 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
let basepath = &CONFIG.domain_path();
let mut config = rocket::Config::from(rocket::Config::figment());
// We install our own signal handlers below; disable Rocket's built-in handlers
config.shutdown.ctrlc = false;
#[cfg(unix)]
config.shutdown.signals.clear();
config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into();
config.cli_colors = false; // Make sure Rocket does not color any values for logging.
config.limits = Limits::new()
@ -595,7 +589,11 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
CONFIG.set_rocket_shutdown_handle(instance.shutdown());
spawn_shutdown_signal_handler();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
info!("Exiting Vaultwarden!");
CONFIG.shutdown();
});
#[cfg(all(unix, sqlite))]
{
@ -623,35 +621,6 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
Ok(())
}
#[cfg(unix)]
fn spawn_shutdown_signal_handler() {
tokio::spawn(async move {
use tokio::signal::unix::signal;
let mut sigint = signal(SignalKind::interrupt()).expect("Error setting SIGINT handler");
let mut sigterm = signal(SignalKind::terminate()).expect("Error setting SIGTERM handler");
let mut sigquit = signal(SignalKind::quit()).expect("Error setting SIGQUIT handler");
let signal_name = tokio::select! {
_ = sigint.recv() => "SIGINT",
_ = sigterm.recv() => "SIGTERM",
_ = sigquit.recv() => "SIGQUIT",
};
info!("Received {signal_name}, initiating graceful shutdown");
CONFIG.shutdown();
});
}
#[cfg(not(unix))]
fn spawn_shutdown_signal_handler() {
tokio::spawn(async move {
tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler");
info!("Received Ctrl-C, initiating graceful shutdown");
CONFIG.shutdown();
});
}
fn schedule_jobs(pool: db::DbPool) {
if CONFIG.job_poll_interval_ms() == 0 {
info!("Job scheduler disabled.");

View file

@ -17,7 +17,7 @@ use crate::{
CONFIG,
};
pub static FAKE_SSO_IDENTIFIER: &str = "00000000-01DC-01DC-01DC-000000000000";
pub static FAKE_IDENTIFIER: &str = "VW_DUMMY_IDENTIFIER_FOR_OIDC";
static SSO_JWT_ISSUER: LazyLock<String> = LazyLock::new(|| format!("{}|sso", CONFIG.domain_origin()));
@ -188,7 +188,6 @@ pub async fn authorize_url(
client_challenge: OIDCCodeChallenge,
client_id: &str,
raw_redirect_uri: &str,
binding_hash: Option<String>,
conn: DbConn,
) -> ApiResult<Url> {
let redirect_uri = match client_id {
@ -204,7 +203,7 @@ pub async fn authorize_url(
_ => err!(format!("Unsupported client {client_id}")),
};
let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri, binding_hash).await?;
let (auth_url, sso_auth) = Client::authorize_url(state, client_challenge, redirect_uri).await?;
sso_auth.save(&conn).await?;
Ok(auth_url)
}
@ -284,7 +283,7 @@ pub async fn exchange_code(
let email_verified = id_claims.email_verified().or(user_info.email_verified());
let user_name = id_claims.preferred_username().or(user_info.preferred_username()).map(|un| un.to_string());
let user_name = id_claims.preferred_username().map(|un| un.to_string());
let refresh_token = token_response.refresh_token().map(|t| t.secret());
if refresh_token.is_none() && CONFIG.sso_scopes_vec().contains(&"offline_access".to_string()) {

View file

@ -1,5 +1,6 @@
use std::{borrow::Cow, sync::LazyLock, time::Duration};
use mini_moka::sync::Cache;
use openidconnect::{core::*, reqwest, *};
use regex::Regex;
use url::Url;
@ -12,14 +13,9 @@ use crate::{
};
static CLIENT_CACHE_KEY: LazyLock<String> = LazyLock::new(|| "sso-client".to_string());
static CLIENT_CACHE: LazyLock<moka::sync::Cache<String, Client>> = LazyLock::new(|| {
moka::sync::Cache::builder()
.max_capacity(1)
.time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration()))
.build()
static CLIENT_CACHE: LazyLock<Cache<String, Client>> = LazyLock::new(|| {
Cache::builder().max_capacity(1).time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration())).build()
});
static REFRESH_CACHE: LazyLock<moka::future::Cache<String, Result<RefreshTokenResponse, String>>> =
LazyLock::new(|| moka::future::Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(30)).build());
/// OpenID Connect Core client.
pub type CustomClient = openidconnect::Client<
@ -42,8 +38,6 @@ pub type CustomClient = openidconnect::Client<
EndpointSet,
>;
pub type RefreshTokenResponse = (Option<String>, String, Option<Duration>);
#[derive(Clone)]
pub struct Client {
pub http_client: reqwest::Client,
@ -117,7 +111,6 @@ impl Client {
state: OIDCState,
client_challenge: OIDCCodeChallenge,
redirect_uri: String,
binding_hash: Option<String>,
) -> ApiResult<(Url, SsoAuth)> {
let scopes = CONFIG.sso_scopes_vec().into_iter().map(Scope::new);
let base64_state = data_encoding::BASE64.encode(state.to_string().as_bytes());
@ -140,7 +133,7 @@ impl Client {
}
let (auth_url, _, nonce) = auth_req.url();
Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri, binding_hash)))
Ok((auth_url, SsoAuth::new(state, client_challenge, nonce.secret().clone(), redirect_uri)))
}
pub async fn exchange_code(
@ -238,29 +231,23 @@ impl Client {
verifier
}
pub async fn exchange_refresh_token(refresh_token: String) -> ApiResult<RefreshTokenResponse> {
let client = Client::cached().await?;
REFRESH_CACHE
.get_with(refresh_token.clone(), async move { client._exchange_refresh_token(refresh_token).await })
.await
.map_err(Into::into)
}
async fn _exchange_refresh_token(&self, refresh_token: String) -> Result<RefreshTokenResponse, String> {
pub async fn exchange_refresh_token(
refresh_token: String,
) -> ApiResult<(Option<String>, String, Option<Duration>)> {
let rt = RefreshToken::new(refresh_token);
match self.core_client.exchange_refresh_token(&rt).request_async(&self.http_client).await {
Err(err) => {
error!("Request to exchange_refresh_token endpoint failed: {err}");
Err(format!("Request to exchange_refresh_token endpoint failed: {err}"))
}
Ok(token_response) => Ok((
let client = Client::cached().await?;
let token_response =
match client.core_client.exchange_refresh_token(&rt).request_async(&client.http_client).await {
Err(err) => err!(format!("Request to exchange_refresh_token endpoint failed: {:?}", err)),
Ok(token_response) => token_response,
};
Ok((
token_response.refresh_token().map(|token| token.secret().clone()),
token_response.access_token().secret().clone(),
token_response.expires_in(),
)),
}
))
}
}
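The REFRESH_CACHE path above leans on moka's get_with() to coalesce concurrent refresh-token exchanges for a short TTL. A minimal sketch of that de-duplication pattern in isolation, assuming moka with the `future` feature enabled; the lookup function here is a stand-in, not the real token exchange:

use std::time::Duration;
use moka::future::Cache;

async fn expensive_lookup(key: &str) -> Result<String, String> {
    // Stand-in for the real network round-trip.
    Ok(format!("value-for-{key}"))
}

async fn cached_lookup(cache: &Cache<String, Result<String, String>>, key: String) -> Result<String, String> {
    // All concurrent callers sharing a key await the same in-flight future;
    // the stored result is then reused until the TTL expires.
    cache.get_with(key.clone(), async move { expensive_lookup(&key).await }).await
}

#[tokio::main]
async fn main() {
    let cache: Cache<String, Result<String, String>> =
        Cache::builder().max_capacity(1_000).time_to_live(Duration::from_secs(30)).build();
    println!("{:?}", cached_lookup(&cache, "token-a".to_string()).await);
}

The short time-to-live in the hunk means a burst of clients refreshing the same token results in a single upstream call, while the cached result still ages out quickly.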

View file

@ -111,8 +111,7 @@
"microsoftstore.com",
"xbox.com",
"azure.com",
"windowsazure.com",
"cloud.microsoft"
"windowsazure.com"
],
"excluded": false
},
@ -972,13 +971,5 @@
"pinterest.se"
],
"excluded": false
},
{
"type": 91,
"domains": [
"twitter.com",
"x.com"
],
"excluded": false
}
]

View file

@ -1,17 +1,6 @@
body {
padding-top: 75px;
}
/* Some extra width's for the main layout */
@media (min-width: 1600px) {
.container-xxl {
max-width: 1520px;
}
}
@media (min-width: 1800px) {
.container-xxl {
max-width: 1720px;
}
}
img {
width: 48px;
height: 48px;
@ -49,8 +38,8 @@ img {
max-width: 130px;
}
#users-table .vw-actions, #orgs-table .vw-actions {
min-width: 170px;
max-width: 180px;
min-width: 155px;
max-width: 160px;
}
#users-table .vw-org-cell {
max-height: 120px;

View file

@ -29,7 +29,7 @@ function isValidIp(ip) {
return ipv4Regex.test(ip) || ipv6Regex.test(ip);
}
function checkVersions(platform, installed, latest, commit=null, compare_order=0) {
function checkVersions(platform, installed, latest, commit=null, pre_release=false) {
if (installed === "-" || latest === "-") {
document.getElementById(`${platform}-failed`).classList.remove("d-none");
return;
@ -37,7 +37,7 @@ function checkVersions(platform, installed, latest, commit=null, compare_order=0
// Only check basic versions, no commit revisions
if (commit === null || installed.indexOf("-") === -1) {
if (platform === "web" && compare_order === 1) {
if (platform === "web" && pre_release === true) {
document.getElementById(`${platform}-prerelease`).classList.remove("d-none");
} else if (installed == latest) {
document.getElementById(`${platform}-success`).classList.remove("d-none");
@ -83,7 +83,7 @@ async function generateSupportString(event, dj) {
let supportString = "### Your environment (Generated via diagnostics page)\n\n";
supportString += `* Vaultwarden version: v${dj.current_release}\n`;
supportString += `* Web-vault version: v${dj.active_web_release}\n`;
supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
supportString += `* Database type: ${dj.db_type}\n`;
@ -109,9 +109,6 @@ async function generateSupportString(event, dj) {
supportString += "* Websocket Check: disabled\n";
}
supportString += `* HTTP Response Checks: ${httpResponseCheck}\n`;
if (dj.invalid_feature_flags != "") {
supportString += `* Invalid feature flags: true\n`;
}
const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
"headers": { "Accept": "application/json" }
@ -131,10 +128,6 @@ async function generateSupportString(event, dj) {
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
}
if (dj.invalid_feature_flags != "") {
supportString += `\n**Invalid feature flags:** ${dj.invalid_feature_flags}\n`;
}
// Add http response check messages if they exists
if (httpResponseCheck === false) {
supportString += "\n**Failed HTTP Checks:**\n";
@ -215,9 +208,9 @@ function initVersionCheck(dj) {
}
checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);
const webInstalled = dj.active_web_release;
const webLatest = dj.latest_web_release;
checkVersions("web", webInstalled, webLatest, null, dj.web_vault_compare);
const webInstalled = dj.web_vault_version;
const webLatest = dj.latest_web_build;
checkVersions("web", webInstalled, webLatest, null, dj.web_vault_pre_release);
}
function checkDns(dns_resolved) {

View file

@ -4,10 +4,10 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs5/dt-2.3.7
* https://datatables.net/download/#bs5/dt-2.3.5
*
* Included libraries:
* DataTables 2.3.7
* DataTables 2.3.5
*/
:root {
@ -88,42 +88,42 @@ table.dataTable thead > tr > th:active,
table.dataTable thead > tr > td:active {
outline: none;
}
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before {
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before {
position: absolute;
display: block;
bottom: 50%;
content: "\25B2";
content: "\25B2"/"";
}
table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
position: absolute;
display: block;
top: 50%;
content: "\25BC";
content: "\25BC"/"";
}
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order {
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order {
position: relative;
width: 12px;
height: 20px;
height: 24px;
}
table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc .dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc .dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
left: 0;
opacity: 0.125;
line-height: 9px;
@ -140,15 +140,15 @@ table.dataTable thead > tr > td.dt-orderable-desc:hover {
outline: 2px solid rgba(0, 0, 0, 0.05);
outline-offset: -2px;
}
table.dataTable thead > tr > th.dt-ordering-asc .dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc .dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc .dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc .dt-column-order:after {
table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
opacity: 0.6;
}
table.dataTable thead > tr > th.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) .dt-column-order:empty, table.dataTable thead > tr > th.sorting_desc_disabled .dt-column-order:after, table.dataTable thead > tr > th.sorting_asc_disabled .dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) .dt-column-order:empty,
table.dataTable thead > tr > td.sorting_desc_disabled .dt-column-order:after,
table.dataTable thead > tr > td.sorting_asc_disabled .dt-column-order:before {
table.dataTable thead > tr > th.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) span.dt-column-order:empty, table.dataTable thead > tr > th.sorting_desc_disabled span.dt-column-order:after, table.dataTable thead > tr > th.sorting_asc_disabled span.dt-column-order:before,
table.dataTable thead > tr > td.dt-orderable-none:not(.dt-ordering-asc, .dt-ordering-desc) span.dt-column-order:empty,
table.dataTable thead > tr > td.sorting_desc_disabled span.dt-column-order:after,
table.dataTable thead > tr > td.sorting_asc_disabled span.dt-column-order:before {
display: none;
}
table.dataTable thead > tr > th:active,
@ -169,24 +169,24 @@ table.dataTable tfoot > tr > td div.dt-column-footer {
align-items: var(--dt-header-align-items);
gap: 4px;
}
table.dataTable thead > tr > th div.dt-column-header .dt-column-title,
table.dataTable thead > tr > th div.dt-column-footer .dt-column-title,
table.dataTable thead > tr > td div.dt-column-header .dt-column-title,
table.dataTable thead > tr > td div.dt-column-footer .dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-header .dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-footer .dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-header .dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-footer .dt-column-title {
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title {
flex-grow: 1;
}
table.dataTable thead > tr > th div.dt-column-header .dt-column-title:empty,
table.dataTable thead > tr > th div.dt-column-footer .dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-header .dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-footer .dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-header .dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-footer .dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-header .dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-footer .dt-column-title:empty {
table.dataTable thead > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable thead > tr > td div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > th div.dt-column-footer span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-header span.dt-column-title:empty,
table.dataTable tfoot > tr > td div.dt-column-footer span.dt-column-title:empty {
display: none;
}
@ -588,16 +588,16 @@ table.dataTable.table-sm > thead > tr td.dt-ordering-asc,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc {
padding-right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-orderable-asc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc .dt-column-order {
table.dataTable.table-sm > thead > tr th.dt-orderable-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc span.dt-column-order {
right: 0.25rem;
}
table.dataTable.table-sm > thead > tr th.dt-type-date .dt-column-order, table.dataTable.table-sm > thead > tr th.dt-type-numeric .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-date .dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-numeric .dt-column-order {
table.dataTable.table-sm > thead > tr th.dt-type-date span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-type-numeric span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-date span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-type-numeric span.dt-column-order {
left: 0.25rem;
}
@ -606,8 +606,7 @@ div.dt-scroll-head table.table-bordered {
}
div.table-responsive > div.dt-container > div.row {
margin-left: 0;
margin-right: 0;
margin: 0;
}
div.table-responsive > div.dt-container > div.row > div[class^=col-]:first-child {
padding-left: 0;

View file

@ -4,13 +4,13 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs5/dt-2.3.7
* https://datatables.net/download/#bs5/dt-2.3.5
*
* Included libraries:
* DataTables 2.3.7
* DataTables 2.3.5
*/
/*! DataTables 2.3.7
/*! DataTables 2.3.5
* © SpryMedia Ltd - datatables.net/license
*/
@ -186,7 +186,7 @@
"sDestroyWidth": $this[0].style.width,
"sInstance": sId,
"sTableId": sId,
colgroup: $('<colgroup>'),
colgroup: $('<colgroup>').prependTo(this),
fastData: function (row, column, type) {
return _fnGetCellData(oSettings, row, column, type);
}
@ -259,7 +259,6 @@
"orderHandler",
"titleRow",
"typeDetect",
"columnTitleTag",
[ "iCookieDuration", "iStateDuration" ], // backwards compat
[ "oSearch", "oPreviousSearch" ],
[ "aoSearchCols", "aoPreSearchCols" ],
@ -424,7 +423,7 @@
if ( oSettings.caption ) {
if ( caption.length === 0 ) {
caption = $('<caption/>').prependTo( $this );
caption = $('<caption/>').appendTo( $this );
}
caption.html( oSettings.caption );
@ -437,14 +436,6 @@
oSettings.captionNode = caption[0];
}
// Place the colgroup element in the correct location for the HTML structure
if (caption.length) {
oSettings.colgroup.insertAfter(caption);
}
else {
oSettings.colgroup.prependTo(oSettings.nTable);
}
if ( thead.length === 0 ) {
thead = $('<thead/>').appendTo($this);
}
@ -525,7 +516,7 @@
*
* @type string
*/
builder: "bs5/dt-2.3.7",
builder: "bs5/dt-2.3.5",
/**
* Buttons. For use with the Buttons extension for DataTables. This is
@ -1301,7 +1292,7 @@
};
// Replaceable function in api.util
var _stripHtml = function (input, replacement) {
var _stripHtml = function (input) {
if (! input || typeof input !== 'string') {
return input;
}
@ -1313,7 +1304,7 @@
var previous;
input = input.replace(_re_html, replacement || ''); // Complete tags
input = input.replace(_re_html, ''); // Complete tags
// Safety for incomplete script tag - use do / while to ensure that
// we get all instances
@ -1778,7 +1769,7 @@
}
},
stripHtml: function (mixed, replacement) {
stripHtml: function (mixed) {
var type = typeof mixed;
if (type === 'function') {
@ -1786,7 +1777,7 @@
return;
}
else if (type === 'string') {
return _stripHtml(mixed, replacement);
return _stripHtml(mixed);
}
return mixed;
},
@ -3388,7 +3379,7 @@
colspan++;
}
var titleSpan = $('.dt-column-title', cell);
var titleSpan = $('span.dt-column-title', cell);
structure[row][column] = {
cell: cell,
@ -4102,8 +4093,8 @@
}
// Wrap the column title so we can write to it in future
if ( $('.dt-column-title', cell).length === 0) {
$(document.createElement(settings.columnTitleTag))
if ( $('span.dt-column-title', cell).length === 0) {
$('<span>')
.addClass('dt-column-title')
.append(cell.childNodes)
.appendTo(cell);
@ -4114,9 +4105,9 @@
isHeader &&
jqCell.filter(':not([data-dt-order=disable])').length !== 0 &&
jqCell.parent(':not([data-dt-order=disable])').length !== 0 &&
$('.dt-column-order', cell).length === 0
$('span.dt-column-order', cell).length === 0
) {
$(document.createElement(settings.columnTitleTag))
$('<span>')
.addClass('dt-column-order')
.appendTo(cell);
}
@ -4125,7 +4116,7 @@
// layout for those elements
var headerFooter = isHeader ? 'header' : 'footer';
if ( $('div.dt-column-' + headerFooter, cell).length === 0) {
if ( $('span.dt-column-' + headerFooter, cell).length === 0) {
$('<div>')
.addClass('dt-column-' + headerFooter)
.append(cell.childNodes)
@ -4282,10 +4273,6 @@
// Custom Ajax option to submit the parameters as a JSON string
if (baseAjax.submitAs === 'json' && typeof data === 'object') {
baseAjax.data = JSON.stringify(data);
if (!baseAjax.contentType) {
baseAjax.contentType = 'application/json; charset=utf-8';
}
}
if (typeof ajax === 'function') {
@ -5544,7 +5531,7 @@
var autoClass = _ext.type.className[column.sType];
var padding = column.sContentPadding || (scrollX ? '-' : '');
var text = longest + padding;
var insert = longest.indexOf('<') === -1 && longest.indexOf('&') === -1
var insert = longest.indexOf('<') === -1
? document.createTextNode(text)
: text
@ -5732,20 +5719,15 @@
.replace(/id=".*?"/g, '')
.replace(/name=".*?"/g, '');
// Don't want Javascript at all in these calculation cells.
cellString = cellString.replace(/<script.*?<\/script>/gi, ' ');
var noHtml = _stripHtml(cellString, ' ')
var s = _stripHtml(cellString)
.replace( /&nbsp;/g, ' ' );
// The length is calculated on the text only, but we keep the HTML
// in the string so it can be used in the calculation table
collection.push({
str: cellString,
len: noHtml.length
str: s,
len: s.length
});
allStrings.push(noHtml);
allStrings.push(s);
}
// Order and then cut down to the size we need
@ -8800,7 +8782,7 @@
// Automatic - find the _last_ unique cell from the top that is not empty (last for
// backwards compatibility)
for (var i=0 ; i<header.length ; i++) {
if (header[i][column].unique && $('.dt-column-title', header[i][column].cell).text()) {
if (header[i][column].unique && $('span.dt-column-title', header[i][column].cell).text()) {
target = i;
}
}
@ -8896,10 +8878,6 @@
return null;
}
if (col.responsiveVisible === false) {
return null;
}
// Selector
if (match[1]) {
return $(nodes[idx]).filter(match[1]).length > 0 ? idx : null;
@ -9111,7 +9089,7 @@
title = undefined;
}
var span = $('.dt-column-title', this.column(column).header(row));
var span = $('span.dt-column-title', this.column(column).header(row));
if (title !== undefined) {
span.html(title);
@ -10285,8 +10263,8 @@
// Needed for header and footer, so pulled into its own function
function cleanHeader(node, className) {
$(node).find('.dt-column-order').remove();
$(node).find('.dt-column-title').each(function () {
$(node).find('span.dt-column-order').remove();
$(node).find('span.dt-column-title').each(function () {
var title = $(this).html();
$(this).parent().parent().append(title);
$(this).remove();
@ -10304,7 +10282,7 @@
* @type string
* @default Version number
*/
DataTable.version = "2.3.7";
DataTable.version = "2.3.5";
/**
* Private data store, containing all of the settings objects that are
@ -11472,10 +11450,7 @@
iDeferLoading: null,
/** Event listeners */
on: null,
/** Title wrapper element type */
columnTitleTag: 'span'
on: null
};
_fnHungarianMap( DataTable.defaults );
@ -12439,10 +12414,7 @@
orderHandler: true,
/** Title row indicator */
titleRow: null,
/** Title wrapper element type */
columnTitleTag: 'span'
titleRow: null
};
/**

View file

@ -27,7 +27,7 @@
</symbol>
</svg>
<nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
<div class="container-xxl">
<div class="container-xl">
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="vaultwarden-icon" src="{{urlpath}}/vw_static/vaultwarden-icon.png" alt="V">aultwarden Admin</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarCollapse"
aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">

View file

@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="diagnostics-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-2">Diagnostics</h6>
@ -8,7 +8,7 @@
<dl class="row">
<dt class="col-sm-5">Server Installed
<span class="badge bg-success d-none abbr-badge" id="server-success" title="Latest version is installed.">Ok</span>
<span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="An update is available.">Update</span>
<span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="There seems to be an update available.">Update</span>
<span class="badge bg-info text-dark d-none abbr-badge" id="server-branch" title="This is a branched version.">Branched</span>
</dt>
<dd class="col-sm-7">
@ -23,17 +23,17 @@
{{#if page_data.web_vault_enabled}}
<dt class="col-sm-5">Web Installed
<span class="badge bg-success d-none abbr-badge" id="web-success" title="Latest version is installed.">Ok</span>
<span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="An update is available.">Update</span>
<span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You are using a pre-release version.">Pre-Release</span>
<span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="There seems to be an update available.">Update</span>
<span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You seem to be using a pre-release version.">Pre-Release</span>
</dt>
<dd class="col-sm-7">
<span id="web-installed">{{page_data.active_web_release}}</span>
<span id="web-installed">{{page_data.web_vault_version}}</span>
</dd>
<dt class="col-sm-5">Web Latest
<span class="badge bg-secondary d-none abbr-badge" id="web-failed" title="Unable to determine latest version.">Unknown</span>
</dt>
<dd class="col-sm-7">
<span id="web-latest">{{page_data.latest_web_release}}</span>
<span id="web-latest">{{page_data.latest_web_build}}</span>
</dd>
{{/if}}
{{#unless page_data.web_vault_enabled}}
@ -194,14 +194,6 @@
<dd class="col-sm-7">
<span id="http-response-errors" class="d-block"></span>
</dd>
{{#if page_data.invalid_feature_flags}}
<dt class="col-sm-5">Invalid Feature Flags
<span class="badge bg-warning text-dark abbr-badge" id="feature-flag-warning" title="Some feature flags are invalid or outdated!">Warning</span>
</dt>
<dd class="col-sm-7">
<span id="feature-flags" class="d-block"><b>Flags:</b> <span id="feature-flags-string">{{page_data.invalid_feature_flags}}</span></span>
</dd>
{{/if}}
</dl>
</div>
</div>

View file

@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
{{#if error}}
<div class="align-items-center p-3 mb-3 text-opacity-50 text-dark bg-warning rounded shadow">
<div>

View file

@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="organizations-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-3">Organizations</h6>
<div class="table-responsive-xl small">
@ -59,7 +59,7 @@
</main>
<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-4.0.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_organizations.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

View file

@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="admin_token_warning" class="alert alert-warning alert-dismissible fade show d-none">
<button type="button" class="btn-close" data-bs-target="admin_token_warning" data-bs-dismiss="alert" aria-label="Close"></button>
You are using a plain text `ADMIN_TOKEN` which is insecure.<br>

View file

@ -1,4 +1,4 @@
<main class="container-xxl">
<main class="container-xl">
<div id="users-block" class="my-3 p-3 rounded shadow">
<h6 class="border-bottom pb-2 mb-3">Registered Users</h6>
<div class="table-responsive-xl small">
@ -43,7 +43,7 @@
</td>
{{#if ../sso_enabled}}
<td>
<span class="d-block text-break text-wrap">{{sso_identifier}}</span>
<span class="d-block">{{sso_identifier}}</span>
</td>
{{/if}}
<td>
@ -153,7 +153,7 @@
</main>
<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-4.0.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_users.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

View file

@ -137,14 +137,6 @@ bit-nav-logo bit-nav-item .bwi-shield {
app-user-layout app-danger-zone button:nth-child(1) {
@extend %vw-hide;
}
/* Hide unsupported Forwarding email alias options */
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="Firefox Relay"]) {
@extend %vw-hide;
}
ng-dropdown-panel div.ng-dropdown-panel-items div:has(> [title="DuckDuckGo"]) {
@extend %vw-hide;
}
/**** END Static Vaultwarden Changes ****/
/**** START Dynamic Vaultwarden Changes ****/
{{#if signup_disabled}}
@ -166,13 +158,6 @@ app-root a[routerlink="/signup"] {
{{/if}}
{{/if}}
{{#if remember_2fa_disabled}}
/* Hide checkbox to remember 2FA token for 30 days */
app-two-factor-auth > form > bit-form-control {
@extend %vw-hide;
}
{{/if}}
{{#unless mail_2fa_enabled}}
/* Hide `Email` 2FA if mail is not enabled */
.providers-2fa-1 {
@ -207,19 +192,6 @@ bit-nav-item[route="sends"] {
@extend %vw-hide;
}
{{/unless}}
{{#unless password_hints_allowed}}
/* Hide password hints if not allowed */
a[routerlink="/hint"],
{{#if (webver "<2025.12.2")}}
app-change-password > form > .form-group:nth-child(5),
auth-input-password > form > bit-form-field:nth-child(4) {
{{else}}
.vw-password-hint {
{{/if}}
@extend %vw-hide;
}
{{/unless}}
/**** End Dynamic Vaultwarden Changes ****/
/**** Include a special user stylesheet for custom changes ****/
{{#if load_user_scss}}

View file

@ -16,10 +16,7 @@ use tokio::{
time::{sleep, Duration},
};
use crate::{
config::{PathType, SUPPORTED_FEATURE_FLAGS},
CONFIG,
};
use crate::{config::PathType, CONFIG};
pub struct AppHeaders();
@ -156,11 +153,9 @@ impl Cors {
fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
let origin = Cors::get_header(headers, "Origin");
let safari_extension_origin = "file://";
let desktop_custom_file_origin = "bw-desktop-file://bundle";
if origin == CONFIG.domain_origin()
|| origin == safari_extension_origin
|| origin == desktop_custom_file_origin
|| (CONFIG.sso_enabled() && origin == CONFIG.sso_authority())
{
Some(origin)
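For readability, the origin check above reduces to membership in a small allow-list: the configured domain origin, the fixed Safari-extension origin, and, when SSO is enabled, the SSO authority. A minimal, self-contained restatement of that shape (the function name, parameters, and the decoupling from CONFIG and rocket's HeaderMap are assumptions for illustration, not the crate's API):

// Sketch only: mirrors the shape of the check above without CONFIG or rocket types.
fn allowed_origin(origin: &str, domain_origin: &str, sso_authority: Option<&str>) -> Option<String> {
    let safari_extension_origin = "file://";
    if origin == domain_origin
        || origin == safari_extension_origin
        || sso_authority.is_some_and(|sso| origin == sso)
    {
        Some(origin.to_owned())
    } else {
        None
    }
}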
@ -536,7 +531,7 @@ struct WebVaultVersion {
version: String,
}
pub fn get_active_web_release() -> String {
pub fn get_web_vault_version() -> String {
let version_files = [
format!("{}/vw-version.json", CONFIG.web_vault_folder()),
format!("{}/version.json", CONFIG.web_vault_folder()),
@ -634,21 +629,6 @@ fn _process_key(key: &str) -> String {
}
}
pub fn deser_opt_nonempty_str<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
where
D: Deserializer<'de>,
T: From<String>,
{
use serde::Deserialize;
Ok(Option::<String>::deserialize(deserializer)?.and_then(|s| {
if s.is_empty() {
None
} else {
Some(T::from(s))
}
}))
}
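A helper with this shape is normally wired up through serde's deserialize_with attribute so that an empty string in a config value or API payload collapses to None rather than Some(""). A hedged, self-contained sketch of that wiring (the Settings struct, the smtp_host field, and the helper name are illustrative, not taken from the codebase):

use serde::{Deserialize, Deserializer};

// Same idea as the helper above: treat "" as None instead of Some("").
fn empty_str_as_none<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
where
    D: Deserializer<'de>,
{
    Ok(Option::<String>::deserialize(deserializer)?.filter(|s| !s.is_empty()))
}

#[derive(Deserialize)]
struct Settings {
    // `{"smtp_host": ""}` deserializes to `smtp_host: None`.
    #[serde(default, deserialize_with = "empty_str_as_none")]
    smtp_host: Option<String>,
}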
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum NumberOrString {
@ -734,7 +714,7 @@ where
warn!("Can't connect to database, retrying: {e:?}");
sleep(Duration::from_secs(1)).await;
sleep(Duration::from_millis(1_000)).await;
}
}
}
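The surrounding loop retries a failed database connection with a one-second pause between attempts. A hedged, generic restatement of that pattern (function name, signature, and the bounded retry budget are assumptions; the real code keeps its own loop and logging):

use std::time::Duration;

// Sketch: retry an async connect attempt with a fixed one-second backoff.
async fn connect_with_retries<T, E, F, Fut>(mut connect: F, attempts: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
    E: std::fmt::Debug,
{
    let mut last_err = None;
    for _ in 0..attempts {
        match connect().await {
            Ok(conn) => return Ok(conn),
            Err(e) => {
                eprintln!("Can't connect to database, retrying: {e:?}");
                last_err = Some(e);
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        }
    }
    Err(last_err.expect("attempts must be greater than zero"))
}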
@ -783,28 +763,21 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
}
}
pub enum FeatureFlagFilter {
#[allow(dead_code)]
Unfiltered,
ValidOnly,
InvalidOnly,
}
/// Parses the experimental client feature flags string into a HashMap.
pub fn parse_experimental_client_feature_flags(
experimental_client_feature_flags: &str,
filter_mode: FeatureFlagFilter,
) -> HashMap<String, bool> {
pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
// These flags can still be configured, but they are deprecated and no longer used
// To keep old installations from failing at startup, filter them out instead of erroring
const DEPRECATED_FLAGS: &[&str] =
&["autofill-overlay", "autofill-v2", "browser-fileless-import", "extension-refresh", "fido2-vault-credentials"];
experimental_client_feature_flags
.split(',')
.map(str::trim)
.filter(|flag| !flag.is_empty())
.filter(|flag| match filter_mode {
FeatureFlagFilter::Unfiltered => true,
FeatureFlagFilter::ValidOnly => SUPPORTED_FEATURE_FLAGS.contains(flag),
FeatureFlagFilter::InvalidOnly => !SUPPORTED_FEATURE_FLAGS.contains(flag),
.filter_map(|f| {
let flag = f.trim();
if !flag.is_empty() && !DEPRECATED_FLAGS.contains(&flag) {
return Some((flag.to_owned(), true));
}
None
})
.map(|flag| (flag.to_owned(), true))
.collect()
}
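For the variant of parse_experimental_client_feature_flags that takes a FeatureFlagFilter (shown in this hunk), a hedged usage sketch: ValidOnly yields the map advertised to clients, while InvalidOnly yields the leftovers an admin page could surface as misconfigured. The concrete flag names below are assumptions picked for illustration; only flags present in SUPPORTED_FEATURE_FLAGS would land in the ValidOnly map.

#[test]
fn feature_flag_filtering_sketch() {
    // Illustrative only: assumes "ssh-agent" is in SUPPORTED_FEATURE_FLAGS
    // and "not-a-real-flag" is not. Whitespace and empty entries are dropped.
    let raw = "ssh-agent, not-a-real-flag, ";

    let valid = parse_experimental_client_feature_flags(raw, FeatureFlagFilter::ValidOnly);
    assert_eq!(valid.get("ssh-agent"), Some(&true));
    assert!(!valid.contains_key("not-a-real-flag"));

    let invalid = parse_experimental_client_feature_flags(raw, FeatureFlagFilter::InvalidOnly);
    assert_eq!(invalid.get("not-a-real-flag"), Some(&true));
    assert!(!invalid.contains_key("ssh-agent"));
}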
@ -822,11 +795,7 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
// .9 and .10 are documented as globally reachable, so they're excluded
|| (
ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0
&& ip.octets()[3] != 9 && ip.octets()[3] != 10
)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
@ -853,17 +822,11 @@ pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
// Drone Remote ID Protocol Entity Tags (DETs) Prefix (`2001:30::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x3F).contains(&b))
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
// 6to4 (`2002::/16`) is not explicitly documented as globally reachable;
// IANA says N/A.
|| matches!(ip.segments(), [0x2002, _, _, _, _, _, _, _])
|| matches!(ip.segments(), [0x2001, 0xdb8, ..] | [0x3fff, 0..=0x0fff, ..]) // ip.is_documentation()
// Segment Routing (SRv6) SIDs (`5f00::/16`)
|| matches!(ip.segments(), [0x5f00, ..])
|| ip.is_unique_local()
|| ip.is_unicast_link_local())
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
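A small behavioural sketch of is_global_hardcoded, which reimplements checks similar to the standard library's unstable IpAddr::is_global. The expectations below only use ranges handled identically by both sides of this hunk (private, loopback, documentation, and unique-local space), so they hold either way; the test name is illustrative.

#[test]
fn global_ip_sketch() {
    use std::net::IpAddr;

    // Illustrative expectations based on the ranges excluded above.
    let cases: &[(&str, bool)] = &[
        ("8.8.8.8", true),      // ordinary public IPv4
        ("10.1.2.3", false),    // RFC 1918 private range
        ("127.0.0.1", false),   // loopback
        ("2001:db8::1", false), // IPv6 documentation range
        ("fd00::1", false),     // unique local
    ];
    for (addr, expected) in cases {
        let ip: IpAddr = addr.parse().unwrap();
        assert_eq!(is_global_hardcoded(ip), *expected, "{addr}");
    }
}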

View file

@ -79,4 +79,3 @@ for name, domain_list in domain_lists.items():
# Write out the global domains JSON file.
with open(file=OUTPUT_FILE, mode='w', encoding='utf-8') as f:
json.dump(global_domains, f, indent=2)
f.write("\n")