Compare commits

1 commit

Author: Jason Rasmussen | SHA1: 1f5393d02c | Message: WIP | Date: 2025-02-11 12:30:21 -05:00

1288 changed files with 35682 additions and 54783 deletions


@@ -1,10 +1,10 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:2ef23730ec68d8511ec8e6e0b82550ca728b256805d81f60ed890f3bfb21cfb9
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.29.1"
ENV FLUTTER_VERSION="3.24.5"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
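Both the old and new base image lines pin by sha256 digest, so the build stays reproducible even if the :22 tag is re-pointed. A hedged sketch of how such a digest can be looked up or used directly (the digest below is copied from the hunk):

# Show what the mutable :22 tag currently resolves to (the digest appears in the output):
docker buildx imagetools inspect mcr.microsoft.com/devcontainers/typescript-node:22
# Pull by the exact pinned digest, ignoring wherever the tag points now:
docker pull mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae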

.github/.nvmrc

@@ -1 +0,0 @@
22.14.0


@@ -1,5 +1,5 @@
title: '[Feature] feature-name-goes-here'
labels: ['feature']
title: "[Feature] feature-name-goes-here"
labels: ["feature"]
body:
- type: markdown
@@ -11,9 +11,9 @@ body:
- type: checkboxes
attributes:
label: I have searched the existing feature requests, both open and closed, to make sure this is not a duplicate request.
label: I have searched the existing feature requests to make sure this is not a duplicate request.
options:
- label: 'Yes'
- label: "Yes"
required: true
- type: textarea

.github/FUNDING.yml

@@ -1 +1 @@
custom: ['https://buy.immich.app', 'https://immich.store']
custom: ['https://buy.immich.app']


@@ -1,13 +1,6 @@
name: Report an issue with Immich
description: Report an issue with Immich
body:
- type: checkboxes
attributes:
label: I have searched the existing issues, both open and closed, to make sure this is not a duplicate report.
options:
- label: 'Yes'
required: true
- type: markdown
attributes:
value: |
@@ -84,7 +77,7 @@ body:
id: repro
attributes:
label: Reproduction steps
description: 'How do you trigger this bug? Please walk us through it step by step.'
description: "How do you trigger this bug? Please walk us through it step by step."
value: |
1.
2.
@@ -97,13 +90,12 @@ body:
id: logs
attributes:
label: Relevant log output
description:
Please copy and paste any relevant logs below. (code formatting is
description: Please copy and paste any relevant logs below. (code formatting is
enabled, no need for backticks)
render: shell
validations:
required: false
- type: textarea
attributes:
label: Additional information

.github/package-lock.json

@@ -1,28 +0,0 @@
{
"name": ".github",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"devDependencies": {
"prettier": "^3.5.3"
}
},
"node_modules/prettier": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz",
"integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==",
"dev": true,
"license": "MIT",
"bin": {
"prettier": "bin/prettier.cjs"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
}
}
}


@@ -1,9 +0,0 @@
{
"scripts": {
"format": "prettier --check .",
"format:fix": "prettier --write ."
},
"devDependencies": {
"prettier": "^3.5.3"
}
}


@@ -32,5 +32,5 @@ The `/api/something` endpoint is now `/api/something-else`
- [ ] I have confirmed that any new dependencies are strictly necessary.
- [ ] I have written tests for new code (if applicable)
- [ ] I have followed naming conventions/patterns in the surrounding code
- [ ] All code in `src/services/` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services/`)
- [ ] All code in `src/services` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services`)

.github/release.yml

@@ -1,33 +1,33 @@
changelog:
categories:
- title: 🚨 Breaking Changes
labels:
- changelog:breaking-change
- title: 🫥 Deprecated Changes
labels:
- changelog:deprecated
- title: 🔒 Security
labels:
- changelog:security
- title: 🚀 Features
labels:
- changelog:feature
- title: 🌟 Enhancements
labels:
- changelog:enhancement
- title: 🐛 Bug fixes
labels:
- changelog:bugfix
- title: 📚 Documentation
labels:
- changelog:documentation
- title: 🌐 Translations
labels:
- changelog:translation
changelog:
categories:
- title: 🚨 Breaking Changes
labels:
- changelog:breaking-change
- title: 🫥 Deprecated Changes
labels:
- changelog:deprecated
- title: 🔒 Security
labels:
- changelog:security
- title: 🚀 Features
labels:
- changelog:feature
- title: 🌟 Enhancements
labels:
- changelog:enhancement
- title: 🐛 Bug fixes
labels:
- changelog:bugfix
- title: 📚 Documentation
labels:
- changelog:documentation
- title: 🌐 Translations
labels:
- changelog:translation

.github/workflows/build-mobile.yml

@@ -22,18 +22,16 @@ jobs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
workflow:
- '.github/workflows/build-mobile.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android
@@ -51,18 +49,18 @@ jobs:
ref="${input_ref:-$github_ref}"
echo "ref=$ref" >> $GITHUB_OUTPUT
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
with:
ref: ${{ steps.get-ref.outputs.ref }}
- uses: actions/setup-java@3a4f6e1af504cf6a31855fa899c6aa5355ba6c12 # v4
- uses: actions/setup-java@v4
with:
distribution: 'zulu'
java-version: '17'
cache: 'gradle'
- name: Setup Flutter SDK
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
uses: subosito/flutter-action@v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@@ -89,7 +87,7 @@ jobs:
flutter build apk --release --split-per-abi --target-platform android-arm,android-arm64,android-x64
- name: Publish Android Artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
uses: actions/upload-artifact@v4
with:
name: release-apk-signed
path: mobile/build/app/outputs/flutter-apk/*.apk
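A recurring change throughout this diff: third-party actions switch between a full commit SHA with a trailing version comment and a bare tag. The SHA form is immutable, while a tag like v4 can be re-pointed by the action's maintainer. A small sketch of resolving a tag to the SHA used for pinning (the output line is illustrative):

# Ask the remote which commit the tag currently points to; that hash is what gets pinned:
git ls-remote https://github.com/actions/checkout refs/tags/v4
# 11bd71901bbe5b1630ceea73d27597364c9af683  refs/tags/v4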


@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Cleanup
run: |

.github/workflows/cli.yml

@@ -6,6 +6,7 @@ on:
- 'cli/**'
- '.github/workflows/cli.yml'
pull_request:
branches: [main]
paths:
- 'cli/**'
- '.github/workflows/cli.yml'
@@ -28,9 +29,9 @@ jobs:
working-directory: ./cli
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
- uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
registry-url: 'https://registry.npmjs.org'
@@ -52,16 +53,16 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v3.3.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v3.8.0
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
uses: docker/login-action@v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
@@ -76,7 +77,7 @@ jobs:
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
uses: docker/metadata-action@v5
with:
flavor: |
latest=false
@@ -87,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
uses: docker/build-push-action@v6.13.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64
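The build step above produces one image for both amd64 and arm64; the earlier QEMU setup supplies emulation for whichever platform is non-native. A rough local equivalent of what build-push-action invokes (the tag name is illustrative):

docker buildx create --use    # make sure a buildx builder is active
docker buildx build --platform linux/amd64,linux/arm64 \
  -f cli/Dockerfile -t ghcr.io/immich-app/immich-cli:example --push .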


@@ -9,14 +9,14 @@
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: 'CodeQL'
name: "CodeQL"
on:
push:
branches: ['main']
branches: [ "main" ]
pull_request:
# The branches below must be a subset of the branches above
branches: ['main']
branches: [ "main" ]
schedule:
- cron: '20 13 * * 1'
@@ -36,42 +36,43 @@ jobs:
strategy:
fail-fast: false
matrix:
language: ['javascript', 'python']
language: [ 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
with:
category: '/language:${{matrix.language}}'
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

.github/workflows/docker-cleanup.yml

@@ -0,0 +1,73 @@
# This workflow runs on certain conditions to check for and potentially
# delete container images from the GHCR which no longer have an associated
# code branch.
# Requires a PAT with the correct scope set in the secrets.
#
# This workflow will not trigger runs on forked repos.
name: Docker Cleanup
on:
pull_request:
types:
- "closed"
push:
paths:
- ".github/workflows/docker-cleanup.yml"
concurrency:
group: registry-tags-cleanup
cancel-in-progress: false
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
is_org: "true"
do_delete: "true"
package_name: "${{ matrix.primary-name }}"
scheme: "pull_request"
repo_name: "immich"
match_regex: '^pr-(\d+)$|^(\d+)$'
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
needs:
- cleanup-images
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
- primary-name: "immich-build-cache"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
do_delete: "true"
is_org: "true"
package_name: "${{ matrix.primary-name }}"
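match_regex limits deletion candidates to tags shaped like pr-<number> or a bare run number, leaving named tags such as main alone. A quick sanity check of the pattern (GNU grep -P for Perl-style \d; the tag list is illustrative):

for tag in pr-123 456 main pr-x; do
  echo -n "$tag: "
  echo "$tag" | grep -Pq '^pr-(\d+)$|^(\d+)$' && echo "deletion candidate" || echo "kept"
done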

.github/workflows/docker.yml

@@ -5,6 +5,7 @@ on:
push:
branches: [main]
pull_request:
branches: [main]
release:
types: [published]
@@ -23,9 +24,9 @@ jobs:
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
uses: dorny/paths-filter@v3
with:
filters: |
server:
@@ -35,12 +36,10 @@ jobs:
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
workflow:
- '.github/workflows/docker.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
@@ -49,23 +48,21 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ['', '-cuda', '-rocm', '-openvino', '-armnn', '-rknn']
suffix: ["", "-cuda", "-openvino", "-armnn"]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
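imagetools create copies an existing, possibly multi-arch, manifest to a new tag without rebuilding anything. Expanded with illustrative values, the new re-tag step amounts to:

REGISTRY_NAME="ghcr.io"
REPOSITORY=immich-app/immich-machine-learning
TAG_OLD=main-cuda      # suffix from the matrix; illustrative
TAG_NEW=pr-1234-cuda   # PR number illustrative
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD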
retag_server:
name: Re-Tag Server
@@ -74,10 +71,10 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ['']
suffix: [""]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -87,110 +84,107 @@ jobs:
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ${{ matrix.runner }}
runs-on: ubuntu-latest
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64,linux/arm64
device: cpu
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: cpu
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64
device: cuda
suffix: -cuda
- platform: linux/amd64
runner: mich
device: rocm
suffix: -rocm
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64
device: openvino
suffix: -openvino
- platform: linux/arm64
runner: ubuntu-24.04-arm
- platforms: linux/arm64
device: armnn
suffix: -armnn
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: rknn
suffix: -rknn
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v3.8.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Generate cache target
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
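Both variants of this step pick where the buildx layer cache goes: pull requests (which cannot push to GHCR) discard it locally, while trusted builds persist a full cache to a registry ref. Stripped of the workflow templating, the two outcomes correspond roughly to these build flags (the cache ref is illustrative):

# Pull request / fork: satisfy buildx but throw the cache away
--cache-to type=local,dest=/tmp/discard,ignore-error=true
# Trusted branch: persist all layers (mode=max) to the registry
--cache-to type=registry,ref=ghcr.io/immich-app/immich-build-cache:example,mode=max,compression=zstd
--cache-from type=registry,ref=ghcr.io/immich-app/immich-build-cache:example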
- name: Build and push image
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
uses: docker/build-push-action@v6.13.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
labels: ${{ steps.metadata.outputs.labels }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
@@ -198,255 +192,100 @@ jobs:
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_ml:
name: Merge & Push ML
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
DOCKER_REPO: altran1502/immich-machine-learning
strategy:
matrix:
include:
- device: cpu
- device: cuda
suffix: -cuda
- device: rocm
suffix: -rocm
- device: openvino
suffix: -openvino
- device: armnn
suffix: -armnn
- device: rknn
suffix: -rknn
needs:
- build_and_push_ml
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
with:
path: ${{ runner.temp }}/digests
pattern: ml-digests-${{ matrix.device }}-*
merge-multiple: true
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag
latest=false
suffix=${{ matrix.suffix }}
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch
# Tag with pr-number
type=ref,event=pr
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-
# Tag with git tag on release
type=ref,event=tag
type=raw,value=release,enable=${{ github.event_name == 'release' }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
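Each per-platform build pushes by digest only; this merge job then stitches the collected digests into a single manifest list per tag. With made-up digests, the flow reduces to:

# Build job: record each image digest as an empty file named after it
digest="sha256:aaaa1111"                                        # illustrative
mkdir -p /tmp/digests && touch "/tmp/digests/${digest#sha256:}" # ${var#pat} strips the sha256: prefix
# Merge job: one manifest list that references every platform digest
GHCR_REPO=ghcr.io/immich-app/immich-machine-learning
docker buildx imagetools create -t "$GHCR_REPO:main" \
  "$GHCR_REPO@sha256:aaaa1111" "$GHCR_REPO@sha256:bbbb2222"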
build_and_push_server:
name: Build and Push Server
runs-on: ${{ matrix.runner }}
runs-on: ubuntu-latest
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platform: linux/arm64
runner: ubuntu-24.04-arm
- platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
uses: docker/setup-buildx-action@v3.8.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Generate cache target
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
uses: docker/build-push-action@v6.13.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platform }}
labels: ${{ steps.metadata.outputs.labels }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=cpu
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: server-digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_server:
name: Merge & Push Server
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_server == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
DOCKER_REPO: altran1502/immich-server
needs:
- build_and_push_server
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
with:
path: ${{ runner.temp }}/digests
pattern: server-digests-*
merge-multiple: true
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag
latest=false
suffix=${{ matrix.suffix }}
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch
# Tag with pr-number
type=ref,event=pr
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-
# Tag with git tag on release
type=ref,event=tag
type=raw,value=release,enable=${{ github.event_name == 'release' }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
success-check-server:
name: Docker Build & Push Server Success
needs: [merge_server, retag_server]
needs: [build_and_push_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
@@ -459,7 +298,7 @@ jobs:
success-check-ml:
name: Docker Build & Push ML Success
needs: [merge_ml, retag_ml]
needs: [build_and_push_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:

.github/workflows/docs-build.yml

@@ -3,6 +3,7 @@ on:
push:
branches: [main]
pull_request:
branches: [main]
release:
types: [published]
@@ -14,21 +15,19 @@ jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
uses: dorny/paths-filter@v3
with:
filters: |
docs:
- 'docs/**'
workflow:
- '.github/workflows/docs-build.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build
@@ -41,10 +40,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './docs/.nvmrc'
@@ -58,7 +57,7 @@ jobs:
run: npm run build
- name: Upload build output
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
uses: actions/upload-artifact@v4
with:
name: docs-build-output
path: docs/build/


@@ -1,7 +1,7 @@
name: Docs deploy
on:
workflow_run:
workflows: ['Docs build']
workflows: ["Docs build"]
types:
- completed
@@ -17,7 +17,7 @@ jobs:
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
@@ -35,7 +35,7 @@ jobs:
return { found: true, id: matchArtifact.id };
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v7
with:
script: |
const eventType = context.payload.workflow_run.event;
@@ -98,11 +98,11 @@ jobs:
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Load parameters
id: parameters
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v7
with:
script: |
const json = `${{ needs.checks.outputs.parameters }}`;
@@ -115,7 +115,7 @@ jobs:
echo "Starting docs deployment for ${{ steps.parameters.outputs.event }} ${{ steps.parameters.outputs.name }}"
- name: Download artifact
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v7
with:
script: |
let artifact = ${{ needs.checks.outputs.artifact }};
@@ -138,12 +138,12 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
uses: gruntwork-io/terragrunt-action@v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
tg_dir: 'deployment/modules/cloudflare/docs'
tg_command: 'apply'
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "apply"
- name: Deploy Docs Subdomain Output
id: docs-output
@@ -153,12 +153,12 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
uses: gruntwork-io/terragrunt-action@v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
tg_dir: 'deployment/modules/cloudflare/docs'
tg_command: 'output -json'
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "output -json"
- name: Output Cleaning
id: clean
@@ -167,13 +167,13 @@ jobs:
echo "output=$TG_OUT" >> $GITHUB_OUTPUT
- name: Publish to Cloudflare Pages
uses: cloudflare/pages-action@f0a1cd58cd66095dee69bfa18fa5efd1dde93bca # v1
uses: cloudflare/pages-action@v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN_PAGES_UPLOAD }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ fromJson(steps.clean.outputs.output).pages_project_name.value }}
workingDirectory: 'docs'
directory: 'build'
workingDirectory: "docs"
directory: "build"
branch: ${{ steps.parameters.outputs.name }}
wranglerVersion: '3'
@@ -184,7 +184,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
uses: gruntwork-io/terragrunt-action@v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@@ -192,7 +192,7 @@ jobs:
tg_command: 'apply'
- name: Comment
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
uses: actions-cool/maintain-one-comment@v3
if: ${{ steps.parameters.outputs.event == 'pr' }}
with:
number: ${{ fromJson(needs.checks.outputs.parameters).pr_number }}


@@ -9,24 +9,24 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Destroy Docs Subdomain
env:
TF_VAR_prefix_name: 'pr-${{ github.event.number }}'
TF_VAR_prefix_event_type: 'pr'
TF_VAR_prefix_name: "pr-${{ github.event.number }}"
TF_VAR_prefix_event_type: "pr"
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
uses: gruntwork-io/terragrunt-action@v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
tg_dir: 'deployment/modules/cloudflare/docs'
tg_command: 'destroy -refresh=false'
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy -refresh=false"
- name: Comment
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
uses: actions-cool/maintain-one-comment@v3
with:
number: ${{ github.event.number }}
delete: true


@@ -13,19 +13,19 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
@@ -33,13 +33,13 @@ jobs:
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v7
if: always()
with:
script: |
@@ -49,3 +49,4 @@ jobs:
repo: context.repo.repo,
name: 'fix:formatting'
})


@@ -12,11 +12,11 @@ jobs:
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@388fd6af37b34cdfe5a23b37060e763217e58b03 # v5
uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 1
use_regex: true
labels: 'changelog:.*'
labels: "changelog:.*"
add_comment: true
message: 'Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label.'
message: "Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label."


@@ -1,6 +1,6 @@
name: 'Pull Request Labeler'
name: "Pull Request Labeler"
on:
- pull_request_target
- pull_request_target
jobs:
labeler:
@@ -9,4 +9,4 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
- uses: actions/labeler@v5


@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@b628c5a234cc32513014b7bfdd1e47b532124d98 # 1.3.0
uses: ytanikin/PRConventionalCommits@1.3.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'


@@ -31,25 +31,25 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
- name: Install uv
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5
- name: Install Poetry
run: pipx install poetry
- name: Bump version
run: misc/release/pump-version.sh -s "${{ inputs.serverBump }}" -m "${{ inputs.mobileBump }}"
- name: Commit and tag
id: push-tag
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
@@ -70,23 +70,23 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
- name: Download APK
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
uses: actions/download-artifact@v4
with:
name: release-apk-signed
- name: Create draft release
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
uses: softprops/action-gh-release@v2
with:
draft: true
tag_name: ${{ env.IMMICH_VERSION }}


@@ -1,33 +0,0 @@
name: Preview label
on:
pull_request:
types: [labeled, closed]
jobs:
comment-status:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'labeled' && github.event.label.name == 'preview' }}
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2
with:
message-id: 'preview-status'
message: 'Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/'
remove-label:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'preview') }}
permissions:
pull-requests: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'preview'
})


@@ -15,9 +15,9 @@ jobs:
run:
working-directory: ./open-api/typescript-sdk
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
- uses: actions/setup-node@v4
with:
node-version-file: './open-api/typescript-sdk/.nvmrc'
registry-url: 'https://registry.npmjs.org'

.github/workflows/static_analysis.yml

@@ -16,18 +16,16 @@ jobs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
workflow:
- '.github/workflows/static_analysis.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis
@@ -38,10 +36,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Flutter SDK
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
uses: subosito/flutter-action@v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@@ -50,26 +48,6 @@ jobs:
run: dart pub get
working-directory: ./mobile
- name: Run Build Runner
run: make build
working-directory: ./mobile
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
id: verify-changed-files
with:
files: |
mobile/**/*.g.dart
mobile/**/*.gr.dart
mobile/**/*.drift.dart
- name: Verify files have not changed
if: steps.verify-changed-files.outputs.files_changed == 'true'
run: |
echo "ERROR: Generated files not up to date! Run make_build inside the mobile directory"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
exit 1
- name: Run dart analyze
run: dart analyze --fatal-infos
working-directory: ./mobile
@@ -81,3 +59,8 @@ jobs:
- name: Run dart custom_lint
run: dart run custom_lint
working-directory: ./mobile
# Enable after riverpod generator migration is completed
# - name: Run dart custom lint
# run: dart run custom_lint
# working-directory: ./mobile

.github/workflows/test.yml

@@ -21,12 +21,11 @@ jobs:
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_.github: ${{ steps.found_paths.outputs['.github'] == 'true' || steps.should_force.outputs.should_force == 'true' }} # redundant to have should_force but if someone changes the trigger then this won't have to be changed
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
uses: dorny/paths-filter@v3
with:
filters: |
web:
@@ -44,14 +43,10 @@ jobs:
- 'mobile/**'
machine-learning:
- 'machine-learning/**'
workflow:
- '.github/workflows/test.yml'
.github:
- '.github/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests:
name: Test & Lint Server
@@ -64,10 +59,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
@@ -101,10 +96,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
@@ -142,10 +137,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
@@ -176,10 +171,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './web/.nvmrc'
@@ -221,10 +216,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
@@ -249,30 +244,25 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
server-medium-tests:
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./server
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/checkout@v4
with:
node-version-file: './server/.nvmrc'
submodules: 'recursive'
- name: Run npm install
run: npm ci
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
run: npm run test:medium
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
@@ -285,12 +275,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
@@ -327,12 +317,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
@@ -363,9 +353,9 @@ jobs:
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
- name: Setup Flutter SDK
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
uses: subosito/flutter-action@v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
@@ -382,60 +372,34 @@ jobs:
run:
working-directory: ./machine-learning
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Install uv
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5
# TODO: add caching when supported (https://github.com/actions/setup-python/pull/818)
# with:
# python-version: 3.11
# cache: 'uv'
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry
- uses: actions/setup-python@v5
with:
python-version: 3.11
cache: 'poetry'
- name: Install dependencies
run: |
uv sync --extra cpu
poetry install --with dev --with cpu
- name: Lint with ruff
run: |
uv run ruff check --output-format=github immich_ml
poetry run ruff check --output-format=github app export
- name: Check black formatting
run: |
uv run black --check immich_ml
poetry run black --check app export
- name: Run mypy type checking
run: |
uv run mypy --strict immich_ml/
poetry run mypy --install-types --non-interactive --strict app/
- name: Run tests and coverage
run: |
uv run pytest --cov=immich_ml --cov-report term-missing
github-files-formatting:
name: .github Files Formatting
needs: pre-job
if: ${{ needs.pre-job.outputs['should_run_.github'] == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./.github
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './.github/.nvmrc'
- name: Run npm install
run: npm ci
- name: Run formatter
run: npm run format
if: ${{ !cancelled() }}
poetry run pytest app --cov=app --cov-report term-missing
shellcheck:
name: ShellCheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- uses: actions/checkout@v4
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
with:
@@ -449,10 +413,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
@@ -466,7 +430,7 @@ jobs:
run: make open-api
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
uses: tj-actions/verify-changed-files@v20
id: verify-changed-files
with:
files: |
@@ -486,7 +450,7 @@ jobs:
runs-on: ubuntu-latest
services:
postgres:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
@@ -504,10 +468,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
@@ -525,10 +489,10 @@ jobs:
- name: Generate new migrations
continue-on-error: true
run: npm run migrations:generate TestMigration
run: npm run typeorm:migrations:generate ./src/migrations/TestMigration
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
uses: tj-actions/verify-changed-files@v20
id: verify-changed-files
with:
files: |
@@ -538,7 +502,6 @@ jobs:
run: |
echo "ERROR: Generated migration files not up to date!"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
cat ./src/*-TestMigration.ts
exit 1
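This is the standard generated-files-must-be-committed guard: regenerate, then fail if the working tree differs from the checkout. A rough git-only equivalent of the verify-changed-files check (paths illustrative):

npm run migrations:generate TestMigration   # script name taken from the hunk above
git diff --exit-code -- src/ \
  || { echo "ERROR: Generated migration files not up to date!"; exit 1; }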
- name: Run SQL generation
@@ -547,7 +510,7 @@ jobs:
DB_URL: postgres://postgres:postgres@localhost:5432/immich
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
uses: tj-actions/verify-changed-files@v20
id: verify-changed-sql-files
with:
files: |


@@ -1,57 +0,0 @@
name: Weblate checks
on:
pull_request:
branches: [main]
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
i18n:
- 'i18n/!(en)**\.json'
- name: Debug
run: |
echo "Should run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}"
echo "Found i18n paths: ${{ steps.found_paths.outputs.i18n }}"
echo "Head ref: ${{ github.head_ref }}"
enforce-lock:
name: Check Weblate Lock
needs: [pre-job]
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
steps:
- name: Check weblate lock
run: |
if [[ "false" = $(curl https://hosted.weblate.org/api/components/immich/immich/lock/ | jq .locked) ]]; then
exit 1
fi
- name: Find Pull Request
uses: juliangruber/find-pull-request-action@48b6133aa6c826f267ebd33aa2d29470f9d9e7d0 # v1
id: find-pr
with:
branch: chore/translations
- name: Fail if existing weblate PR
if: ${{ steps.find-pr.outputs.number }}
run: exit 1
success-check-lock:
name: Weblate Lock Check Success
needs: [enforce-lock]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"

View File

@@ -39,7 +39,7 @@ attach-server:
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk docs .github
MODULES = e2e server web cli sdk docs
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
@@ -77,14 +77,14 @@ test-medium:
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(filter-out e2e .github,$(MODULES)),build-$M) ;
build-all: $(foreach M,$(filter-out e2e,$(MODULES)),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs .github,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),lint-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),test-$M) ;
test-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +

View File

@@ -1,11 +1,11 @@
<p align="center">
<br/>
<br/>
<a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a>
<a href="https://discord.immich.app">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
<br/>
<br/>
<br/>
</p>
<p align="center">
@@ -63,7 +63,7 @@
Access the demo [here](https://demo.immich.app). The demo is running on a Free-tier Oracle VM in Amsterdam with a 2.4 GHz quad-core ARM64 CPU and 24 GB RAM.
For the mobile app, you can use `https://demo.immich.app` for the `Server Endpoint URL`
For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`
### Login credentials
@@ -104,7 +104,7 @@ For the mobile app, you can use `https://demo.immich.app` for the `Server Endpoi
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | Yes | Yes |
| Folder View | No | Yes |
## Translations

View File

@@ -1 +1 @@
22.14.0
22.13.1

View File

@@ -1,4 +1,4 @@
FROM node:22.14.0-alpine3.20@sha256:40be979442621049f40b1d51a26b55e281246b5de4e5f51a18da7beb6e17e3f9 AS core
FROM node:22.13.1-alpine3.20@sha256:c52e20859a92b3eccbd3a36c5e1a90adc20617d8d421d65e8a622e87b5dac963 AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

View File

@@ -1,29 +1,39 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended';
import eslintPluginUnicorn from 'eslint-plugin-unicorn';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import typescriptEslint from 'typescript-eslint';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default typescriptEslint.config([
eslintPluginUnicorn.configs.recommended,
eslintPluginPrettierRecommended,
js.configs.recommended,
typescriptEslint.configs.recommended,
export default [
{
ignores: ['eslint.config.mjs', 'dist'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: typescriptEslint.parser,
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
@@ -48,4 +58,4 @@ export default typescriptEslint.config([
'object-shorthand': ['error', 'always'],
},
},
]);
];

cli/package-lock.json (generated, 1422 changed lines)

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.60",
"version": "2.2.50",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -19,9 +19,10 @@
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.14",
"@types/node": "^22.13.1",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
@@ -29,13 +30,12 @@
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^57.0.0",
"globals": "^16.0.0",
"eslint-plugin-unicorn": "^56.0.1",
"globals": "^15.9.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"typescript-eslint": "^8.28.0",
"vite": "^6.0.0",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0",
@@ -62,13 +62,11 @@
"node": ">=20.0.0"
},
"dependencies": {
"chokidar": "^4.0.3",
"fast-glob": "^3.3.2",
"fastq": "^1.17.1",
"lodash-es": "^4.17.21",
"micromatch": "^4.0.8"
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.14.0"
"node": "22.13.1"
}
}

View File

@@ -1,13 +1,12 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { setTimeout as sleep } from 'node:timers/promises';
import { describe, expect, it, MockedFunction, vi } from 'vitest';
import { describe, expect, it, vi } from 'vitest';
import { Action, checkBulkUpload, defaults, getSupportedMediaTypes, Reason } from '@immich/sdk';
import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';
import { checkForDuplicates, getAlbumName, startWatch, uploadFiles, UploadOptionsDto } from 'src/commands/asset';
import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
vi.mock('@immich/sdk');
@@ -200,112 +199,3 @@ describe('checkForDuplicates', () => {
});
});
});
describe('startWatch', () => {
let testFolder: string;
let checkBulkUploadMocked: MockedFunction<typeof checkBulkUpload>;
beforeEach(async () => {
vi.restoreAllMocks();
vi.mocked(getSupportedMediaTypes).mockResolvedValue({
image: ['.jpg'],
sidecar: ['.xmp'],
video: ['.mp4'],
});
testFolder = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'test-startWatch-'));
checkBulkUploadMocked = vi.mocked(checkBulkUpload);
checkBulkUploadMocked.mockResolvedValue({
results: [],
});
});
it('should start watching a directory and upload new files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
expect.objectContaining({
id: testFilePath,
}),
],
},
});
});
it('should filter out unsupported files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const unsupportedFilePath = path.join(testFolder, 'test.txt');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(unsupportedFilePath, 'testtxt');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: unsupportedFilePath,
}),
]),
},
});
});
it('should filter out ignored patterns', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const ignoredPattern = 'ignored';
const ignoredFolder = path.join(testFolder, ignoredPattern);
await fs.promises.mkdir(ignoredFolder, { recursive: true });
const ignoredFilePath = path.join(ignoredFolder, 'ignored.jpg');
await startWatch([testFolder], { concurrency: 1, ignore: ignoredPattern }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(ignoredFilePath, 'ignoredjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: ignoredFilePath,
}),
]),
},
});
});
afterEach(async () => {
await fs.promises.rm(testFolder, { recursive: true, force: true });
});
});

View File

@@ -12,18 +12,13 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { Matcher, watch as watchFs } from 'chokidar';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import micromatch from 'micromatch';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, Batcher, authenticate, crawl, sha1 } from 'src/utils';
const UPLOAD_WATCH_BATCH_SIZE = 100;
const UPLOAD_WATCH_DEBOUNCE_TIME_MS = 10_000;
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
const s = (count: number) => (count === 1 ? '' : 's');
@@ -41,8 +36,6 @@ export interface UploadOptionsDto {
albumName?: string;
includeHidden?: boolean;
concurrency: number;
progress?: boolean;
watch?: boolean;
}
class UploadFile extends File {
@@ -62,94 +55,19 @@ class UploadFile extends File {
}
}
const uploadBatch = async (files: string[], options: UploadOptionsDto) => {
const { newFiles, duplicates } = await checkForDuplicates(files, options);
const newAssets = await uploadFiles(newFiles, options);
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(newFiles, options);
};
export const startWatch = async (
paths: string[],
options: UploadOptionsDto,
{
batchSize = UPLOAD_WATCH_BATCH_SIZE,
debounceTimeMs = UPLOAD_WATCH_DEBOUNCE_TIME_MS,
}: { batchSize?: number; debounceTimeMs?: number } = {},
) => {
const watcherIgnored: Matcher[] = [];
const { image, video } = await getSupportedMediaTypes();
const extensions = new Set([...image, ...video]);
if (options.ignore) {
watcherIgnored.push((path) => micromatch.contains(path, `**/${options.ignore}`));
}
const pathsBatcher = new Batcher<string>({
batchSize,
debounceTimeMs,
onBatch: async (paths: string[]) => {
const uniquePaths = [...new Set(paths)];
await uploadBatch(uniquePaths, options);
},
});
const onFile = async (path: string, stats?: Stats) => {
if (stats?.isDirectory()) {
return;
}
const ext = '.' + path.split('.').pop()?.toLowerCase();
if (!ext || !extensions.has(ext)) {
return;
}
if (!options.progress) {
// only log when progress bars are disabled, as logging can cause issues with the progress bar rendering
console.log(`Change detected: ${path}`);
}
pathsBatcher.add(path);
};
const fsWatcher = watchFs(paths, {
ignoreInitial: true,
ignored: watcherIgnored,
alwaysStat: true,
awaitWriteFinish: true,
depth: options.recursive ? undefined : 1,
persistent: true,
})
.on('add', onFile)
.on('change', onFile)
.on('error', (error) => console.error(`Watcher error: ${error}`));
process.on('SIGINT', async () => {
console.log('Exiting...');
await fsWatcher.close();
process.exit();
});
};
export const upload = async (paths: string[], baseOptions: BaseOptions, options: UploadOptionsDto) => {
await authenticate(baseOptions);
const scanFiles = await scan(paths, options);
if (scanFiles.length === 0) {
if (options.watch) {
console.log('No files found initially.');
} else {
console.log('No files found, exiting');
return;
}
console.log('No files found, exiting');
return;
}
if (options.watch) {
console.log('Watching for changes...');
await startWatch(paths, options);
// watcher does not handle the initial scan
// as the scan() is a more efficient quick start with batched results
}
await uploadBatch(scanFiles, options);
const { newFiles, duplicates } = await checkForDuplicates(scanFiles, options);
const newAssets = await uploadFiles(newFiles, options);
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(newFiles, options);
};
const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
@@ -167,25 +85,19 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files;
};
export const checkForDuplicates = async (files: string[], { concurrency, skipHash, progress }: UploadOptionsDto) => {
export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) {
console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] };
}
let multiBar: MultiBar | undefined;
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
if (progress) {
multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
} else {
console.log(`Received ${files.length} files, hashing...`);
}
const hashProgressBar = multiBar?.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar?.create(files.length, 0, { message: 'Checking for duplicates' });
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
const newFiles: string[] = [];
const duplicates: Asset[] = [];
@@ -205,7 +117,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
}
}
checkProgressBar?.increment(assets.length);
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
@@ -225,7 +137,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
void checkBulkUploadQueue.push(batch);
}
hashProgressBar?.increment();
hashProgressBar.increment();
return results;
},
{ concurrency, retry: 3 },
@@ -243,7 +155,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
await checkBulkUploadQueue.drained();
multiBar?.stop();
multiBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
@@ -259,10 +171,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles, duplicates };
};
export const uploadFiles = async (
files: string[],
{ dryRun, concurrency, progress }: UploadOptionsDto,
): Promise<Asset[]> => {
export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.');
return [];
@@ -282,20 +191,12 @@ export const uploadFiles = async (
return files.map((filepath) => ({ id: '', filepath }));
}
let uploadProgress: SingleBar | undefined;
if (progress) {
uploadProgress = new SingleBar(
{
format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}',
},
Presets.shades_classic,
);
} else {
console.log(`Uploading ${files.length} asset${s(files.length)} (${byteSize(totalSize)})`);
}
uploadProgress?.start(totalSize, 0);
uploadProgress?.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
const uploadProgress = new SingleBar(
{ format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}' },
Presets.shades_classic,
);
uploadProgress.start(totalSize, 0);
uploadProgress.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
let duplicateCount = 0;
let duplicateSize = 0;
@@ -321,7 +222,7 @@ export const uploadFiles = async (
successSize += stats.size ?? 0;
}
uploadProgress?.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
return response;
},
@@ -334,7 +235,7 @@ export const uploadFiles = async (
await queue.drained();
uploadProgress?.stop();
uploadProgress.stop();
console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) {

View File

@@ -69,13 +69,6 @@ program
.default(4),
)
.addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
.addOption(new Option('--no-progress', 'Hide progress bars').env('IMMICH_PROGRESS_BAR').default(true))
.addOption(
new Option('--watch', 'Watch for changes and upload automatically')
.env('IMMICH_WATCH_CHANGES')
.default(false)
.implies({ progress: false }),
)
.argument('[paths...]', 'One or more paths to assets to be uploaded')
.action((paths, options) => upload(paths, program.opts(), options));

View File

@@ -1,7 +1,6 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { Batcher, CrawlOptions, crawl } from 'src/utils';
import { Mock } from 'vitest';
import { CrawlOptions, crawl } from 'src/utils';
interface Test {
test: string;
@@ -304,38 +303,3 @@ describe('crawl', () => {
}
});
});
describe('Batcher', () => {
let batcher: Batcher;
let onBatch: Mock;
beforeEach(() => {
onBatch = vi.fn();
batcher = new Batcher({ batchSize: 2, onBatch });
});
it('should trigger onBatch() when a batch limit is reached', async () => {
batcher.add('a');
batcher.add('b');
batcher.add('c');
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a', 'b']);
});
it('should trigger onBatch() when flush() is called', async () => {
batcher.add('a');
batcher.flush();
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
});
it('should trigger onBatch() when debounce time reached', async () => {
vi.useFakeTimers();
batcher = new Batcher({ batchSize: 2, debounceTimeMs: 100, onBatch });
batcher.add('a');
expect(onBatch).not.toHaveBeenCalled();
vi.advanceTimersByTime(200);
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
vi.useRealTimers();
});
});

View File

@@ -172,64 +172,3 @@ export const sha1 = (filepath: string) => {
rs.on('end', () => resolve(hash.digest('hex')));
});
};
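// Only the tail of the sha1 helper is visible in this hunk. As a hedged
// reconstruction (an assumption based on the visible lines, not the exact
// source), a streaming hash helper of this shape might look like:
//
//   import { createHash } from 'node:crypto';
//   import { createReadStream } from 'node:fs';
//
//   export const sha1 = (filepath: string) =>
//     new Promise<string>((resolve, reject) => {
//       const hash = createHash('sha1');
//       const rs = createReadStream(filepath);
//       rs.on('error', reject);
//       rs.on('data', (chunk) => hash.update(chunk));
//       rs.on('end', () => resolve(hash.digest('hex')));
//     });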
/**
* Batches items and calls onBatch to process them
* when the batch size is reached or the debounce time has passed.
*/
export class Batcher<T = unknown> {
private items: T[] = [];
private readonly batchSize: number;
private readonly debounceTimeMs?: number;
private readonly onBatch: (items: T[]) => void;
private debounceTimer?: NodeJS.Timeout;
constructor({
batchSize,
debounceTimeMs,
onBatch,
}: {
batchSize: number;
debounceTimeMs?: number;
onBatch: (items: T[]) => Promise<void>;
}) {
this.batchSize = batchSize;
this.debounceTimeMs = debounceTimeMs;
this.onBatch = onBatch;
}
private setDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
}
if (this.debounceTimeMs) {
this.debounceTimer = setTimeout(() => this.flush(), this.debounceTimeMs);
}
}
private clearDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
this.debounceTimer = undefined;
}
}
add(item: T) {
this.items.push(item);
this.setDebounceTimer();
if (this.items.length >= this.batchSize) {
this.flush();
}
}
flush() {
this.clearDebounceTimer();
if (this.items.length === 0) {
return;
}
this.onBatch(this.items);
this.items = [];
}
}
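// A minimal usage sketch (illustrative only, not part of this diff): batch
// items so onBatch fires after 2 items or after 100 ms of quiet.
//
//   const batcher = new Batcher<string>({
//     batchSize: 2,
//     debounceTimeMs: 100,
//     onBatch: async (paths) => console.log('processing', paths),
//   });
//   batcher.add('a.jpg');
//   batcher.add('b.jpg'); // batch size reached -> onBatch(['a.jpg', 'b.jpg'])
//   batcher.add('c.jpg'); // flushed on its own after 100 ms of inactivity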

View File

@@ -1,13 +1,4 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
# For development see:
# See:
# - https://immich.app/docs/developer/setup
# - https://immich.app/docs/developer/troubleshooting
@@ -25,7 +16,7 @@ services:
context: ../
dockerfile: server/Dockerfile
target: dev
restart: unless-stopped
restart: always
volumes:
- ../server:/usr/src/app
- ../open-api:/usr/src/open-api
@@ -95,12 +86,12 @@ services:
image: immich-machine-learning-dev:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports:
- 3003:3003
volumes:
@@ -116,13 +107,13 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
healthcheck:
test: redis-cli ping || exit 1
database:
container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file:
- .env
environment:

View File

@@ -1,12 +1,3 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich-prod
services:
@@ -38,12 +29,12 @@ services:
image: immich-machine-learning:latest
# extends:
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
build:
context: ../machine-learning
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports:
- 3003:3003
volumes:
@@ -56,14 +47,14 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file:
- .env
environment:
@@ -77,12 +68,22 @@ services:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
@@ -90,7 +91,7 @@ services:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:502ad90314c7485892ce696cb14a99fceab9fc27af29f4b427f41bd39701a199
image: prom/prometheus@sha256:6559acbd5d770b15bb3c954629ce190ac3cbbdb2b7f1c30f0385c4e05104e218
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -99,10 +100,10 @@ services:
# add data source for http://immich-prometheus:9090 to get started
immich-grafana:
container_name: immich_grafana
command: [ './run.sh', '-disable-reporting' ]
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.5.2-ubuntu@sha256:8b5858c447e06fd7a89006b562ba7bba7c4d5813600c7982374c41852adefaeb
image: grafana/grafana:11.5.1-ubuntu@sha256:9a4ab78cec1a2ec7d1ca5dfd5aacec6412706a1bc9e971fc7184e2f6696a63f5
volumes:
- grafana-data:/var/lib/grafana

View File

@@ -1,11 +1,10 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
name: immich
@@ -33,12 +32,12 @@ services:
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
@@ -49,14 +48,14 @@ services:
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: docker.io/redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
@@ -67,12 +66,22 @@ services:
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
restart: always
volumes:

View File

@@ -13,13 +13,6 @@ services:
volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
rknn:
security_opt:
- systempaths=unconfined
- apparmor=unconfined
devices:
- /dev/dri:/dev/dri
cpu: {}
@@ -33,13 +26,6 @@ services:
capabilities:
- gpu
rocm:
group_add:
- video
devices:
- /dev/dri:/dev/dri
- /dev/kfd:/dev/kfd
openvino:
device_cgroup_rules:
- 'c 189:* rmw'

View File

@@ -1 +1 @@
22.14.0
22.13.1

View File

@@ -97,7 +97,7 @@ Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to al
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, please know that they set a maximum filesize of 100 MB that cannot be changed.
If you are using Cloudflare Tunnel, please know that they set a maxiumum filesize of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
@@ -117,7 +117,7 @@ See [Backup and Restore](/docs/administration/backup-and-restore.md).
### Does Immich support reading existing face tag metadata?
Yes, it creates new faces and persons from the imported asset metadata. For details see the [feature request #4348](https://github.com/immich-app/immich/discussions/4348) and [PR #6455](https://github.com/immich-app/immich/pull/6455).
No, it currently does not. There is an [open feature request on GitHub](https://github.com/immich-app/immich/discussions/4348).
### Does Immich support the filtering of NSFW images?
@@ -170,7 +170,7 @@ If you aren't able to or prefer not to mount Samba on the host (such as Windows
Below is an example in the `docker-compose.yml`.
Change your username, password, local IP, and share name, and see below where the line `- originals:/usr/src/app/originals`,
correlates to the section where the volume `originals` was created. You can call this whatever you like, and map it to the docker container as you like.
corrolates to the section where the volume `originals` was created. You can call this whatever you like, and map it to the docker container as you like.
For example you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `Photos:/usr/src/app/photos`.
```diff
@@ -262,7 +262,7 @@ No, this is not supported. Only models listed in the [Hugging Face][huggingface]
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/searching#clip-models) for instructions.
You can change to a multilingual CLIP model. See [here](/docs/features/searching#clip-model) for instructions.
### Does Immich support Facial Recognition for videos?

View File

@@ -30,13 +30,6 @@ As mentioned above, you should make your own backup of these together with the a
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Trigger Backup
You are able to trigger a backup in the [admin job status page](http://my.immich.app/admin/jobs-status).
Visit the page, open the "Create job" modal from the top right, select "Backup Database" and click "Confirm".
A job will run and trigger a backup; you can verify it worked correctly by checking the logs or the backup folder.
This backup will count towards the last X backups that will be kept based on your settings.
#### Restoring
We hope to make restoring simpler in future versions, for now you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
@@ -60,7 +53,7 @@ docker compose create # Create Docker containers for Immich apps witho
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip --stdout "/path/to/backup/dump.sql.gz" \
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
docker compose up -d # Start remainder of Immich apps
@@ -83,8 +76,10 @@ docker compose create # Create Docker containers for
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip --stdout`
cat "/dump.sql" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | psql --dbname=postgres --username=<DB_USERNAME>
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip`
cat < "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
```

View File

@@ -70,4 +70,4 @@ When installing a new version of pgvecto.rs, you will need to manually update th
If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;`.
[vectors-install]: https://docs.vectorchord.ai/getting-started/installation.html
[vectors-install]: https://docs.pgvecto.rs/getting-started/installation.html

View File

@@ -11,7 +11,6 @@ The `immich-server` docker image comes preinstalled with an administrative CLI (
| `enable-oauth-login` | Enable OAuth login |
| `disable-oauth-login` | Disable OAuth login |
| `list-users` | List Immich users |
| `version` | Print Immich version |
## How to run a command
@@ -81,10 +80,3 @@ immich-admin list-users
}
]
```
Print Immich Version
```
immich-admin version
v1.129.0
```

View File

@@ -98,14 +98,6 @@ The default Immich log level is `Log` (commonly known as `Info`). The Immich adm
Through this setting, you can manage everything related to machine learning in Immich, from remote machine learning settings to the models and their parameters.
You can choose to disable a certain type of machine learning, for example smart search or facial recognition.
### URL
The built-in machine learning server (`http://immich-machine-learning:3003`) is configured by default, but you can change this or add additional servers.
Hosting the `immich-machine-learning` container on a machine with a more powerful GPU can be helpful for processing a large number of photos (such as during batch import) or for faster search.
If more than one URL is provided, each server will be attempted one at a time, in order from first to last, until one responds successfully. Servers that don't respond will be temporarily ignored until they come back online.
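For illustration, a minimal sketch of that failover order (an assumption added for clarity, not Immich's actual code; the `/ping` route and the names here are illustrative):

```ts
// Try each configured machine-learning server in order; first healthy one wins.
async function pickServer(urls: string[]): Promise<string> {
  for (const url of urls) {
    try {
      const res = await fetch(`${url}/ping`, { signal: AbortSignal.timeout(2000) });
      if (res.ok) {
        return url; // servers are attempted in configured order, first to last
      }
    } catch {
      // unreachable server: skip it and try the next one
    }
  }
  throw new Error('No machine-learning server responded');
}
```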
### Smart Search
The [smart search](/docs/features/searching) settings allow you to change the [CLIP model](https://openai.com/research/clip). Larger models will typically provide [more accurate search results](https://github.com/immich-app/immich/discussions/11862) but consume more processing power and RAM. When [changing the CLIP model](/docs/FAQ#can-i-use-a-custom-clip-model) it is mandatory to re-run the Smart Search job on all images to fully apply the change.

View File

@@ -69,8 +69,6 @@ Navigating to Administration > Settings > Machine Learning Settings > Facial Rec
:::tip
It's better to only tweak the parameters here than to set them to something very different unless you're ready to test a variety of options. If you do need to set a parameter to a strict setting, relaxing other settings can be a good option to compensate, and vice versa.
You can learn how to tune the results in this [Guide](/docs/guides/better-facial-clusters)
:::
### Facial recognition model

View File

(image diff: 4.9 MiB before and after)

View File

@@ -37,7 +37,7 @@ To validate that Immich can reach your external library, start a shell inside th
### Exclusion Patterns
By default, all files in the import paths will be added to the library. If there are files that should not be added, exclusion patterns can be used to exclude them. Exclusion patterns are glob patterns that are matched against the full file path. If a file matches an exclusion pattern, it will not be added to the library. Exclusion patterns can be added in the Scan Settings page for each library.
By default, all files in the import paths will be added to the library. If there are files that should not be added, exclusion patterns can be used to exclude them. Exclusion patterns are glob patterns that are matched against the full file path. If a file matches an exclusion pattern, it will not be added to the library. Exclusion patterns can be added in the Scan Settings page for each library. Under the hood, Immich uses the [glob](https://www.npmjs.com/package/glob) package to match patterns, so please refer to [their documentation](https://github.com/isaacs/node-glob#glob-primer) to see what patterns are supported.
Some basic examples:
@@ -48,11 +48,7 @@ Some basic examples:
Special characters such as @ should be escaped, for instance:
- `**/\@eaDir/**` will exclude all files in any directory named `@eaDir`
:::info
Internally, Immich uses the [glob](https://www.npmjs.com/package/glob) package to process exclusion patterns, and sometimes those patterns are translated into [Postgres LIKE patterns](https://www.postgresql.org/docs/current/functions-matching.html). The intention is to support basic folder exclusions but we recommend against advanced usage since those can't reliably be translated to the Postgres syntax. Please refer to the [glob documentation](https://github.com/isaacs/node-glob#glob-primer) for a basic overview on glob patterns.
:::
- `**/\@eadir/**` will exclude all files in any directory named `@eadir`
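As a quick illustration (a hedged sketch using the `micromatch` package that this repo's CLI uses for glob matching, not the library scanner's internals):

```ts
import micromatch from 'micromatch';

// Exclusion patterns are glob patterns matched against the full file path.
// (In the settings UI, the docs recommend escaping @ as \@.)
micromatch.isMatch('/photos/@eaDir/thumb.jpg', '**/@eaDir/**'); // true  -> excluded
micromatch.isMatch('/photos/2023/trip.jpg', '**/@eaDir/**'); // false -> imported
```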
### Automatic watching (EXPERIMENTAL)
@@ -72,7 +68,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion. It is possible to trigger the cleanup by clicking "Scan all libraries" in the library management page.
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
## Usage
@@ -95,7 +91,7 @@ The `immich-server` container will need access to the gallery. Modify your docke
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+ - /mnt/media/videos2:/mnt/media/videos2 # WARNING: Immich will be able to delete the files in this folder, as it does not end with :ro
+ - /mnt/media/videos2:/mnt/media/videos2 # the files in this folder can be deleted, as it does not end with :ro
+ - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path in Windows system.
```
@@ -115,10 +111,11 @@ These actions must be performed by the Immich administrator.
- Click on Administration -> Libraries
- Click on Create External Library
- Select which user owns the library, this can not be changed later
- Enter `/mnt/media/christmas-trip` then click Add
- Click on Save
- Click the drop-down menu on the newly created library
- Click on Rename Library and rename it to "Christmas Trip"
- Click Edit Import Paths
- Click on Add Path
- Enter `/mnt/media/christmas-trip` then click Add
NOTE: We have to use the `/mnt/media/christmas-trip` path and not the `/mnt/nas/christmas-trip` path since all paths have to be what the Docker containers see.

View File

@@ -11,9 +11,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- ARM NN (Mali)
- CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
- ROCm (AMD GPUs)
- OpenVINO (Intel GPUs such as Iris Xe and Arc)
- RKNN (Rockchip)
## Limitations
@@ -21,7 +19,6 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- Only Linux and Windows (through WSL2) servers are supported.
- ARM NN is only supported on devices with Mali GPUs. Other Arm devices are not supported.
- Some models may not be compatible with certain backends. CUDA is the most reliable.
- Search latency isn't improved by ARM NN due to model compatibility issues preventing its use. However, smart search jobs do make use of ARM NN.
## Prerequisites
@@ -36,7 +33,6 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The `hwaccel.ml.yml` file assumes the path to it is `/usr/lib/libmali.so`, so update accordingly if it is elsewhere
- The `hwaccel.ml.yml` file assumes an additional file `/lib/firmware/mali_csffw.bin`, so update accordingly if your device's driver does not require this file
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for ARM NN specific settings
- In particular, the `MACHINE_LEARNING_ANN_FP16_TURBO` setting can significantly improve performance at the cost of very slightly lower accuracy
#### CUDA
@@ -45,38 +41,22 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The installed driver must be >= 535 (it must support CUDA 12.2).
- On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.
#### ROCm
- The GPU must be supported by ROCm. If it isn't officially supported, you can attempt to use the `HSA_OVERRIDE_GFX_VERSION` environment variable: `HSA_OVERRIDE_GFX_VERSION=<a supported version, e.g. 10.3.0>`. If this doesn't work, you might need to also set `HSA_USE_SVM=0`.
- The ROCm image is quite large and requires at least 35GiB of free disk space. However, pulling later updates to the service through Docker will generally only amount to a few hundred megabytes as the rest will be cached.
- This backend is new and may experience some issues. For example, GPU power consumption can be higher than usual after running inference, even if the machine learning service is idle. In this case, it will only go back to normal after being idle for 5 minutes (configurable with the [MACHINE_LEARNING_MODEL_TTL](/docs/install/environment-variables) setting).
#### OpenVINO
- Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
- Expect higher RAM usage when using OpenVINO compared to CPU processing.
#### RKNN
- You must have a supported Rockchip SoC: only RK3566, RK3568, RK3576 and RK3588 are supported at this moment.
- Make sure you have the appropriate Linux kernel driver installed
- This is usually pre-installed on the device vendor's Linux images
- RKNPU driver V0.9.8 or later must be available in the host server
- You may confirm this by running `cat /sys/kernel/debug/rknpu/version` to check the version
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for RKNN specific settings
- In particular, setting `MACHINE_LEARNING_RKNN_THREADS` to 2 or 3 can _dramatically_ improve performance for RK3576 and RK3588 compared to the default of 1, at the expense of multiplying the amount of RAM each model uses by that amount.
## Setup
1. If you do not already have it, download the latest [`hwaccel.ml.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-machine-learning`, uncomment the `extends` section and change `cpu` to the appropriate backend.
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, rocm, openvino, rknn] to the `image` section's tag at the end of the line.
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, openvino] to the `image` section's tag at the end of the line.
4. Redeploy the `immich-machine-learning` container with these updated settings.
### Confirming Device Usage
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel, `intel_gpu_top` for Intel, and `radeontop` for AMD.
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel and `intel_gpu_top` for Intel.
You can also check the logs of the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, or when you search with text in Immich, you should either see a log for `Available ORT providers` containing the relevant provider (e.g. `CUDAExecutionProvider` in the case of CUDA), or a `Loaded ANN model` log entry without errors in the case of ARM NN.
@@ -147,12 +127,3 @@ Note that you should increase job concurrencies to increase overall utilization
- If you encounter an error when a model is running, try a different model to see if the issue is model-specific.
- You may want to increase concurrency past the default for higher utilization. However, keep in mind that this will also increase VRAM consumption.
- Larger models benefit more from hardware acceleration, if you have the VRAM for them.
- Compared to ARM NN, RKNPU has:
- Wider model support (including for search, which ARM NN does not accelerate)
- Less heat generation
- Very slightly lower accuracy (RKNPU always uses FP16, while ARM NN by default uses higher precision FP32 unless `MACHINE_LEARNING_ANN_FP16_TURBO` is enabled)
- Varying speed (tested on RK3588):
- If `MACHINE_LEARNING_RKNN_THREADS` is at the default of 1, RKNPU will have substantially lower throughput for ML jobs than ARM NN in most cases, but similar latency (such as when searching)
- If `MACHINE_LEARNING_RKNN_THREADS` is set to 3, it will be somewhat faster than ARM NN at FP32, but somewhat slower than ARM NN if `MACHINE_LEARNING_ANN_FP16_TURBO` is enabled
- When other tasks also use the GPU (like transcoding), RKNPU has a significant advantage over ARM NN as it uses the otherwise idle NPU instead of competing for GPU usage
- Lower RAM usage if `MACHINE_LEARNING_RKNN_THREADS` is at the default of 1, but significantly higher if greater than 1 (which is necessary for it to fully utilize the NPU and hence be comparable in speed to ARM NN)

File diff suppressed because it is too large

View File

@@ -18,7 +18,7 @@ For the full list, refer to the [Immich source code](https://github.com/immich-a
| `JPEG 2000` | `.jp2` | :white_check_mark: | |
| `JPEG` | `.webp` `.jpg` `.jpe` `.insp` | :white_check_mark: | |
| `JPEG XL` | `.jxl` | :white_check_mark: | |
| `PNG` | `.png` | :white_check_mark: | |
| `PNG` | `.webp` | :white_check_mark: | |
| `PSD` | `.psd` | :white_check_mark: | Adobe Photoshop |
| `RAW` | `.raw` | :white_check_mark: | |
| `RW2` | `.rw2` | :white_check_mark: | |

View File

@@ -1,72 +0,0 @@
# Better Facial Recognition Clusters
## Purpose
This guide explains how to optimize facial recognition in systems with large image libraries. By following these steps, you'll achieve better clustering of faces, reducing the need for manual merging.
---
## Important Notes
- **Best Suited For:** Large image libraries after importing a significant number of images.
- **Warning:** This method deletes all previously assigned names.
- **Tip:** **Always take a [backup](/docs/administration/backup-and-restore#database) before proceeding!**
---
## Step-by-Step Instructions
### Objective
To enhance face clustering and ensure the model effectively identifies faces using high-quality initial data.
---
### Steps
#### 1. Adjust Machine Learning Settings
Navigate to:
**Admin → Administration → Settings → Machine Learning Settings**
Make the following changes:
- **Maximum recognition distance (Optional):**
Lower this value, e.g., to **0.4**, if the library contains people with similar facial features.
- **Minimum recognized faces:**
Set this to a **high value** (e.g., 20 for libraries with a large number of assets (~100K+), and 10 for libraries with a medium number of assets (~40K+)).
> A high value ensures clusters only include faces that appear at least `value` times (e.g., 20) in the library, improving the initial clustering process.
---
#### 2. Run Reset Jobs
Go to:
**Admin → Administration → Settings → Jobs**
Perform the following:
1. **FACIAL RECOGNITION → Reset**
> These reset jobs rebuild the recognition model based on the new settings.
---
#### 3. Refine Recognition with Lower Thresholds
Once the reset jobs are complete, refine the recognition as follows:
- **Step 1:**
Return to **Minimum recognized faces** in Machine Learning Settings and lower the value to **10** (for medium libraries, lower it from 10 to **5**).
> Run the job: **FACIAL RECOGNITION → MISSING Mode**
- **Step 2:**
Lower the value again to **3**.
> Run the job: **FACIAL RECOGNITION → MISSING Mode**
:::tip try different values
Depending on how many assets a library has, different values will work better or worse. It is recommended to try different values **before assigning names** and see which settings work best for your library.
:::
---

View File

@@ -6,7 +6,7 @@ This guide explains how to store generated and raw files with docker's volume mo
If you use automatic backup tools, it is important to update the backup settings after following this guide so that the new paths, especially `profile/`, are backed up.
:::
In our `.env` file, we will define the paths we want to use. Note that you don't have to define all of these: UPLOAD_LOCATION will be the base folder that files are stored in by default, with the other paths acting as overrides.
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server
```diff title=".env"
# You can find documentation for all the supported environment variables [here](/docs/install/environment-variables)
@@ -21,7 +21,7 @@ In our `.env` file, we will define the paths we want to use. Note that you don't
...
```
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container. These paths are where the mount attaches inside of the container, so don't change those.
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
```diff title="docker-compose.yml"
services:
@@ -35,8 +35,7 @@ services:
- /etc/localtime:/etc/localtime:ro
```
After making this change, you have to move the files over to the new folders to make sure Immich can find everything it needs. If you haven't uploaded anything important yet, you can also reset Immich entirely by deleting the database folder.
Then restart Immich to register the changes:
Restart Immich to register the changes.
```
docker compose up -d

View File

@@ -31,10 +31,6 @@ SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
SELECT * FROM "assets" WHERE "id" = '9f94e60f-65b6-47b7-ae44-a4df7b57f0e9';
```
```sql title="Find by partial ID"
SELECT * FROM "assets" WHERE "id"::text LIKE '%ab431d3a%';
```
:::note
You can calculate the checksum for a particular file by using the command `sha1sum <filename>`.
:::

View File

@@ -23,12 +23,12 @@ name: immich_remote_ml
services:
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.ml.yml
# service: # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
# service: # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
restart: always

View File

@@ -1,7 +1,3 @@
---
sidebar_position: 100
---
# Config File
A config file can be provided as an alternative to the UI configuration.

View File

@@ -37,7 +37,7 @@ You can alternatively download these two files from your browser and move them t
</CodeBlock>
- Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets. It should be a new directory on the server with enough free space.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publicly exposed, so this password is only used for local authentication.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publically exposed, so this password is only used for local authentication.
To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`. `pwgen` is a handy utility for this.
- Set your timezone by uncommenting the `TZ=` line.
- Populate custom database information if necessary.
@@ -69,4 +69,39 @@ If you get an error `can't set healthcheck.start_interval as feature require Doc
## Next Steps
Read the [Post Installation](/docs/install/post-install.mdx) steps and [upgrade instructions](/docs/install/upgrading.md).
Read the [Post Installation](/docs/install/post-install.mdx) steps or setup optional features below.
### Setting up optional features
- [External Libraries](/docs/features/libraries.md): Adding your existing photo library to Immich
- [Hardware Transcoding](/docs/features/hardware-transcoding.md): Speeding up video transcoding
- [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md): Speeding up various machine learning tasks in Immich
### Upgrading
:::danger Read the release notes
Immich is currently under heavy development, which means you can expect [breaking changes][breaking] and bugs. Therefore, we recommend reading the release notes prior to updating and to take special care when using automated tools like [Watchtower][watchtower].
You can see versions that had breaking changes [here][breaking].
:::
If `IMMICH_VERSION` is set, it will need to be updated to the latest or desired version.
When a new version of Immich is [released][releases], the application can be upgraded and restarted with the following commands, run in the directory with the `docker-compose.yml` file:
```bash title="Upgrade and restart Immich"
docker compose pull && docker compose up -d
```
To clean up disk space, the old version's obsolete container images can be deleted with the following command:
```bash title="Clean up unused Docker images"
docker image prune
```
[compose-file]: https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
[env-file]: https://github.com/immich-app/immich/releases/latest/download/example.env
[watchtower]: https://containrrr.dev/watchtower/
[breaking]: https://github.com/immich-app/immich/discussions?discussions_q=label%3Achangelog%3Abreaking-change+sort%3Adate_created
[container-auth]: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry
[releases]: https://github.com/immich-app/immich/releases


@@ -11,7 +11,7 @@ Just restarting the containers does not replace the environment within the conta
In order to recreate the container using docker compose, run `docker compose up -d`.
In most cases docker will recognize that the `.env` file has changed and recreate the affected containers.
If this does not work, try running `docker compose up -d --force-recreate`.
If this should not work, try running `docker compose up -d --force-recreate`.
:::
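In other words:

```bash title="Apply .env changes (sketch)"
docker compose up -d                    # usually detects the changed .env
docker compose up -d --force-recreate   # fallback if the containers were not recreated
```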
@@ -20,8 +20,8 @@ If this does not work, try running `docker compose up -d --force-recreate`.
| Variable | Description | Default | Containers |
| :----------------- | :------------------------------ | :-------: | :----------------------- |
| `IMMICH_VERSION` | Image tags | `release` | server, machine learning |
| `UPLOAD_LOCATION` | Host path for uploads | | server |
| `DB_DATA_LOCATION` | Host path for Postgres database | | database |
| `UPLOAD_LOCATION` | Host Path for uploads | | server |
| `DB_DATA_LOCATION` | Host Path for Postgres database | | database |
:::tip
These environment variables are used by the `docker-compose.yml` file and do **NOT** affect the containers directly.
@@ -33,15 +33,15 @@ These environment variables are used by the `docker-compose.yml` file and do **N
| :---------------------------------- | :---------------------------------------------------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | <sup>\*1</sup> | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Number of cores available to the Immich server | auto-detected CPU core count | server | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
| `IMMICH_API_METRICS_PORT` | Port for the OTEL metrics | `8081` | server | api |
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
| `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
| `IMMICH_TRUSTED_PROXIES` | List of comma-separated IPs set as trusted proxies | | server | api |
| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
| `IMMICH_IGNORE_MOUNT_CHECK_ERRORS` | See [System Integrity](/docs/administration/system-integrity) | | server | api, microservices |
\*1: `TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
@@ -50,7 +50,7 @@ These environment variables are used by the `docker-compose.yml` file and do **N
\*2: This path is where the Immich code looks for the files, which is internal to the Docker container. Setting it to a path on your host will certainly break things; use the `UPLOAD_LOCATION` variable instead.
\*3: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method is changing.
It only need to be set if the Immich deployment method is changing.
## Workers
@@ -75,12 +75,12 @@ Information on the current workers can be found [here](/docs/administration/jobs
| Variable | Description | Default | Containers |
| :---------------------------------- | :----------------------------------------------------------------------- | :----------: | :----------------------------- |
| `DB_URL` | Database URL | | server |
| `DB_HOSTNAME` | Database host | `database` | server |
| `DB_PORT` | Database port | `5432` | server |
| `DB_USERNAME` | Database user | `postgres` | server, database<sup>\*1</sup> |
| `DB_PASSWORD` | Database password | `postgres` | server, database<sup>\*1</sup> |
| `DB_DATABASE_NAME` | Database name | `immich` | server, database<sup>\*1</sup> |
| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database vector extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
| `DB_HOSTNAME` | Database Host | `database` | server |
| `DB_PORT` | Database Port | `5432` | server |
| `DB_USERNAME` | Database User | `postgres` | server, database<sup>\*1</sup> |
| `DB_PASSWORD` | Database Password | `postgres` | server, database<sup>\*1</sup> |
| `DB_DATABASE_NAME` | Database Name | `immich` | server, database<sup>\*1</sup> |
| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database Vector Extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
| `DB_SKIP_MIGRATIONS` | Whether to skip running migrations on startup (one of [`true`, `false`]) | `false` | server |
\*1: The values of `DB_USERNAME`, `DB_PASSWORD`, and `DB_DATABASE_NAME` are passed to the Postgres container as the variables `POSTGRES_USER`, `POSTGRES_PASSWORD`, and `POSTGRES_DB` in `docker-compose.yml`.
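For reference, a `DB_URL` assembled from the defaults in the table above would look like this sketch (substitute your own credentials):

```bash title="Example DB_URL (sketch)"
DB_URL=postgres://postgres:postgres@database:5432/immich
```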
@@ -103,18 +103,18 @@ When `DB_URL` is defined, the `DB_HOSTNAME`, `DB_PORT`, `DB_USERNAME`, `DB_PASSW
| Variable | Description | Default | Containers |
| :--------------- | :------------- | :-----: | :--------- |
| `REDIS_URL` | Redis URL | | server |
| `REDIS_SOCKET` | Redis socket | | server |
| `REDIS_HOSTNAME` | Redis host | `redis` | server |
| `REDIS_PORT` | Redis port | `6379` | server |
| `REDIS_USERNAME` | Redis username | | server |
| `REDIS_PASSWORD` | Redis password | | server |
| `REDIS_DBINDEX` | Redis DB index | `0` | server |
| `REDIS_SOCKET` | Redis Socket | | server |
| `REDIS_HOSTNAME` | Redis Host | `redis` | server |
| `REDIS_PORT` | Redis Port | `6379` | server |
| `REDIS_USERNAME` | Redis Username | | server |
| `REDIS_PASSWORD` | Redis Password | | server |
| `REDIS_DBINDEX` | Redis DB Index | `0` | server |
:::info
All `REDIS_` variables must be provided to all Immich workers, including `api` and `microservices`.
`REDIS_URL` must start with `ioredis://` and then include a `base64` encoded JSON string for the configuration.
More information can be found in the upstream [ioredis] documentation.
More info can be found in the upstream [ioredis] documentation.
When `REDIS_URL` or `REDIS_SOCKET` are defined, the `REDIS_HOSTNAME`, `REDIS_PORT`, `REDIS_USERNAME`, `REDIS_PASSWORD`, and `REDIS_DBINDEX` variables are ignored.
:::
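A sketch of producing such a URL; the hostnames and the sentinel `name` below are hypothetical, and the JSON keys follow the upstream ioredis constructor options:

```bash title="Encode a REDIS_URL (sketch)"
json='{"sentinels":[{"host":"redis-sentinel-1","port":26379},{"host":"redis-sentinel-2","port":26379}],"name":"redis-sentinel"}'
printf 'REDIS_URL=ioredis://%s\n' "$(printf '%s' "$json" | base64 | tr -d '\n')"
```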
@@ -168,10 +168,6 @@ Redis (Sentinel) URL example JSON before encoding:
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PING_TIMEOUT` | How long (ms) to wait for a PING response when checking if an ML server is available | `2000` | server |
| `MACHINE_LEARNING_AVAILABILITY_BACKOFF_TIME` | How long to ignore ML servers that are offline before trying again | `30000` | server |
| `MACHINE_LEARNING_RKNN` | Enable RKNN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_RKNN_THREADS` | How many threads of the RKNN runtime should be spun up while inferencing. | `1` | machine learning |
\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
@@ -183,11 +179,7 @@ Redis (Sentinel) URL example JSON before encoding:
:::info
While the `textual` model is the only one required for smart search, some users may experience slow first searches
due to backups triggering loading of the other models into memory, which blocks other requests until completed.
To avoid this, you can preload the other models (`visual`, `recognition`, and `detection`) if you have enough RAM to do so.
Additional machine learning parameters can be tuned from the admin UI.
Other machine learning parameters can be tuned from the admin UI.
:::
@@ -218,7 +210,7 @@ the `_FILE` variable should be set to the path of a file containing the variable
details on how to use Docker Secrets in the Postgres image.
\*2: See [this comment][docker-secrets-example] for an example of how
to use a Docker secret for the password in the Redis container.
to use use a Docker secret for the password in the Redis container.
[tz-list]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
[docker-secrets-example]: https://github.com/docker-library/redis/issues/46#issuecomment-335326234
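A minimal sketch of the pattern, assuming `DB_PASSWORD` is one of the variables supporting the `_FILE` convention (the file path and compose wiring are illustrative):

```bash title="Docker secret via a _FILE variable (sketch)"
# Keep the password out of .env:
mkdir -p ./secrets && printf '%s' 'my-db-password' > ./secrets/db_password.txt
# Then wire it up in docker-compose.yml (illustrative):
#   services:
#     immich-server:
#       environment:
#         DB_PASSWORD_FILE: /run/secrets/db_password
#       secrets: [db_password]
#   secrets:
#     db_password:
#       file: ./secrets/db_password.txt
```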


@@ -41,9 +41,3 @@ A list of common steps to take after installing Immich include:
## Step 7 - Setup Server Backups
<ServerBackup />
## Setting up optional features
- [External Libraries](/docs/features/libraries.md): Adding your existing photo library to Immich
- [Hardware Transcoding](/docs/features/hardware-transcoding.md): Speeding up video transcoding
- [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md): Speeding up various machine learning tasks in Immich


@@ -1,70 +0,0 @@
---
sidebar_position: 85
---
# Synology [Community]
:::note
This is a community contribution and not officially supported by the Immich team, but included here for convenience.
Community support can be found in the dedicated channel on the [Discord Server](https://discord.immich.app/).
**Please report app issues to the corresponding [Github Repository](https://github.com/truenas/charts/tree/master/community/immich).**
:::
Immich can easily be installed on a Synology NAS using Container Manager within DSM. If you have not installed Container Manager already, you can install it in the Packages Center. Refer to the [Container Manager docs](https://kb.synology.com/en-us/DSM/help/ContainerManager/docker_desc?version=7) for more information on using Container Manager.
## Step 1 - Download the required files
Create a directory of your choice (e.g. `./immich-app`) to house Immich. In general, it's a best practice to have all Docker-based applications running under the `./docker` directory, so in this case, your directory structure will look like `./docker/immich-app`.
Now create a `./postgres` and a `./library` directory as sub-directories of `./docker/immich-app` (a one-line shell sketch follows the list below).
When you're all done, you should have the following:
- `./docker/immich-app/postgres`
- `./docker/immich-app/library`
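From a shell on the NAS, one way to create the whole structure at once:

```bash title="Create the directory structure (sketch)"
mkdir -p ./docker/immich-app/{postgres,library}
```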
Download [`docker-compose.yml`](https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml) and [`example.env`](https://github.com/immich-app/immich/releases/latest/download/example.env) to your computer. Upload the files to the `./docker/immich-app` directory.
## Step 2 - Populate the .env file with custom values
Follow [Step 2 in Docker Compose](./docker-compose#step-2---populate-the-env-file-with-custom-values) for instructions on customizing the `.env` file, and then return back to this guide to continue.
## Step 3 - Create a new project in Container Manager
Open Container Manager, and select the "**Project**" action on the left navigation bar and then click "**Create**".
![Create Project](../../static/img/synology-container-manager-create-project.png)
In the settings of your new project, set "**Project name**" to a name you'll remember, such as _immich-app_. When setting the "**Path**", select the `./docker/immich-app` directory you created earlier. Doing so will prompt a message to use the existing `docker-compose.yml` already present in the directory for your project. Click "**OK**" to continue.
![Set Path](../../static/img/synology-container-manager-set-path.png)
The following screen will give you the option to further customize your `docker-compose.yml` file, giving you a warning regarding the `start_interval` property. Under the `healthcheck` heading, remove the `start_interval: 30s` completely and click "**Next**".
![start interval](../../static/img/synology-container-manager-customize-docker-compose.png)
Skip the section asking to set up a portal for Web Station, and then complete the wizard, which will build and start the containers for your project.
Once your containers are successfully running, navigate to the "**Container**" section of Container Manager, right-click on the "**immich-server**" container, and choose "**Details**".
Scroll to the bottom of the "**Details**" section, and find the `IP Address` of the container, located in the `Network` section. Take note of the container's IP address as you will need it for **Step 4**.
![Container Details](../../static/img/synology-container-manager-container-details.png)
## Step 4 - Configure Firewall Settings
Once your project completes the build process, your containers will start. In order to be able to access Immich from your browser, you need to configure the firewall settings for your Synology NAS.
Open "**Control Panel**" on your Synology NAS, and select "**Security**". Navigate to "**Firewall**"
![Firewall rules](../../static/img/synology-firewall-rules.png)
Click "**Edit Rules**" and add the following firewall rules:
- Add a "**Source IP**" rule for the IP address of your container that you obtained in Step 3 above
- Add a "**Ports**" rule for the port specified in the `docker-compose.yml`, which should be `2283`
## Next Steps
Read the [Post Installation](/docs/install/post-install.mdx) steps and [upgrade instructions](/docs/install/upgrading.md).


@@ -198,7 +198,7 @@ The **CPU** value was specified in a different format with a default of `4000m`
The **Memory** value was specified in a different format with a default of `8Gi` which is 8 GiB of RAM. The value was specified in bytes or a number with a measurement suffix. Examples: `129M`, `123Mi`, `1000000000`
:::
Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passthrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)
Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passtrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)
### Install
@@ -247,10 +247,6 @@ Some examples are: `IMMICH_VERSION`, `UPLOAD_LOCATION`, `DB_DATA_LOCATION`, `TZ`
## Updating the App
:::danger
Make sure to read the general [upgrade instructions](/docs/install/upgrading.md).
:::
When updates become available, SCALE alerts and provides easy updates.
To update the app to the latest version:


@@ -77,7 +77,7 @@ alt="Select Plugins > Compose.Manager > Add New Stack > Label it Immich"
7. Paste the entire contents of the [Immich example.env](https://github.com/immich-app/immich/releases/latest/download/example.env) file into the Unraid editor, then **before saving** edit the following:
- `UPLOAD_LOCATION`: Create a folder in your Images Unraid share and place the **absolute** location here. For example, my _"images"_ share has a folder within it called _"immich"_. If I browse to this directory in the terminal and type `pwd`, the output is `/mnt/user/images/immich`. This is the exact value I need to enter as my `UPLOAD_LOCATION`
- `DB_DATA_LOCATION`: Change this to use an Unraid share (preferably a cache pool, e.g. `/mnt/user/appdata/postgresql/data`). This uses the `appdata` share. Do also create the `postgresql` folder, by running `mkdir /mnt/user/{share_location}/postgresql/data`. If left at default it will try to use Unraid's `/boot/config/plugins/compose.manager/projects/[stack_name]/postgres` folder which it doesn't have permissions to, resulting in this container continuously restarting.
- `DB_DATA_LOCATION`: Change this to use an Unraid share (preferably a cache pool, e.g. `/mnt/user/appdata`). If left at default it will try to use Unraid's `/boot/config/plugins/compose.manager/projects/[stack_name]/postgres` folder which it doesn't have permissions to, resulting in this container continuously restarting.
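As a sketch with hypothetical share names (_"images"_ for photos, `appdata` for the database):

```bash title="Example Unraid paths (sketch)"
mkdir -p /mnt/user/appdata/postgresql/data
# Corresponding .env values (illustrative):
#   UPLOAD_LOCATION=/mnt/user/images/immich
#   DB_DATA_LOCATION=/mnt/user/appdata/postgresql/data
```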
<img
src={require('./img/unraid05.webp').default}
@@ -131,10 +131,6 @@ For more information on how to use the application once installed, please refer
## Updating Steps
:::danger
Make sure to read the general [upgrade instructions](/docs/install/upgrading.md).
:::
Updating is extremely easy; however, it's important to be aware that containers managed via the Docker Compose Manager plugin do not integrate with Unraid's native dockerman UI, so the label "_update ready_" will always be present on containers installed via the Docker Compose Manager.
<img


@@ -1,32 +0,0 @@
---
sidebar_position: 95
---
# Upgrading
:::danger Read the release notes
Immich is currently under heavy development, which means you can expect [breaking changes][breaking] and bugs. You should read the release notes prior to updating and take special care when using automated tools like [Watchtower][watchtower].
You can see versions that had breaking changes [here][breaking].
:::
When a new version of Immich is [released][releases], you should read the release notes and account for any breaking changes noted (as mentioned above).
If you use `IMMICH_VERSION` in your `.env` file, it will need to be updated to the latest or desired version.
After that, the application can be upgraded and restarted with the following commands, run in the directory with the `docker-compose.yml` file:
```bash title="Upgrade and restart Immich"
docker compose pull && docker compose up -d
```
To clean up disk space, the old version's obsolete container images can be deleted with the following command:
```bash title="Clean up unused Docker images"
docker image prune
```
[compose-file]: https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
[env-file]: https://github.com/immich-app/immich/releases/latest/download/example.env
[watchtower]: https://containrrr.dev/watchtower/
[breaking]: https://github.com/immich-app/immich/discussions?discussions_q=label%3Achangelog%3Abreaking-change+sort%3Adate_created
[container-auth]: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry
[releases]: https://github.com/immich-app/immich/releases

View File

@@ -1,7 +1,2 @@
Now that you have imported some pictures, you should set up server backups to preserve your memories.
You can do so by following our [backup guide](/docs/administration/backup-and-restore.md).
:::danger
Immich is still under heavy development _and_ handles very important data.
It is essential that you set up good backups, and test them.
:::

docs/package-lock.json generated

@@ -28,8 +28,6 @@
},
"devDependencies": {
"@docusaurus/module-type-aliases": "~3.7.0",
"@docusaurus/tsconfig": "^3.7.0",
"@docusaurus/types": "^3.7.0",
"prettier": "^3.2.4",
"typescript": "^5.1.6"
},
@@ -3700,13 +3698,6 @@
"node": ">=18.0"
}
},
"node_modules/@docusaurus/tsconfig": {
"version": "3.7.0",
"resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.7.0.tgz",
"integrity": "sha512-vRsyj3yUZCjscgfgcFYjIsTcAru/4h4YH2/XAE8Rs7wWdnng98PgWKvP5ovVc4rmRpRg2WChVW0uOy2xHDvDBQ==",
"dev": true,
"license": "MIT"
},
"node_modules/@docusaurus/types": {
"version": "3.7.0",
"resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.7.0.tgz",
@@ -5308,9 +5299,9 @@
}
},
"node_modules/autoprefixer": {
"version": "10.4.21",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz",
"integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==",
"version": "10.4.20",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz",
"integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==",
"funding": [
{
"type": "opencollective",
@@ -5327,11 +5318,11 @@
],
"license": "MIT",
"dependencies": {
"browserslist": "^4.24.4",
"caniuse-lite": "^1.0.30001702",
"browserslist": "^4.23.3",
"caniuse-lite": "^1.0.30001646",
"fraction.js": "^4.3.7",
"normalize-range": "^0.1.2",
"picocolors": "^1.1.1",
"picocolors": "^1.0.1",
"postcss-value-parser": "^4.2.0"
},
"bin": {
@@ -5781,9 +5772,9 @@
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001706",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001706.tgz",
"integrity": "sha512-3ZczoTApMAZwPKYWmwVbQMFpXBDds3/0VciVoUwPUbldlYyVLmRVuRs/PcUZtHpbLRpzzDvrvnFuREsGt6lUug==",
"version": "1.0.30001695",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001695.tgz",
"integrity": "sha512-vHyLade6wTgI2u1ec3WQBxv+2BrTERV28UXQu9LO6lZ9pYeMk34vjXFLOxo1A4UBA8XTL4njRQZdno/yYaSmWw==",
"funding": [
{
"type": "opencollective",
@@ -14070,9 +14061,9 @@
}
},
"node_modules/postcss": {
"version": "8.5.3",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz",
"integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==",
"version": "8.5.1",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz",
"integrity": "sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ==",
"funding": [
{
"type": "opencollective",
@@ -15734,9 +15725,9 @@
}
},
"node_modules/prettier": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz",
"integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==",
"version": "3.4.2",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz",
"integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==",
"dev": true,
"license": "MIT",
"bin": {
@@ -18377,9 +18368,9 @@
}
},
"node_modules/typescript": {
"version": "5.8.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz",
"integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==",
"version": "5.7.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz",
"integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==",
"license": "Apache-2.0",
"bin": {
"tsc": "bin/tsc",


@@ -36,8 +36,6 @@
},
"devDependencies": {
"@docusaurus/module-type-aliases": "~3.7.0",
"@docusaurus/tsconfig": "^3.7.0",
"@docusaurus/types": "^3.7.0",
"prettier": "^3.2.4",
"typescript": "^5.1.6"
},
@@ -57,6 +55,6 @@
"node": ">=20"
},
"volta": {
"node": "22.14.0"
"node": "22.13.1"
}
}


@@ -53,11 +53,6 @@ const guides: CommunityGuidesProps[] = [
description: 'How to configure an existing fail2ban installation to block incorrect login attempts.',
url: 'https://github.com/immich-app/immich/discussions/3243#discussioncomment-6681948',
},
{
title: 'Immich remote access with NordVPN Meshnet',
description: 'Access Immich with an end-to-end encrypted connection.',
url: 'https://meshnet.nordvpn.com/how-to/remote-files-media-access/immich-remote-access',
},
];
function CommunityGuide({ title, description, url }: CommunityGuidesProps): JSX.Element {


@@ -1,3 +1,2 @@
export const discordPath =
'M81.15,0c-1.2376,2.1973-2.3489,4.4704-3.3591,6.794-9.5975-1.4396-19.3718-1.4396-28.9945,0-.985-2.3236-2.1216-4.5967-3.3591-6.794-9.0166,1.5407-17.8059,4.2431-26.1405,8.0568C2.779,32.5304-1.6914,56.3725.5312,79.8863c9.6732,7.1476,20.5083,12.603,32.0505,16.0884,2.6014-3.4854,4.8998-7.1981,6.8698-11.0623-3.738-1.3891-7.3497-3.1318-10.8098-5.1523.9092-.6567,1.7932-1.3386,2.6519-1.9953,20.281,9.547,43.7696,9.547,64.0758,0,.8587.7072,1.7427,1.3891,2.6519,1.9953-3.4601,2.0457-7.0718,3.7632-10.835,5.1776,1.97,3.8642,4.2683,7.5769,6.8698,11.0623,11.5419-3.4854,22.3769-8.9156,32.0509-16.0631,2.626-27.2771-4.496-50.9172-18.817-71.8548C98.9811,4.2684,90.1918,1.5659,81.1752.0505l-.0252-.0505ZM42.2802,65.4144c-6.2383,0-11.4159-5.6575-11.4159-12.6535s4.9755-12.6788,11.3907-12.6788,11.5169,5.708,11.4159,12.6788c-.101,6.9708-5.026,12.6535-11.3907,12.6535ZM84.3576,65.4144c-6.2637,0-11.3907-5.6575-11.3907-12.6535s4.9755-12.6788,11.3907-12.6788,11.4917,5.708,11.3906,12.6788c-.101,6.9708-5.026,12.6535-11.3906,12.6535Z';
export const discordViewBox = '0 0 126.644 96';
'M 9.1367188 3.8691406 C 9.1217187 3.8691406 9.1067969 3.8700938 9.0917969 3.8710938 C 8.9647969 3.8810937 5.9534375 4.1403594 4.0234375 5.6933594 C 3.0154375 6.6253594 1 12.073203 1 16.783203 C 1 16.866203 1.0215 16.946531 1.0625 17.019531 C 2.4535 19.462531 6.2473281 20.102859 7.1113281 20.130859 L 7.1269531 20.130859 C 7.2799531 20.130859 7.4236719 20.057594 7.5136719 19.933594 L 8.3886719 18.732422 C 6.0296719 18.122422 4.8248594 17.086391 4.7558594 17.025391 C 4.5578594 16.850391 4.5378906 16.549563 4.7128906 16.351562 C 4.8068906 16.244563 4.9383125 16.189453 5.0703125 16.189453 C 5.1823125 16.189453 5.2957188 16.228594 5.3867188 16.308594 C 5.4157187 16.334594 7.6340469 18.216797 11.998047 18.216797 C 16.370047 18.216797 18.589328 16.325641 18.611328 16.306641 C 18.702328 16.227641 18.815734 16.189453 18.927734 16.189453 C 19.059734 16.189453 19.190156 16.243562 19.285156 16.351562 C 19.459156 16.549563 19.441141 16.851391 19.244141 17.025391 C 19.174141 17.087391 17.968375 18.120469 15.609375 18.730469 L 16.484375 19.933594 C 16.574375 20.057594 16.718094 20.130859 16.871094 20.130859 L 16.886719 20.130859 C 17.751719 20.103859 21.5465 19.463531 22.9375 17.019531 C 22.9785 16.947531 23 16.866203 23 16.783203 C 23 12.073203 20.984172 6.624875 19.951172 5.671875 C 18.047172 4.140875 15.036203 3.8820937 14.908203 3.8710938 C 14.895203 3.8700938 14.880188 3.8691406 14.867188 3.8691406 C 14.681188 3.8691406 14.510594 3.9793906 14.433594 4.1503906 C 14.427594 4.1623906 14.362062 4.3138281 14.289062 4.5488281 C 15.548063 4.7608281 17.094141 5.1895937 18.494141 6.0585938 C 18.718141 6.1975938 18.787437 6.4917969 18.648438 6.7167969 C 18.558438 6.8627969 18.402188 6.9433594 18.242188 6.9433594 C 18.156188 6.9433594 18.069234 6.9200937 17.990234 6.8710938 C 15.584234 5.3800938 12.578 5.3046875 12 5.3046875 C 11.422 5.3046875 8.4157187 5.3810469 6.0117188 6.8730469 C 5.9327188 6.9210469 5.8457656 6.9433594 5.7597656 6.9433594 C 5.5997656 6.9433594 5.4425625 6.86475 5.3515625 6.71875 C 5.2115625 6.49375 5.2818594 6.1985938 5.5058594 6.0585938 C 6.9058594 5.1905937 8.4528906 4.7627812 9.7128906 4.5507812 C 9.6388906 4.3147813 9.5714062 4.1643437 9.5664062 4.1523438 C 9.4894063 3.9813438 9.3217188 3.8691406 9.1367188 3.8691406 z M 12 7.3046875 C 12.296 7.3046875 14.950594 7.3403125 16.933594 8.5703125 C 17.326594 8.8143125 17.777234 8.9453125 18.240234 8.9453125 C 18.633234 8.9453125 19.010656 8.8555 19.347656 8.6875 C 19.964656 10.2405 20.690828 12.686219 20.923828 15.199219 C 20.883828 15.143219 20.840922 15.089109 20.794922 15.037109 C 20.324922 14.498109 19.644687 14.191406 18.929688 14.191406 C 18.332687 14.191406 17.754078 14.405437 17.330078 14.773438 C 17.257078 14.832437 15.505 16.21875 12 16.21875 C 8.496 16.21875 6.7450313 14.834687 6.7070312 14.804688 C 6.2540312 14.407687 5.6742656 14.189453 5.0722656 14.189453 C 4.3612656 14.189453 3.6838438 14.494391 3.2148438 15.025391 C 3.1658438 15.080391 3.1201719 15.138266 3.0761719 15.197266 C 3.3091719 12.686266 4.0344375 10.235594 4.6484375 8.6835938 C 4.9864375 8.8525938 5.3657656 8.9433594 5.7597656 8.9433594 C 6.2217656 8.9433594 6.6724531 8.8143125 7.0644531 8.5703125 C 9.0494531 7.3393125 11.704 7.3046875 12 7.3046875 z M 8.890625 10.044922 C 7.966625 10.044922 7.2167969 10.901031 7.2167969 11.957031 C 7.2167969 13.013031 7.965625 13.869141 8.890625 13.869141 C 9.815625 13.869141 10.564453 13.013031 10.564453 11.957031 C 10.564453 10.900031 9.815625 10.044922 8.890625 10.044922 z M 15.109375 10.044922 C 14.185375 10.044922 13.435547 10.901031 13.435547 11.957031 C 13.435547 13.013031 14.184375 13.869141 15.109375 13.869141 C 16.034375 13.869141 16.783203 13.013031 16.783203 11.957031 C 16.783203 10.900031 16.033375 10.044922 15.109375 10.044922 z';


@@ -4,7 +4,7 @@ import React, { useEffect, useState } from 'react';
export default function VersionSwitcher(): JSX.Element {
const [versions, setVersions] = useState([]);
const [activeLabel, setLabel] = useState('Versions');
const [label, setLabel] = useState('Versions');
const windowSize = useWindowSize();
@@ -24,13 +24,10 @@ export default function VersionSwitcher(): JSX.Element {
{ label: 'Next', url: 'https://main.preview.immich.app' },
{ label: 'Latest', url: 'https://immich.app' },
...archiveVersions,
].map(({ label, url }) => ({
label,
url: new URL(url),
}));
];
setVersions(allVersions);
const activeVersion = allVersions.find((version) => version.url.origin === window.location.origin);
const activeVersion = allVersions.find((version) => new URL(version.url).origin === window.location.origin);
if (activeVersion) {
setLabel(activeVersion.label);
}
@@ -48,13 +45,12 @@ export default function VersionSwitcher(): JSX.Element {
versions.length > 0 && (
<DropdownNavbarItem
className="version-switcher-34ab39"
label={activeLabel}
label={label}
mobile={windowSize === 'mobile'}
items={versions.map(({ label, url }) => ({
label,
to: new URL(location.pathname + location.search + location.hash, url).href,
to: url + location.pathname + location.hash,
target: '_self',
className: label === activeLabel ? 'dropdown__link--active menu__link--active' : '', // workaround because React Router `<NavLink>` only supports using URL path for checking if active: https://v5.reactrouter.com/web/api/NavLink/isactive-func
}))}
/>
)


@@ -1,10 +1,12 @@
import React from 'react';
import Link from '@docusaurus/Link';
import Layout from '@theme/Layout';
import { discordPath, discordViewBox } from '@site/src/components/svg-paths';
import ThemedImage from '@theme/ThemedImage';
import { useColorMode } from '@docusaurus/theme-common';
import { discordPath } from '@site/src/components/svg-paths';
import Icon from '@mdi/react';
function HomepageHeader() {
const { isDarkTheme } = useColorMode();
return (
<header>
<div className="top-[calc(12%)] md:top-[calc(30%)] h-screen w-full absolute -z-10">
@@ -12,8 +14,8 @@ function HomepageHeader() {
<div className="w-full h-[120vh] absolute left-0 top-0 backdrop-blur-3xl bg-immich-bg/40 dark:bg-transparent"></div>
</div>
<section className="text-center pt-12 sm:pt-24 bg-immich-bg/50 dark:bg-immich-dark-bg/80">
<ThemedImage
sources={{ dark: 'img/logomark-dark.svg', light: 'img/logomark-light.svg' }}
<img
src={isDarkTheme ? 'img/logomark-dark.svg' : 'img/logomark-light.svg'}
className="h-[115px] w-[115px] mb-2 antialiased rounded-none"
alt="Immich logo"
/>
@@ -33,6 +35,7 @@ function HomepageHeader() {
sacrificing your privacy.
</p>
</div>
<div className="flex flex-col sm:flex-row place-items-center place-content-center mt-9 gap-4 ">
<Link
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary dark:bg-immich-dark-primary rounded-xl no-underline hover:no-underline text-white hover:text-gray-50 dark:text-immich-dark-bg font-bold uppercase"
@@ -55,27 +58,27 @@ function HomepageHeader() {
Buy Merch
</Link>
</div>
<div className="my-12 flex gap-1 font-medium place-items-center place-content-center text-immich-primary dark:text-immich-dark-primary">
<Icon
path={discordPath}
viewBox={discordViewBox} /* viewBox may show an error in your IDE but it is normal. */
size={1}
/>
<Icon path={discordPath} size={1} />
<Link to="https://discord.immich.app/">Join our Discord</Link>
</div>
<ThemedImage
sources={{ dark: '/img/screenshot-dark.webp', light: '/img/screenshot-light.webp' }}
<img
src={isDarkTheme ? '/img/screenshot-dark.webp' : '/img/screenshot-light.webp'}
alt="screenshots"
className="w-[95%] lg:w-[85%] xl:w-[70%] 2xl:w-[60%] "
/>
<div className="mx-[25%] m-auto my-14 md:my-28">
<hr className="border bg-gray-500 dark:bg-gray-400" />
</div>
<ThemedImage
sources={{ dark: 'img/logomark-dark.svg', light: 'img/logomark-light.svg' }}
<img
src={isDarkTheme ? 'img/logomark-dark.svg' : 'img/logomark-light.svg'}
className="h-[115px] w-[115px] mb-2 antialiased rounded-none"
alt="Immich logo"
/>
<div>
<p className="font-bold text-2xl md:text-5xl ">Download the mobile app</p>
<p className="text-lg">
@@ -94,8 +97,9 @@ function HomepageHeader() {
</a>
</div>
</div>
<ThemedImage
sources={{ dark: '/img/app-qr-code-dark.svg', light: '/img/app-qr-code-light.svg' }}
<img
src={isDarkTheme ? '/img/app-qr-code-dark.svg' : '/img/app-qr-code-light.svg'}
alt="app qr code"
width={'150px'}
className="shadow-lg p-3 my-8 dark:bg-immich-dark-bg "


@@ -1,7 +1,10 @@
import React from 'react';
import Link from '@docusaurus/Link';
import Layout from '@theme/Layout';
import { useColorMode } from '@docusaurus/theme-common';
function HomepageHeader() {
const { isDarkTheme } = useColorMode();
return (
<header>
<section className="max-w-[900px] m-4 p-4 md:p-6 md:m-auto md:my-12 border border-red-400 rounded-2xl bg-slate-200 dark:bg-immich-dark-gray">


@@ -242,13 +242,6 @@ const roadmap: Item[] = [
];
const milestones: Item[] = [
{
icon: mdiStar,
iconColor: 'gold',
title: '60,000 Stars',
description: 'Reached 60K Stars on GitHub!',
getDateLabel: withLanguage(new Date(2025, 2, 4)),
},
withRelease({
icon: mdiLinkEdit,
iconColor: 'crimson',


@@ -1,44 +1,4 @@
[
{
"label": "v1.131.2",
"url": "https://v1.131.2.archive.immich.app"
},
{
"label": "v1.131.1",
"url": "https://v1.131.1.archive.immich.app"
},
{
"label": "v1.131.0",
"url": "https://v1.131.0.archive.immich.app"
},
{
"label": "v1.130.3",
"url": "https://v1.130.3.archive.immich.app"
},
{
"label": "v1.130.2",
"url": "https://v1.130.2.archive.immich.app"
},
{
"label": "v1.130.1",
"url": "https://v1.130.1.archive.immich.app"
},
{
"label": "v1.130.0",
"url": "https://v1.130.0.archive.immich.app"
},
{
"label": "v1.129.0",
"url": "https://v1.129.0.archive.immich.app"
},
{
"label": "v1.128.0",
"url": "https://v1.128.0.archive.immich.app"
},
{
"label": "v1.127.0",
"url": "https://v1.127.0.archive.immich.app"
},
{
"label": "v1.126.1",
"url": "https://v1.126.1.archive.immich.app"
@@ -59,6 +19,10 @@
"label": "v1.125.5",
"url": "https://v1.125.5.archive.immich.app"
},
{
"label": "v1.125.4",
"url": "https://v1.125.4.archive.immich.app"
},
{
"label": "v1.125.3",
"url": "https://v1.125.3.archive.immich.app"
@@ -71,6 +35,10 @@
"label": "v1.125.1",
"url": "https://v1.125.1.archive.immich.app"
},
{
"label": "v1.125.0",
"url": "https://v1.125.0.archive.immich.app"
},
{
"label": "v1.124.2",
"url": "https://v1.124.2.archive.immich.app"
@@ -233,46 +201,46 @@
},
{
"label": "v1.105.1",
"url": "https://v1.105.1.archive.immich.app"
"url": "https://v1.105.1.archive.immich.app/"
},
{
"label": "v1.105.0",
"url": "https://v1.105.0.archive.immich.app"
"url": "https://v1.105.0.archive.immich.app/"
},
{
"label": "v1.104.0",
"url": "https://v1.104.0.archive.immich.app"
"url": "https://v1.104.0.archive.immich.app/"
},
{
"label": "v1.103.1",
"url": "https://v1.103.1.archive.immich.app"
"url": "https://v1.103.1.archive.immich.app/"
},
{
"label": "v1.103.0",
"url": "https://v1.103.0.archive.immich.app"
"url": "https://v1.103.0.archive.immich.app/"
},
{
"label": "v1.102.3",
"url": "https://v1.102.3.archive.immich.app"
"url": "https://v1.102.3.archive.immich.app/"
},
{
"label": "v1.102.2",
"url": "https://v1.102.2.archive.immich.app"
"url": "https://v1.102.2.archive.immich.app/"
},
{
"label": "v1.102.1",
"url": "https://v1.102.1.archive.immich.app"
"url": "https://v1.102.1.archive.immich.app/"
},
{
"label": "v1.102.0",
"url": "https://v1.102.0.archive.immich.app"
"url": "https://v1.102.0.archive.immich.app/"
},
{
"label": "v1.101.0",
"url": "https://v1.101.0.archive.immich.app"
"url": "https://v1.101.0.archive.immich.app/"
},
{
"label": "v1.100.0",
"url": "https://v1.100.0.archive.immich.app"
"url": "https://v1.100.0.archive.immich.app/"
}
]

Binary files not shown (five image files removed; previous sizes: 164 KiB, 112 KiB, 50 KiB, 49 KiB, 81 KiB).


@@ -5,7 +5,7 @@ module.exports = {
preflight: false, // disable Tailwind's reset
},
content: ['./src/**/*.{js,jsx,ts,tsx}', './{docs,blog}/**/*.{md,mdx}'], // my markdown stuff is in ../docs, not /src
darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settings
darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settigns
theme: {
extend: {
colors: {


@@ -1,8 +1,9 @@
{
// This file is not used in compilation. It is here just for a nice editor experience.
"extends": "@docusaurus/tsconfig",
"extends": "@tsconfig/docusaurus/tsconfig.json",
"compilerOptions": {
"baseUrl": "."
"baseUrl": ".",
"module": "Node16"
}
}


@@ -1 +1 @@
22.14.0
22.13.1


@@ -34,10 +34,10 @@ services:
- 2285:2285
redis:
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
database:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
command: -c fsync=off -c shared_preload_libraries=vectors.so
environment:
POSTGRES_PASSWORD: postgres


@@ -1,29 +1,39 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended';
import eslintPluginUnicorn from 'eslint-plugin-unicorn';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import typescriptEslint from 'typescript-eslint';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default typescriptEslint.config([
eslintPluginUnicorn.configs.recommended,
eslintPluginPrettierRecommended,
js.configs.recommended,
typescriptEslint.configs.recommended,
export default [
{
ignores: ['eslint.config.mjs'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: typescriptEslint.parser,
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
@@ -52,4 +62,4 @@ export default typescriptEslint.config([
'object-shorthand': ['error', 'always'],
},
},
]);
];

e2e/package-lock.json generated

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
{
"name": "immich-e2e",
"version": "1.131.2",
"version": "1.126.1",
"description": "",
"main": "index.js",
"type": "module",
@@ -25,18 +25,20 @@
"@immich/sdk": "file:../open-api/typescript-sdk",
"@playwright/test": "^1.44.1",
"@types/luxon": "^3.4.2",
"@types/node": "^22.13.14",
"@types/node": "^22.13.1",
"@types/oidc-provider": "^8.5.1",
"@types/pg": "^8.11.0",
"@types/pngjs": "^6.0.4",
"@types/supertest": "^6.0.2",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^57.0.0",
"eslint-plugin-unicorn": "^56.0.1",
"exiftool-vendored": "^28.3.1",
"globals": "^16.0.0",
"globals": "^15.9.0",
"jose": "^5.6.3",
"luxon": "^3.4.4",
"oidc-provider": "^8.5.1",
@@ -47,11 +49,10 @@
"socket.io-client": "^4.7.4",
"supertest": "^7.0.0",
"typescript": "^5.3.3",
"typescript-eslint": "^8.28.0",
"utimes": "^5.2.1",
"vitest": "^3.0.0"
},
"volta": {
"node": "22.14.0"
"node": "22.13.1"
}
}


@@ -4,6 +4,7 @@ import {
AssetResponseDto,
AssetTypeEnum,
getAssetInfo,
getConfig,
getMyUser,
LoginResponseDto,
SharedLinkType,
@@ -44,6 +45,8 @@ const locationAssetFilepath = `${testAssetDir}/metadata/gps-position/thompson-sp
const ratingAssetFilepath = `${testAssetDir}/metadata/rating/mongolels.jpg`;
const facesAssetFilepath = `${testAssetDir}/metadata/faces/portrait.jpg`;
const getSystemConfig = (accessToken: string) => getConfig({ headers: asBearerAuth(accessToken) });
const readTags = async (bytes: Buffer, filename: string) => {
const filepath = join(tempDir, filename);
await writeFile(filepath, bytes);
@@ -225,7 +228,7 @@ describe('/asset', () => {
});
it('should get the asset faces', async () => {
const config = await utils.getSystemConfig(admin.accessToken);
const config = await getSystemConfig(admin.accessToken);
config.metadata.faces.import = true;
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
@@ -1257,7 +1260,6 @@ describe('/asset', () => {
for (const { id, status } of assets) {
expect(status).toBe(AssetMediaStatus.Created);
// longer timeout as the thumbnail generation from full-size raw files can take a while
await utils.waitForWebsocketEvent({ event: 'assetUpload', id });
}


@@ -1,9 +1,8 @@
import { JobCommand, JobName, LoginResponseDto, updateConfig } from '@immich/sdk';
import { cpSync, rmSync } from 'node:fs';
import { JobCommand, JobName, LoginResponseDto } from '@immich/sdk';
import { readFile } from 'node:fs/promises';
import { basename } from 'node:path';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, testAssetDir, utils } from 'src/utils';
import { app, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterEach, beforeAll, describe, expect, it } from 'vitest';
@@ -21,33 +20,6 @@ describe('/jobs', () => {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.FaceDetection, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.SmartSearch, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.DuplicateDetection, {
command: JobCommand.Resume,
force: false,
});
const config = await utils.getSystemConfig(admin.accessToken);
config.machineLearning.duplicateDetection.enabled = false;
config.machineLearning.enabled = false;
config.metadata.faces.import = false;
config.machineLearning.clip.enabled = false;
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
});
it('should require authentication', async () => {
@@ -57,7 +29,14 @@ describe('/jobs', () => {
});
it('should queue metadata extraction for missing assets', async () => {
const path = `${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`;
const path1 = `${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`;
const path2 = `${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`;
await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path1), filename: basename(path1) },
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Pause,
@@ -65,7 +44,7 @@ describe('/jobs', () => {
});
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
assetData: { bytes: await readFile(path2), filename: basename(path2) },
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
@@ -103,123 +82,5 @@ describe('/jobs', () => {
expect(asset.exifInfo?.make).toBe('NIKON CORPORATION');
}
});
it('should not re-extract metadata for existing assets', async () => {
const path = `${testAssetDir}/temp/metadata/asset.jpg`;
cpSync(`${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`, path);
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, path);
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}
rmSync(path);
});
it('should queue thumbnail extraction for assets missing thumbs', async () => {
const path = `${testAssetDir}/albums/nature/tanners_ridge.jpg`;
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Pause,
force: false,
});
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetBefore = await utils.getAssetInfo(admin.accessToken, id);
expect(assetBefore.thumbhash).toBeNull();
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Empty,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetAfter = await utils.getAssetInfo(admin.accessToken, id);
expect(assetAfter.thumbhash).not.toBeNull();
});
it('should not reload existing thumbnail when running thumb job for missing assets', async () => {
const path = `${testAssetDir}/temp/thumbs/asset1.jpg`;
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, path);
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetBefore = await utils.getAssetInfo(admin.accessToken, id);
cpSync(`${testAssetDir}/albums/nature/notocactus_minimus.jpg`, path);
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
// This runs the missing thumbnail job
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetAfter = await utils.getAssetInfo(admin.accessToken, id);
// Asset 1 thumbnail should be untouched since its thumb should not have been reloaded, even though the file was changed
expect(assetAfter.thumbhash).toEqual(assetBefore.thumbhash);
rmSync(path);
});
});
});


@@ -1,4 +1,4 @@
import { LibraryResponseDto, LoginResponseDto, getAllLibraries } from '@immich/sdk';
import { LibraryResponseDto, LoginResponseDto, getAllLibraries, scanLibrary } from '@immich/sdk';
import { cpSync, existsSync, rmSync, unlinkSync } from 'node:fs';
import { Socket } from 'socket.io-client';
import { userDto, uuidDto } from 'src/fixtures';
@@ -8,6 +8,8 @@ import request from 'supertest';
import { utimes } from 'utimes';
import { afterAll, beforeAll, beforeEach, describe, expect, it } from 'vitest';
const scan = async (accessToken: string, id: string) => scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
describe('/libraries', () => {
let admin: LoginResponseDto;
let user: LoginResponseDto;
@@ -296,8 +298,6 @@ describe('/libraries', () => {
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
@@ -312,7 +312,15 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/directoryA`],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.waitForQueueFinish(admin.accessToken, 'thumbnailGeneration');
const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
@@ -329,90 +337,21 @@ describe('/libraries', () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp`],
exclusionPatterns: ['**/directoryA/**'],
exclusionPatterns: ['**/directoryA'],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining('directoryB/assetB.png') }),
]),
);
});
it('should scan external library with multiple exclusion patterns', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp`],
exclusionPatterns: ['**/directoryA/**', '**/directoryB/**'],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(0);
expect(assets.items).toEqual([]);
});
it('should remove assets covered by a new exclusion pattern', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(2);
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining('directoryA/assetA.png') }),
expect.objectContaining({ originalPath: expect.stringContaining('directoryB/assetB.png') }),
]),
);
}
await utils.updateLibrary(admin.accessToken, library.id, {
exclusionPatterns: ['**/directoryA/**'],
});
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining('directoryB/assetB.png') }),
]),
);
}
await utils.updateLibrary(admin.accessToken, library.id, {
exclusionPatterns: ['**/directoryA/**', '**/directoryB/**'],
});
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(0);
expect(assets.items).toEqual([]);
}
expect(assets.items[0].originalPath.includes('directoryB'));
});
it('should scan multiple import paths', async () => {
@@ -421,7 +360,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/directoryA`, `${testAssetDirInternal}/temp/directoryB`],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -440,7 +385,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder, a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder, b/assetB.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -462,7 +413,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder{ a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder} b/assetB.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -514,7 +471,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder${char}1/asset1.png`);
utils.createImageFile(`${testAssetDir}/temp/folder${char}2/asset2.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -529,133 +492,6 @@ describe('/libraries', () => {
utils.removeImageFile(`${testAssetDir}/temp/folder${char}2/asset2.png`);
});
it('should respect exclusion patterns when using multiple import paths', async () => {
// https://github.com/immich-app/immich/issues/17121
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/exclusion/`, `${testAssetDirInternal}/temp/exclusion2/`],
});
const excludedFolder = `Raw`;
utils.createImageFile(`${testAssetDir}/temp/exclusion/asset1.png`);
utils.createImageFile(`${testAssetDir}/temp/exclusion/${excludedFolder}/asset2.png`);
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
expect.objectContaining({ originalPath: expect.stringContaining(`${excludedFolder}/asset2.png`) }),
]),
);
}
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
expect.objectContaining({ originalPath: expect.stringContaining(`${excludedFolder}/asset2.png`) }),
]),
);
}
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: [`**/${excludedFolder}/**`] });
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
]);
}
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
]);
}
utils.removeImageFile(`${testAssetDir}/temp/exclusion/asset1.png`);
utils.removeImageFile(`${testAssetDir}/temp/exclusion/${excludedFolder}/asset2.png`);
});
const annoyingExclusionPatterns = ['@', '#', '$', '%', '^', '&', '='];
it.each(annoyingExclusionPatterns)('should support exclusion patterns with %s', async (char) => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/exclusion/`],
});
const excludedFolder = `${char}folder`;
utils.createImageFile(`${testAssetDir}/temp/exclusion/asset1.png`);
utils.createImageFile(`${testAssetDir}/temp/exclusion/${excludedFolder}/asset2.png`);
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
expect.objectContaining({ originalPath: expect.stringContaining(`${excludedFolder}/asset2.png`) }),
]),
);
}
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual(
expect.arrayContaining([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
expect.objectContaining({ originalPath: expect.stringContaining(`${excludedFolder}/asset2.png`) }),
]),
);
}
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: [`**/${excludedFolder}/**`] });
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
]);
}
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items).toEqual([
expect.objectContaining({ originalPath: expect.stringContaining(`/asset1.png`) }),
]);
}
utils.removeImageFile(`${testAssetDir}/temp/exclusion/asset1.png`);
utils.removeImageFile(`${testAssetDir}/temp/exclusion/${excludedFolder}/asset2.png`);
});
it('should reimport a modified file', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -665,12 +501,23 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -692,7 +539,7 @@ describe('/libraries', () => {
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});
it('should not reimport a file with unchanged timestamp', async () => {
it('should not reimport unmodified files', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/reimport`],
@@ -701,12 +548,21 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -728,47 +584,6 @@ describe('/libraries', () => {
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});
it('should not reimport a modified file more than once', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/reimport`],
});
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
cpSync(`${testAssetDir}/albums/nature/el_torcal_rocks.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
});
expect(assets.count).toEqual(1);
const asset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(asset).toEqual(
expect.objectContaining({
originalFileName: 'asset.jpg',
exifInfo: expect.objectContaining({
model: 'NIKON D750',
}),
}),
);
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});
it('should set an asset offline if its file is missing', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -777,14 +592,21 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -802,7 +624,8 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
@@ -813,7 +636,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/another-path/`],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -833,7 +662,8 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -843,7 +673,8 @@ describe('/libraries', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/directoryB/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.isTrashed).toBe(true);
@@ -865,12 +696,19 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets: assetsBefore } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assetsBefore.count).toBeGreaterThan(1);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -887,7 +725,11 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -910,7 +752,10 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -934,7 +779,10 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -958,13 +806,19 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -987,12 +841,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -1015,12 +875,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -1044,13 +910,19 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -1074,12 +946,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -1103,12 +981,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -1131,15 +1015,22 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
await utils.waitForQueueFinish(admin.accessToken, 'library');
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
@@ -1153,7 +1044,15 @@ describe('/libraries', () => {
utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
await utils.waitForQueueFinish(admin.accessToken, 'library');
const backOnlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
@@ -1167,58 +1066,6 @@ describe('/libraries', () => {
}
});
it('should set a trashed offline asset to online but keep it in trash', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
await utils.deleteAssets(admin.accessToken, [assets.items[0].id]);
{
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.isTrashed).toBe(true);
}
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
expect(offlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(offlineAsset.isOffline).toBe(true);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
const backOnlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(backOnlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(backOnlineAsset.isOffline).toBe(false);
expect(backOnlineAsset.isTrashed).toBe(true);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
});
it('should not set an offline asset to online if its file exists, is not covered by an exclusion pattern, but is outside of all import paths', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
@@ -1227,13 +1074,22 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
await utils.waitForQueueFinish(admin.accessToken, 'library');
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
@@ -1254,7 +1110,15 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/another-path`],
});
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
await utils.waitForQueueFinish(admin.accessToken, 'library');
const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
@@ -1278,19 +1142,27 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
{
const { assets: assetsBefore } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assetsBefore.count).toBe(1);
}
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
await utils.waitForQueueFinish(admin.accessToken, 'library');
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
@@ -1302,7 +1174,15 @@ describe('/libraries', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}
await utils.waitForQueueFinish(admin.accessToken, 'library');
const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
@@ -1422,7 +1302,8 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { status, body } = await request(app)
.delete(`/libraries/${library.id}`)

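The hunks above replace the removed `utils.scan` call with the same inline scan request over and over. A small local helper along these lines (a sketch only, assuming the `request`, `app`, and `expect` bindings this spec already imports; `triggerScan` is a hypothetical name) would express the repeated pattern once:

// hypothetical consolidation of the inline scan boilerplate seen above
const triggerScan = async (accessToken: string, libraryId: string) => {
  const { status } = await request(app)
    .post(`/libraries/${libraryId}/scan`)
    .set('Authorization', `Bearer ${accessToken}`)
    .send();
  // the scan endpoint queues a background job and returns 204 No Content
  expect(status).toBe(204);
};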
View File

@@ -201,7 +201,7 @@ describe('/people', () => {
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Person',
birthDate: '1990-01-01',
birthDate: '1990-01-01T00:00:00.000Z',
});
});
@@ -262,7 +262,7 @@ describe('/people', () => {
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ birthDate: '1990-01-01' });
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: '1990-01-01' });
expect(body).toMatchObject({ birthDate: '1990-01-01T00:00:00.000Z' });
});
it('should clear a date of birth', async () => {

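The updated expectations show `birthDate` now round-tripping as a full ISO 8601 timestamp instead of a date-only string. Plain JavaScript date handling illustrates where the longer form comes from (no Immich API assumed here):

// a date-only string parses as UTC midnight, and serializing the
// resulting Date yields the timestamp the new assertions expect
const birthDate = new Date('1990-01-01');
console.log(birthDate.toISOString()); // 1990-01-01T00:00:00.000Z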
View File

@@ -633,6 +633,7 @@ describe('/search', () => {
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Andalusia',
'Berlin',
'Glarus',
'Greater Accra',
'Havana',
@@ -641,7 +642,6 @@ describe('/search', () => {
'Mississippi',
'New York',
'Shanghai',
'State of Berlin',
'St.-Petersburg',
'Tbilisi',
'Tokyo',
@@ -657,6 +657,7 @@ describe('/search', () => {
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Andalusia',
'Berlin',
'Glarus',
'Greater Accra',
'Havana',
@@ -665,7 +666,6 @@ describe('/search', () => {
'Mississippi',
'New York',
'Shanghai',
'State of Berlin',
'St.-Petersburg',
'Tbilisi',
'Tokyo',

View File

@@ -95,7 +95,7 @@ describe('/shared-links', () => {
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(
`<meta name="description" content="${metadataAlbum.assets.length} shared photos &amp; videos" />`,
`<meta name="description" content="${metadataAlbum.assets.length} shared photos & videos" />`,
);
});
@@ -103,21 +103,21 @@ describe('/shared-links', () => {
const resp = await request(shareUrl).get(`/${linkWithAlbum.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="0 shared photos &amp; videos" />`);
expect(resp.text).toContain(`<meta name="description" content="0 shared photos & videos" />`);
});
it('should have correct asset count in meta tag for shared asset', async () => {
const resp = await request(shareUrl).get(`/${linkWithAssets.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="1 shared photos &amp; videos" />`);
expect(resp.text).toContain(`<meta name="description" content="1 shared photos & videos" />`);
});
it('should have fqdn og:image meta tag for shared asset', async () => {
const resp = await request(shareUrl).get(`/${linkWithAssets.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta property="og:image" content="https://my.immich.app`);
expect(resp.text).toContain(`<meta property="og:image" content="http://`);
});
});

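These assertions drop the HTML-escaped `&amp;` in favor of a raw `&` in the rendered meta description. Escaping `&` inside attribute values is the conservative convention, so the old expectation matched output like the following hypothetical helper would produce (a sketch, not Immich's actual template code):

// hypothetical attribute escaper, shown only to illustrate the old expectation
const escapeAttribute = (value: string) =>
  value.replaceAll('&', '&amp;').replaceAll('"', '&quot;');

console.log(escapeAttribute('2 shared photos & videos'));
// 2 shared photos &amp; videos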
View File

@@ -1,4 +1,4 @@
import { LoginResponseDto, getAssetInfo, getAssetStatistics } from '@immich/sdk';
import { LoginResponseDto, getAssetInfo, getAssetStatistics, scanLibrary } from '@immich/sdk';
import { existsSync } from 'node:fs';
import { Socket } from 'socket.io-client';
import { errorDto } from 'src/responses';
@@ -6,6 +6,8 @@ import { app, asBearerAuth, testAssetDir, testAssetDirInternal, utils } from 'sr
import request from 'supertest';
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
const scan = async (accessToken: string, id: string) => scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
describe('/trash', () => {
let admin: LoginResponseDto;
let ws: Socket;
@@ -79,7 +81,8 @@ describe('/trash', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items.length).toBe(1);
@@ -87,7 +90,8 @@ describe('/trash', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const assetBefore = await utils.getAssetInfo(admin.accessToken, asset.id);
expect(assetBefore).toMatchObject({ isTrashed: true, isOffline: true });
@@ -112,7 +116,8 @@ describe('/trash', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.items.length).toBe(1);
@@ -120,7 +125,8 @@ describe('/trash', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const assetBefore = await utils.getAssetInfo(admin.accessToken, asset.id);
expect(assetBefore).toMatchObject({ isTrashed: true, isOffline: true });
@@ -174,7 +180,8 @@ describe('/trash', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
@@ -182,7 +189,9 @@ describe('/trash', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const before = await getAssetInfo({ id: assetId }, { headers: asBearerAuth(admin.accessToken) });
expect(before).toStrictEqual(expect.objectContaining({ id: assetId, isOffline: true }));
@@ -192,8 +201,6 @@ describe('/trash', () => {
const after = await getAssetInfo({ id: assetId }, { headers: asBearerAuth(admin.accessToken) });
expect(after).toStrictEqual(expect.objectContaining({ id: assetId, isOffline: true }));
utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
});
});
@@ -231,7 +238,7 @@ describe('/trash', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -240,7 +247,7 @@ describe('/trash', () => {
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const before = await utils.getAssetInfo(admin.accessToken, assetId);
@@ -254,8 +261,6 @@ describe('/trash', () => {
const after = await utils.getAssetInfo(admin.accessToken, assetId);
expect(after.isTrashed).toBe(true);
utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
});
});
});

View File

@@ -22,7 +22,7 @@ const tests: Test[] = [
},
{
test: 'should support paths with an asterisk',
paths: [`/photos*/image1.jpg`],
paths: [`/photos\*/image1.jpg`],
files: {
'/photos*/image1.jpg': true,
'/photos*/image2.jpg': false,
@@ -40,7 +40,7 @@ const tests: Test[] = [
},
{
test: 'should support paths with a single quote',
paths: [`/photos'/image1.jpg`],
paths: [`/photos\'/image1.jpg`],
files: {
"/photos'/image1.jpg": true,
"/photos'/image2.jpg": false,
@@ -49,7 +49,7 @@ const tests: Test[] = [
},
{
test: 'should support paths with a double quote',
paths: [`/photos"/image1.jpg`],
paths: [`/photos\"/image1.jpg`],
files: {
'/photos"/image1.jpg': true,
'/photos"/image2.jpg': false,
@@ -67,7 +67,7 @@ const tests: Test[] = [
},
{
test: 'should support paths with an opening brace',
paths: [`/photos{/image1.jpg`],
paths: [`/photos\{/image1.jpg`],
files: {
'/photos{/image1.jpg': true,
'/photos{/image2.jpg': false,
@@ -76,7 +76,7 @@ const tests: Test[] = [
},
{
test: 'should support paths with a closing brace',
paths: [`/photos}/image1.jpg`],
paths: [`/photos\}/image1.jpg`],
files: {
'/photos}/image1.jpg': true,
'/photos}/image2.jpg': false,

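Note that the backslashes added in this hunk are inert at the JavaScript level: inside a template literal, `\*`, `\'`, `\"`, `\{`, and `\}` are all non-escape sequences that evaluate to the bare character, so the paths handed to the crawler are byte-for-byte identical to the unescaped originals:

// both comparisons print true: the escapes disappear before the
// string ever reaches the glob matcher
console.log(`/photos\*/image1.jpg` === '/photos*/image1.jpg');
console.log(`/photos\{/image1.jpg` === '/photos{/image1.jpg');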
View File

@@ -28,10 +28,8 @@ import {
deleteAssets,
getAllJobsStatus,
getAssetInfo,
getConfig,
getConfigDefaults,
login,
scanLibrary,
searchAssets,
sendJobCommand,
setBaseUrl,
@@ -122,7 +120,6 @@ const execPromise = promisify(exec);
const onEvent = ({ event, id }: { event: EventType; id: string }) => {
// console.log(`Received event: ${event} [id=${id}]`);
const set = events[event];
set.add(id);
const idCallback = idCallbacks[id];
@@ -417,8 +414,6 @@ export const utils = {
rmSync(path, { recursive: true });
},
getSystemConfig: (accessToken: string) => getConfig({ headers: asBearerAuth(accessToken) }),
getAssetInfo: (accessToken: string, id: string) => getAssetInfo({ id }, { headers: asBearerAuth(accessToken) }),
checkExistingAssets: (accessToken: string, checkExistingAssetsDto: CheckExistingAssetsDto) =>
@@ -493,7 +488,7 @@ export const utils = {
value: accessToken,
domain,
path: '/',
expires: 2_058_028_213,
expires: 1_742_402_728,
httpOnly: true,
secure: false,
sameSite: 'Lax',
@@ -503,7 +498,7 @@ export const utils = {
value: 'password',
domain,
path: '/',
expires: 2_058_028_213,
expires: 1_742_402_728,
httpOnly: true,
secure: false,
sameSite: 'Lax',
@@ -513,7 +508,7 @@ export const utils = {
value: 'true',
domain,
path: '/',
expires: 2_058_028_213,
expires: 1_742_402_728,
httpOnly: false,
secure: false,
sameSite: 'Lax',
@@ -537,7 +532,6 @@ export const utils = {
},
waitForQueueFinish: (accessToken: string, queue: keyof AllJobStatusResponseDto, ms?: number) => {
// eslint-disable-next-line no-async-promise-executor
return new Promise<void>(async (resolve, reject) => {
const timeout = setTimeout(() => reject(new Error('Timed out waiting for queue to empty')), ms || 10_000);
@@ -559,14 +553,6 @@ export const utils = {
await immichCli(['login', app, `${key.secret}`]);
return key.secret;
},
scan: async (accessToken: string, id: string) => {
await scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
await utils.waitForQueueFinish(accessToken, 'library');
await utils.waitForQueueFinish(accessToken, 'sidecar');
await utils.waitForQueueFinish(accessToken, 'metadataExtraction');
},
};
utils.initSdk();

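With `utils.scan` (which queued a scan and then waited on the library, sidecar, and metadataExtraction queues) removed from the shared helpers, each spec now defines a thin wrapper over the SDK's `scanLibrary` and waits on queues explicitly. A typical call site, mirroring the specs above:

// per-spec wrapper: queue the scan via the SDK...
const scan = async (accessToken: string, id: string) =>
  scanLibrary({ id }, { headers: asBearerAuth(accessToken) });

// ...then block until the relevant queue drains
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');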
View File

@@ -44,7 +44,7 @@ test.describe('Photo Viewer', () => {
const { x, y, width, height } = box!;
await page.mouse.move(x + width / 2, y + height / 2);
await page.mouse.wheel(0, -1);
await expect.poll(async () => await imageLocator(page).getAttribute('src')).toContain('fullsize');
await expect.poll(async () => await imageLocator(page).getAttribute('src')).toContain('original');
});
test('reloads photo when checksum changes', async ({ page }) => {

Some files were not shown because too many files have changed in this diff