Compare commits
2 Commits
v1.120.2 ... feat/mobil

| Author | SHA1 | Date |
|---|---|---|
|  | bf67e92d44 |  |
|  | c17eeb6b44 |  |
@@ -4,7 +4,6 @@

design/
docker/
!docker/scripts
docs/
e2e/
fastlane/

@@ -22,12 +21,11 @@ open-api/typescript-sdk/node_modules/

server/coverage/
server/node_modules/
server/upload/
server/src/queries
server/dist/
server/www/
server/test/assets/

web/node_modules/
web/coverage/
web/.svelte-kit
web/build/
web/.env
.gitattributes (vendored): 2 changes

@@ -2,6 +2,8 @@ mobile/openapi/**/*.md -diff -merge
mobile/openapi/**/*.md linguist-generated=true
mobile/openapi/**/*.dart -diff -merge
mobile/openapi/**/*.dart linguist-generated=true
mobile/openapi/.openapi-generator/FILES -diff -merge
mobile/openapi/.openapi-generator/FILES linguist-generated=true

mobile/lib/**/*.g.dart -diff -merge
mobile/lib/**/*.g.dart linguist-generated=true
@@ -1,13 +1,11 @@

title: "[Feature] feature-name-goes-here"
title: "[Feature] <feature-name-goes-here>"
labels: ["feature"]

body:
  - type: markdown
    attributes:
      value: |
        Please use this form to request new feature for Immich.
        Stick to only a single feature per request. If you list multiple different features at once,
        your request will be closed.
        Please use this form to request new feature for Immich

  - type: checkboxes
    attributes:
.github/FUNDING.yml (vendored): 6 changes

@@ -1 +1,5 @@
custom: ['https://buy.immich.app']
# These are supported funding model platforms

github: immich-app
liberapay: alex.tran1502
custom: https://www.buymeacoffee.com/altran1502
.github/ISSUE_TEMPLATE/bug_report.yaml (vendored): 11 changes

@@ -83,19 +83,10 @@ body:
        2.
        3.
        ...
      render: bash
    validations:
      required: true

  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: Please copy and paste any relevant logs below. (code formatting is
        enabled, no need for backticks)
      render: shell
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional information
.github/ISSUE_TEMPLATE/config.yml (vendored): 13 changes

@@ -1,14 +1,11 @@
blank_issues_enabled: false
contact_links:
  - name: ✋ I have a question or need support
    url: https://discord.immich.app
  - name: I have a question or need support
    url: https://discord.gg/D8JsnBEuKb
    about: We use GitHub for tracking bugs, please check out our Discord channel for freaky fast support.
  - name: 📷 My photo or video has a date, time, or timezone problem
    url: https://github.com/immich-app/immich/discussions/12650
    about: Upload a sample file to this discussion and we will take a look
  - name: 🌟 Feature request
  - name: Feature Request
    url: https://github.com/immich-app/immich/discussions/new?category=feature-request
    about: Please use our GitHub Discussion for making feature requests.
  - name: 🫣 I'm unsure where to go
    url: https://discord.immich.app
  - name: I'm unsure where to go
    url: https://discord.gg/D8JsnBEuKb
    about: If you are unsure where to go, then joining our Discord is recommended; Just ask!
.github/dependabot.yml (vendored, new file): 7 changes

@@ -0,0 +1,7 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
.github/labeler.yml (vendored): 38 changes

@@ -1,38 +0,0 @@
cli:
  - changed-files:
      - any-glob-to-any-file:
          - cli/src/**

documentation:
  - changed-files:
      - any-glob-to-any-file:
          - docs/blob/**
          - docs/docs/**
          - docs/src/**
          - docs/static/**

🖥️web:
  - changed-files:
      - any-glob-to-any-file:
          - web/src/**
          - web/static/**

📱mobile:
  - changed-files:
      - any-glob-to-any-file:
          - mobile/lib/**
          - mobile/test/**

🗄️server:
  - changed-files:
      - any-glob-to-any-file:
          - server/src/**
          - server/test/**

🧠machine-learning:
  - changed-files:
      - any-glob-to-any-file:
          - machine-learning/app/**

changelog:translation:
  - head-branch: ['^chore/translations$']
.github/release.yml (vendored): 40 changes

@@ -1,33 +1,41 @@
changelog:
  categories:
    - title: 🚨 Breaking Changes
    - title: ⚠️ Breaking Changes
      labels:
        - changelog:breaking-change
        - breaking-change

    - title: 🫥 Deprecated Changes
    - title: 🗄️ Server
      labels:
        - changelog:deprecated
        - 🗄️server

    - title: 🔒 Security
    - title: 📱 Mobile
      labels:
        - changelog:security
        - 📱mobile

    - title: 🚀 Features
    - title: 🖥️ Web
      labels:
        - changelog:feature
        - 🖥️web

    - title: 🌟 Enhancements
    - title: 🧠 Machine Learning
      labels:
        - changelog:enhancement
        - 🧠machine-learning

    - title: 🐛 Bug fixes
    - title: ⚡ CLI
      labels:
        - changelog:bugfix
        - cli

    - title: 📚 Documentation
    - title: 📓 Documentation
      labels:
        - changelog:documentation
        - documentation

    - title: 🌐 Translations
    - title: 🔨 Maintenance
      labels:
        - changelog:translation
        - deployment
        - dependencies
        - renovate
        - maintenance
        - tech-debt

    - title: Other changes
      labels:
        - "*"
.github/workflows/build-mobile.yml (vendored): 30 changes

@@ -16,28 +16,10 @@ concurrency:
  cancel-in-progress: true

jobs:
  pre-job:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            mobile:
              - 'mobile/**'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"

  build-sign-android:
    name: Build and sign Android
    needs: pre-job
    # Skip when PR from a fork
    if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_run == 'true' }}
    if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }}
    runs-on: macos-14

    steps:
@@ -55,15 +37,15 @@ jobs:

      - uses: actions/setup-java@v4
        with:
          distribution: 'zulu'
          java-version: '17'
          cache: 'gradle'
          distribution: "zulu"
          java-version: "11.0.21+9"
          cache: "gradle"

      - name: Setup Flutter SDK
        uses: subosito/flutter-action@v2
        with:
          channel: 'stable'
          flutter-version-file: ./mobile/pubspec.yaml
          channel: "stable"
          flutter-version: "3.19.3"
          cache: true

      - name: Create the Keystore
.github/workflows/cli.yml (vendored): 31 changes

@@ -1,17 +1,16 @@
name: CLI Build
on:
  workflow_dispatch:
  push:
    branches: [main]
    paths:
      - 'cli/**'
      - '.github/workflows/cli.yml'
      - "cli/**"
      - ".github/workflows/cli.yml"
  pull_request:
    branches: [main]
    paths:
      - 'cli/**'
      - '.github/workflows/cli.yml'
  release:
    types: [published]
      - "cli/**"
      - ".github/workflows/cli.yml"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -22,7 +21,7 @@ permissions:

jobs:
  publish:
    name: CLI Publish
    name: Publish
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -33,8 +32,8 @@ jobs:
      # Setup .npmrc file to publish to npm
      - uses: actions/setup-node@v4
        with:
          node-version-file: './cli/.nvmrc'
          registry-url: 'https://registry.npmjs.org'
          node-version: "20.x"
          registry-url: "https://registry.npmjs.org"
      - name: Prepare SDK
        run: npm ci --prefix ../open-api/typescript-sdk/
      - name: Build SDK
@@ -42,7 +41,7 @@ jobs:
      - run: npm ci
      - run: npm run build
      - run: npm publish
        if: ${{ github.event_name == 'release' }}
        if: ${{ github.event_name == 'workflow_dispatch' }}
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

@@ -56,10 +55,10 @@ jobs:
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0
        uses: docker/setup-qemu-action@v3.0.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.7.1
        uses: docker/setup-buildx-action@v3.3.0

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
@@ -84,15 +83,15 @@ jobs:
          images: |
            name=ghcr.io/${{ github.repository_owner }}/immich-cli
          tags: |
            type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'release' }}
            type=raw,value=latest,enable=${{ github.event_name == 'release' }}
            type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'workflow_dispatch' }}
            type=raw,value=latest,enable=${{ github.event_name == 'workflow_dispatch' }}

      - name: Build and push image
        uses: docker/build-push-action@v6.9.0
        uses: docker/build-push-action@v5.3.0
        with:
          file: cli/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name == 'release' }}
          push: ${{ github.event_name == 'workflow_dispatch' }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          tags: ${{ steps.metadata.outputs.tags }}
.github/workflows/docker-cleanup.yml (vendored): 8 changes

@@ -22,7 +22,7 @@ concurrency:
jobs:
  cleanup-images:
    name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
    runs-on: ubuntu-24.04
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
@@ -35,7 +35,7 @@ jobs:
    steps:
      - name: Clean temporary images
        if: "${{ env.TOKEN != '' }}"
        uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
        uses: stumpylog/image-cleaner-action/ephemeral@v0.5.0
        with:
          token: "${{ env.TOKEN }}"
          owner: "immich-app"
@@ -48,7 +48,7 @@ jobs:

  cleanup-untagged-images:
    name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
    runs-on: ubuntu-24.04
    runs-on: ubuntu-22.04
    needs:
      - cleanup-images
    strategy:
@@ -64,7 +64,7 @@ jobs:
    steps:
      - name: Clean untagged images
        if: "${{ env.TOKEN != '' }}"
        uses: stumpylog/image-cleaner-action/untagged@v0.9.0
        uses: stumpylog/image-cleaner-action/untagged@v0.5.0
        with:
          token: "${{ env.TOKEN }}"
          owner: "immich-app"
.github/workflows/docker.yml (vendored): 248 changes

@@ -17,206 +17,56 @@ permissions:
  packages: write

jobs:
  pre-job:
  build_and_push:
    name: Build and Push
    runs-on: ubuntu-latest
    outputs:
      should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            server:
              - 'server/**'
              - 'openapi/**'
              - 'web/**'
              - 'i18n/**'
            machine-learning:
              - 'machine-learning/**'

      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"

  retag_ml:
    name: Re-Tag ML
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_ml == 'false' && !github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        suffix: ["", "-cuda", "-openvino", "-armnn"]
    steps:
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Re-tag image
        run: |
          REGISTRY_NAME="ghcr.io"
          REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
          TAG_OLD=main${{ matrix.suffix }}
          TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD

  retag_server:
    name: Re-Tag Server
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'false' && !github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        suffix: [""]
    steps:
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Re-tag image
        run: |
          REGISTRY_NAME="ghcr.io"
          REPOSITORY=${{ github.repository_owner }}/immich-server
          TAG_OLD=main${{ matrix.suffix }}
          TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD

  build_and_push_ml:
    name: Build and Push ML
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
    runs-on: ubuntu-latest
    env:
      image: immich-machine-learning
      context: machine-learning
      file: machine-learning/Dockerfile
    strategy:
      # Prevent a failure in one image from stopping the other builds
      fail-fast: false
      matrix:
        include:
          - platforms: linux/amd64,linux/arm64
          - image: immich-machine-learning
            context: machine-learning
            file: machine-learning/Dockerfile
            platforms: linux/amd64,linux/arm64
            device: cpu

          - platforms: linux/amd64
          - image: immich-machine-learning
            context: machine-learning
            file: machine-learning/Dockerfile
            platforms: linux/amd64
            device: cuda
            suffix: -cuda

          - platforms: linux/amd64
          - image: immich-machine-learning
            context: machine-learning
            file: machine-learning/Dockerfile
            platforms: linux/amd64
            device: openvino
            suffix: -openvino

          - platforms: linux/arm64
          - image: immich-machine-learning
            context: machine-learning
            file: machine-learning/Dockerfile
            platforms: linux/arm64
            device: armnn
            suffix: -armnn

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.7.1

      - name: Login to Docker Hub
        # Only push to Docker Hub when making a release
        if: ${{ github.event_name == 'release' }}
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        # Skip when PR from a fork
        if: ${{ !github.event.pull_request.head.repo.fork }}
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate docker image tags
        id: metadata
        uses: docker/metadata-action@v5
        with:
          flavor: |
            # Disable latest tag
            latest=false
          images: |
            name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
            name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
            # Tag with pr-number
            type=ref,event=pr,suffix=${{ matrix.suffix }}
            # Tag with git tag on release
            type=ref,event=tag,suffix=${{ matrix.suffix }}
            type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}

      - name: Determine build cache output
        id: cache-target
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            # Essentially just ignore the cache output (PR can't write to registry cache)
            echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
          else
            echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
          fi

      - name: Build and push image
        uses: docker/build-push-action@v6.9.0
        with:
          context: ${{ env.context }}
          file: ${{ env.file }}
          platforms: ${{ matrix.platforms }}
          # Skip pushing when PR from a fork
          push: ${{ !github.event.pull_request.head.repo.fork }}
          cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
          build-args: |
            DEVICE=${{ matrix.device }}
            BUILD_ID=${{ github.run_id }}
            BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
            BUILD_SOURCE_REF=${{ github.ref_name }}
            BUILD_SOURCE_COMMIT=${{ github.sha }}

  build_and_push_server:
    name: Build and Push Server
    runs-on: ubuntu-latest
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
    env:
      image: immich-server
      context: .
      file: server/Dockerfile
    strategy:
      fail-fast: false
      matrix:
        include:
          - platforms: linux/amd64,linux/arm64
          - image: immich-server
            context: .
            file: server/Dockerfile
            platforms: linux/amd64,linux/arm64
            device: cpu

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0
        uses: docker/setup-qemu-action@v3.0.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.7.1
        uses: docker/setup-buildx-action@v3.3.0

      - name: Login to Docker Hub
        # Only push to Docker Hub when making a release
@@ -243,8 +93,8 @@ jobs:
            # Disable latest tag
            latest=false
          images: |
            name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
            name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
            name=ghcr.io/${{ github.repository_owner }}/${{matrix.image}}
            name=altran1502/${{matrix.image}},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
@@ -261,50 +111,20 @@ jobs:
            # Essentially just ignore the cache output (PR can't write to registry cache)
            echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
          else
            echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
            echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ matrix.image }}" >> $GITHUB_OUTPUT
          fi

      - name: Build and push image
        uses: docker/build-push-action@v6.9.0
        uses: docker/build-push-action@v5.3.0
        with:
          context: ${{ env.context }}
          file: ${{ env.file }}
          context: ${{ matrix.context }}
          file: ${{ matrix.file }}
          platforms: ${{ matrix.platforms }}
          # Skip pushing when PR from a fork
          push: ${{ !github.event.pull_request.head.repo.fork }}
          cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
          cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{matrix.image}}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
          build-args: |
            DEVICE=${{ matrix.device }}
            BUILD_ID=${{ github.run_id }}
            BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
            BUILD_SOURCE_REF=${{ github.ref_name }}
            BUILD_SOURCE_COMMIT=${{ github.sha }}

  success-check-server:
    name: Docker Build & Push Server Success
    needs: [build_and_push_server, retag_server]
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Any jobs failed?
        if: ${{ contains(needs.*.result, 'failure') }}
        run: exit 1
      - name: All jobs passed or skipped
        if: ${{ !(contains(needs.*.result, 'failure')) }}
        run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"

  success-check-ml:
    name: Docker Build & Push ML Success
    needs: [build_and_push_ml, retag_ml]
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Any jobs failed?
        if: ${{ contains(needs.*.result, 'failure') }}
        run: exit 1
      - name: All jobs passed or skipped
        if: ${{ !(contains(needs.*.result, 'failure')) }}
        run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
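A note on the Re-Tag jobs above: `docker buildx imagetools create` rebuilds nothing; it writes a new tag that points at an already-pushed (multi-arch) manifest, which is why the skipped-path case completes in seconds. A minimal standalone sketch of the same technique, with made-up repository and tag values standing in for the workflow's templated expressions:

```sh
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical values; the workflow derives these from github.* expressions.
REGISTRY="ghcr.io"
REPOSITORY="immich-app/immich-machine-learning"
TAG_OLD="main"     # must already point at a pushed manifest
TAG_NEW="pr-1234"  # alias to create; no layers are rebuilt or re-pushed

# Registry-side retag: TAG_NEW ends up referencing TAG_OLD's manifest list.
docker buildx imagetools create \
  -t "$REGISTRY/$REPOSITORY:$TAG_NEW" \
  "$REGISTRY/$REPOSITORY:$TAG_OLD"

# Both tags should now report the same digest.
docker buildx imagetools inspect "$REGISTRY/$REPOSITORY:$TAG_NEW"
```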
.github/workflows/docs-build.yml (vendored): 64 changes

@@ -1,64 +0,0 @@
name: Docs build
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  release:
    types: [published]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  pre-job:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            docs:
              - 'docs/**'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"

  build:
    name: Docs Build
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run == 'true' }}
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./docs

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './docs/.nvmrc'

      - name: Run npm install
        run: npm ci

      - name: Check formatting
        run: npm run format

      - name: Run build
        run: npm run build

      - name: Upload build output
        uses: actions/upload-artifact@v4
        with:
          name: docs-build-output
          path: docs/build/
          retention-days: 1
.github/workflows/docs-deploy.yml (vendored): 202 changes

@@ -1,202 +0,0 @@
name: Docs deploy
on:
  workflow_run:
    workflows: ["Docs build"]
    types:
      - completed

jobs:
  checks:
    name: Docs Deploy Checks
    runs-on: ubuntu-latest
    outputs:
      parameters: ${{ steps.parameters.outputs.result }}
      artifact: ${{ steps.get-artifact.outputs.result }}
    steps:
      - if: ${{ github.event.workflow_run.conclusion != 'success' }}
        run: echo 'The triggering workflow did not succeed' && exit 1
      - name: Get artifact
        id: get-artifact
        uses: actions/github-script@v7
        with:
          script: |
            let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: context.payload.workflow_run.id,
            });
            let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
              return artifact.name == "docs-build-output"
            })[0];
            if (!matchArtifact) {
              console.log("No artifact found with the name docs-build-output, build job was skipped")
              return { found: false };
            }
            return { found: true, id: matchArtifact.id };
      - name: Determine deploy parameters
        id: parameters
        uses: actions/github-script@v7
        with:
          script: |
            const eventType = context.payload.workflow_run.event;
            const isFork = context.payload.workflow_run.repository.fork;

            let parameters;

            console.log({eventType, isFork});

            if (eventType == "push") {
              const branch = context.payload.workflow_run.head_branch;
              console.log({branch});
              const shouldDeploy = !isFork && branch == "main";
              parameters = {
                event: "branch",
                name: "main",
                shouldDeploy
              };
            } else if (eventType == "pull_request") {
              let pull_number = context.payload.workflow_run.pull_requests[0]?.number;
              if(!pull_number) {
                const response = await github.rest.search.issuesAndPullRequests({q: 'repo:${{ github.repository }} is:pr sha:${{ github.event.workflow_run.head_sha }}',per_page: 1,})
                const items = response.data.items
                if (items.length < 1) {
                  throw new Error("No pull request found for the commit")
                }
                const pullRequestNumber = items[0].number
                console.info("Pull request number is", pullRequestNumber)
                pull_number = pullRequestNumber
              }
              const {data: pr} = await github.rest.pulls.get({
                owner: context.repo.owner,
                repo: context.repo.repo,
                pull_number
              });

              console.log({pull_number});

              parameters = {
                event: "pr",
                name: `pr-${pull_number}`,
                pr_number: pull_number,
                shouldDeploy: true
              };
            } else if (eventType == "release") {
              parameters = {
                event: "release",
                name: context.payload.workflow_run.head_branch,
                shouldDeploy: !isFork
              };
            }

            console.log(parameters);
            return parameters;

  deploy:
    name: Docs Deploy
    runs-on: ubuntu-latest
    needs: checks
    if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Load parameters
        id: parameters
        uses: actions/github-script@v7
        with:
          script: |
            const json = `${{ needs.checks.outputs.parameters }}`;
            const parameters = JSON.parse(json);
            core.setOutput("event", parameters.event);
            core.setOutput("name", parameters.name);
            core.setOutput("shouldDeploy", parameters.shouldDeploy);

      - run: |
          echo "Starting docs deployment for ${{ steps.parameters.outputs.event }} ${{ steps.parameters.outputs.name }}"

      - name: Download artifact
        uses: actions/github-script@v7
        with:
          script: |
            let artifact = ${{ needs.checks.outputs.artifact }};
            let download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: artifact.id,
              archive_format: 'zip',
            });
            let fs = require('fs');
            fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/docs-build-output.zip`, Buffer.from(download.data));

      - name: Unzip artifact
        run: unzip "${{ github.workspace }}/docs-build-output.zip" -d "${{ github.workspace }}/docs/build"

      - name: Deploy Docs Subdomain
        env:
          TF_VAR_prefix_name: ${{ steps.parameters.outputs.name}}
          TF_VAR_prefix_event_type: ${{ steps.parameters.outputs.event }}
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
        uses: gruntwork-io/terragrunt-action@v2
        with:
          tg_version: "0.58.12"
          tofu_version: "1.7.1"
          tg_dir: "deployment/modules/cloudflare/docs"
          tg_command: "apply"

      - name: Deploy Docs Subdomain Output
        id: docs-output
        env:
          TF_VAR_prefix_name: ${{ steps.parameters.outputs.name}}
          TF_VAR_prefix_event_type: ${{ steps.parameters.outputs.event }}
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
        uses: gruntwork-io/terragrunt-action@v2
        with:
          tg_version: "0.58.12"
          tofu_version: "1.7.1"
          tg_dir: "deployment/modules/cloudflare/docs"
          tg_command: "output -json"

      - name: Output Cleaning
        id: clean
        run: |
          TG_OUT=$(echo '${{ steps.docs-output.outputs.tg_action_output }}' | sed 's|%0A|\n|g ; s|%3C|<|g' | jq -c .)
          echo "output=$TG_OUT" >> $GITHUB_OUTPUT

      - name: Publish to Cloudflare Pages
        uses: cloudflare/pages-action@v1
        with:
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN_PAGES_UPLOAD }}
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          projectName: ${{ fromJson(steps.clean.outputs.output).pages_project_name.value }}
          workingDirectory: "docs"
          directory: "build"
          branch: ${{ steps.parameters.outputs.name }}
          wranglerVersion: '3'

      - name: Deploy Docs Release Domain
        if: ${{ steps.parameters.outputs.event == 'release' }}
        env:
          TF_VAR_prefix_name: ${{ steps.parameters.outputs.name}}
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
        uses: gruntwork-io/terragrunt-action@v2
        with:
          tg_version: '0.58.12'
          tofu_version: '1.7.1'
          tg_dir: 'deployment/modules/cloudflare/docs-release'
          tg_command: 'apply'

      - name: Comment
        uses: actions-cool/maintain-one-comment@v3
        if: ${{ steps.parameters.outputs.event == 'pr' }}
        with:
          number: ${{ fromJson(needs.checks.outputs.parameters).pr_number }}
          body: |
            📖 Documentation deployed to [${{ fromJson(steps.clean.outputs.output).immich_app_branch_subdomain.value }}](https://${{ fromJson(steps.clean.outputs.output).immich_app_branch_subdomain.value }})
          emojis: 'rocket'
          body-include: '<!-- Docs PR URL -->'
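For context on the Output Cleaning step in the deleted docs-deploy workflow: the terragrunt action percent-encodes newlines (%0A) and angle brackets (%3C) in `tg_action_output`, so the step decodes them and compacts the JSON before later steps pull fields out with `fromJson`. A rough standalone illustration under that assumption, using a made-up payload and GNU sed:

```sh
#!/usr/bin/env bash
set -euo pipefail

# Made-up stand-in for steps.docs-output.outputs.tg_action_output.
ENCODED='{%0A  "pages_project_name": {%0A    "value": "immich-docs"%0A  }%0A}'

# Same decode-and-compact pipeline as the workflow step.
TG_OUT=$(echo "$ENCODED" | sed 's|%0A|\n|g ; s|%3C|<|g' | jq -c .)
echo "$TG_OUT"   # {"pages_project_name":{"value":"immich-docs"}}

# Downstream steps then read single fields, e.g. the Pages project name.
echo "$TG_OUT" | jq -r '.pages_project_name.value'   # immich-docs
```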
.github/workflows/docs-destroy.yml (vendored): 33 changes

@@ -1,33 +0,0 @@
name: Docs destroy
on:
  pull_request_target:
    types: [closed]

jobs:
  deploy:
    name: Docs Destroy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Destroy Docs Subdomain
        env:
          TF_VAR_prefix_name: "pr-${{ github.event.number }}"
          TF_VAR_prefix_event_type: "pr"
          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
        uses: gruntwork-io/terragrunt-action@v2
        with:
          tg_version: "0.58.12"
          tofu_version: "1.7.1"
          tg_dir: "deployment/modules/cloudflare/docs"
          tg_command: "destroy -refresh=false"

      - name: Comment
        uses: actions-cool/maintain-one-comment@v3
        with:
          number: ${{ github.event.number }}
          delete: true
          body-include: '<!-- Docs PR URL -->'
.github/workflows/fix-format.yml (vendored): 52 changes

@@ -1,52 +0,0 @@
name: Fix formatting

on:
  pull_request:
    types: [labeled]

jobs:
  fix-formatting:
    runs-on: ubuntu-latest
    if: ${{ github.event.label.name == 'fix:formatting' }}
    permissions:
      pull-requests: write
    steps:
      - name: Generate a token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
          private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}

      - name: 'Checkout'
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.ref }}
          token: ${{ steps.generate-token.outputs.token }}

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './server/.nvmrc'

      - name: Fix formatting
        run: make install-all && make format-all

      - name: Commit and push
        uses: EndBug/add-and-commit@v9
        with:
          default_author: github_actions
          message: 'chore: fix formatting'

      - name: Remove label
        uses: actions/github-script@v7
        if: always()
        with:
          script: |
            github.rest.issues.removeLabel({
              issue_number: context.payload.pull_request.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              name: 'fix:formatting'
            })
.github/workflows/pr-label-validation.yml (vendored): 21 changes

@@ -1,21 +0,0 @@
name: PR Label Validation

on:
  pull_request_target:
    types: [opened, labeled, unlabeled, synchronize]

jobs:
  validate-release-label:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - name: Require PR to have a changelog label
        uses: mheap/github-action-required-labels@v5
        with:
          mode: exactly
          count: 1
          use_regex: true
          labels: "changelog:.*"
          add_comment: true
.github/workflows/pr-labeler.yml (vendored): 12 changes

@@ -1,12 +0,0 @@
name: "Pull Request Labeler"
on:
  - pull_request_target

jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v5

@@ -1,15 +0,0 @@
name: PR Conventional Commit Validation

on:
  pull_request:
    types: [opened, synchronize, reopened, edited]

jobs:
  validate-pr-title:
    runs-on: ubuntu-latest
    steps:
      - name: PR Conventional Commit Validation
        uses: ytanikin/PRConventionalCommits@1.2.0
        with:
          task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
          add_label: 'false'
.github/workflows/pr-require-label.yml (vendored, new file): 13 changes

@@ -0,0 +1,13 @@
name: Enforce PR labels

on:
  pull_request:
    types: [labeled, unlabeled, opened, edited, synchronize]
jobs:
  enforce-label:
    name: Enforce label
    runs-on: ubuntu-latest
    steps:
      - if: toJson(github.event.pull_request.labels) == '[]'
        run: exit 1
.github/workflows/prepare-release.yml (vendored): 15 changes

@@ -29,17 +29,10 @@ jobs:
      ref: ${{ steps.push-tag.outputs.commit_long_sha }}

    steps:
      - name: Generate a token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
          private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          token: ${{ steps.generate-token.outputs.token }}
          token: ${{ secrets.ORG_RELEASE_TOKEN }}

      - name: Install Poetry
        run: pipx install poetry
@@ -51,8 +44,10 @@ jobs:
        id: push-tag
        uses: EndBug/add-and-commit@v9
        with:
          default_author: github_actions
          message: 'chore: version ${{ env.IMMICH_VERSION }}'
          author_name: Alex The Bot
          author_email: alex.tran1502@gmail.com
          default_author: user_info
          message: 'Version ${{ env.IMMICH_VERSION }}'
          tag: ${{ env.IMMICH_VERSION }}
          push: true
.github/workflows/sdk.yml (vendored): 2 changes

@@ -19,7 +19,7 @@ jobs:
      # Setup .npmrc file to publish to npm
      - uses: actions/setup-node@v4
        with:
          node-version-file: './open-api/typescript-sdk/.nvmrc'
          node-version: '20.x'
          registry-url: 'https://registry.npmjs.org'
      - name: Install deps
        run: npm ci
.github/workflows/static_analysis.yml (vendored): 27 changes

@@ -10,27 +10,8 @@ concurrency:
  cancel-in-progress: true

jobs:
  pre-job:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            mobile:
              - 'mobile/**'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"

  mobile-dart-analyze:
    name: Run Dart Code Analysis
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run == 'true' }}

    runs-on: ubuntu-latest

@@ -41,8 +22,8 @@ jobs:
      - name: Setup Flutter SDK
        uses: subosito/flutter-action@v2
        with:
          channel: 'stable'
          flutter-version-file: ./mobile/pubspec.yaml
          channel: "stable"
          flutter-version: "3.19.3"

      - name: Install dependencies
        run: dart pub get
@@ -56,10 +37,6 @@ jobs:
        run: dart format lib/ --set-exit-if-changed
        working-directory: ./mobile

      - name: Run dart custom_lint
        run: dart run custom_lint
        working-directory: ./mobile

      # Enable after riverpod generator migration is completed
      # - name: Run dart custom lint
      #   run: dart run custom_lint
.github/workflows/test.yml (vendored): 266 changes

@@ -10,48 +10,43 @@ concurrency:
  cancel-in-progress: true

jobs:
  pre-job:
  server-e2e-jobs:
    name: Server (e2e-jobs)
    runs-on: ubuntu-latest
    outputs:
      should_run_web: ${{ steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_cli: ${{ steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_e2e: ${{ steps.found_paths.outputs.e2e == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_mobile: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
      should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            web:
              - 'web/**'
              - 'i18n/**'
              - 'open-api/typescript-sdk/**'
            server:
              - 'server/**'
            cli:
              - 'cli/**'
              - 'open-api/typescript-sdk/**'
            e2e:
              - 'e2e/**'
            mobile:
              - 'mobile/**'
            machine-learning:
              - 'machine-learning/**'
          submodules: 'recursive'

      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
      - name: Run e2e tests
        run: make server-e2e-jobs

  doc-tests:
    name: Docs
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./docs

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run npm install
        run: npm ci

      - name: Run formatter
        run: npm run format
        if: ${{ !cancelled() }}

      - name: Run build
        run: npm run build
        if: ${{ !cancelled() }}

  server-unit-tests:
    name: Test & Lint Server
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
    name: Server
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -61,11 +56,6 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './server/.nvmrc'

      - name: Run npm install
        run: npm ci

@@ -81,14 +71,12 @@ jobs:
        run: npm run check
        if: ${{ !cancelled() }}

      - name: Run small tests & coverage
      - name: Run unit tests & coverage
        run: npm run test:cov
        if: ${{ !cancelled() }}

  cli-unit-tests:
    name: Unit Test CLI
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
    name: CLI
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -101,7 +89,7 @@ jobs:
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './cli/.nvmrc'
          node-version: 20

      - name: Setup typescript-sdk
        run: npm ci && npm run build
@@ -126,44 +114,8 @@ jobs:
        run: npm run test:cov
        if: ${{ !cancelled() }}

  cli-unit-tests-win:
    name: Unit Test CLI (Windows)
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
    runs-on: windows-latest
    defaults:
      run:
        working-directory: ./cli

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './cli/.nvmrc'

      - name: Setup typescript-sdk
        run: npm ci && npm run build
        working-directory: ./open-api/typescript-sdk

      - name: Install deps
        run: npm ci

      # Skip linter & formatter in Windows test.
      - name: Run tsc
        run: npm run check
        if: ${{ !cancelled() }}

      - name: Run unit tests & coverage
        run: npm run test:cov
        if: ${{ !cancelled() }}

  web-unit-tests:
    name: Test & Lint Web
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_web == 'true' }}
    name: Web
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -173,11 +125,6 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './web/.nvmrc'

      - name: Run setup typescript-sdk
        run: npm ci && npm run build
        working-directory: ./open-api/typescript-sdk
@@ -205,10 +152,8 @@ jobs:
        run: npm run test:cov
        if: ${{ !cancelled() }}

  e2e-tests-lint:
    name: End-to-End Lint
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_e2e == 'true' }}
  e2e-tests:
    name: End-to-End Tests
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -217,17 +162,24 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './e2e/.nvmrc'
          node-version: 20

      - name: Run setup typescript-sdk
        run: npm ci && npm run build
        working-directory: ./open-api/typescript-sdk
        if: ${{ !cancelled() }}

      - name: Run setup cli
        run: npm ci && npm run build
        working-directory: ./cli
        if: ${{ !cancelled() }}

      - name: Install dependencies
        run: npm ci
        if: ${{ !cancelled() }}
@@ -244,58 +196,8 @@ jobs:
        run: npm run check
        if: ${{ !cancelled() }}

  medium-tests-server:
    name: Medium Tests (Server)
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
    runs-on: mich

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'

      - name: Production build
        if: ${{ !cancelled() }}
        run: docker compose -f e2e/docker-compose.yml build

      - name: Run medium tests
        if: ${{ !cancelled() }}
        run: make test-medium

  e2e-tests-server-cli:
    name: End-to-End Tests (Server & CLI)
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_e2e_server_cli == 'true' }}
    runs-on: mich
    defaults:
      run:
        working-directory: ./e2e

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './e2e/.nvmrc'

      - name: Run setup typescript-sdk
        run: npm ci && npm run build
        working-directory: ./open-api/typescript-sdk
        if: ${{ !cancelled() }}

      - name: Run setup cli
        run: npm ci && npm run build
        working-directory: ./cli
        if: ${{ !cancelled() }}

      - name: Install dependencies
        run: npm ci
      - name: Install Playwright Browsers
        run: npx playwright install --with-deps chromium
        if: ${{ !cancelled() }}

      - name: Docker build
@@ -306,51 +208,12 @@ jobs:
        run: npm run test
        if: ${{ !cancelled() }}

  e2e-tests-web:
    name: End-to-End Tests (Web)
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_e2e_web == 'true' }}
    runs-on: mich
    defaults:
      run:
        working-directory: ./e2e

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './e2e/.nvmrc'

      - name: Run setup typescript-sdk
        run: npm ci && npm run build
        working-directory: ./open-api/typescript-sdk
        if: ${{ !cancelled() }}

      - name: Install dependencies
        run: npm ci
        if: ${{ !cancelled() }}

      - name: Install Playwright Browsers
        run: npx playwright install --with-deps chromium
        if: ${{ !cancelled() }}

      - name: Docker build
        run: docker compose build
        if: ${{ !cancelled() }}

      - name: Run e2e tests (web)
        run: npx playwright test
        if: ${{ !cancelled() }}

  mobile-unit-tests:
    name: Unit Test Mobile
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
    name: Mobile
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
@@ -358,15 +221,13 @@ jobs:
        uses: subosito/flutter-action@v2
        with:
          channel: 'stable'
          flutter-version-file: ./mobile/pubspec.yaml
          flutter-version: '3.16.9'
      - name: Run tests
        working-directory: ./mobile
        run: flutter test -j 1

  ml-unit-tests:
    name: Unit Test ML
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
    name: Machine Learning
    runs-on: ubuntu-latest
    defaults:
      run:
@@ -412,32 +273,16 @@ jobs:
    name: OpenAPI Clients
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './server/.nvmrc'

      - name: Install server dependencies
        run: npm --prefix=server ci

      - name: Build the app
        run: npm --prefix=server run build

      - uses: actions/checkout@v4
      - name: Run API generation
        run: make open-api

      - name: Find file changes
        uses: tj-actions/verify-changed-files@v20
        uses: tj-actions/verify-changed-files@v19
        id: verify-changed-files
        with:
          files: |
            mobile/openapi
            open-api/typescript-sdk
            open-api/immich-openapi-specs.json

      - name: Verify files have not changed
        if: steps.verify-changed-files.outputs.files_changed == 'true'
        run: |
@@ -470,11 +315,6 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './server/.nvmrc'

      - name: Install server dependencies
        run: npm ci

@@ -492,7 +332,7 @@ jobs:
        run: npm run typeorm:migrations:generate ./src/migrations/TestMigration

      - name: Find file changes
        uses: tj-actions/verify-changed-files@v20
        uses: tj-actions/verify-changed-files@v19
        id: verify-changed-files
        with:
          files: |
@@ -505,12 +345,12 @@ jobs:
          exit 1

      - name: Run SQL generation
        run: npm run sync:sql
        run: npm run sql:generate
        env:
          DB_URL: postgres://postgres:postgres@localhost:5432/immich

      - name: Find file changes
        uses: tj-actions/verify-changed-files@v20
        uses: tj-actions/verify-changed-files@v19
        id: verify-changed-sql-files
        with:
          files: |
.gitignore (vendored): 7 changes

@@ -14,12 +14,7 @@ mobile/gradle.properties
mobile/openapi/pubspec.lock
mobile/*.jks
mobile/libisar.dylib
mobile/openapi/test
mobile/openapi/doc
mobile/openapi/.openapi-generator/FILES

open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml

vite.config.js.timestamp-*
mobile/ios/fastlane/report.xml
.gitmodules (vendored): 4 changes

@@ -1,6 +1,6 @@
[submodule "mobile/.isar"]
	path = mobile/.isar
	url = https://github.com/isar/isar
[submodule "e2e/test-assets"]
	path = e2e/test-assets
[submodule "server/test/assets"]
	path = server/test/assets
	url = https://github.com/immich-app/test-assets
.vscode/launch.json (vendored): 8 changes

@@ -5,8 +5,8 @@
      "type": "node",
      "request": "attach",
      "restart": true,
      "port": 9231,
      "name": "Immich API Server",
      "port": 9230,
      "name": "Immich Server",
      "remoteRoot": "/usr/src/app",
      "localRoot": "${workspaceFolder}/server"
    },
@@ -14,8 +14,8 @@
      "type": "node",
      "request": "attach",
      "restart": true,
      "port": 9230,
      "name": "Immich Workers",
      "port": 9231,
      "name": "Immich Microservices",
      "remoteRoot": "/usr/src/app",
      "localRoot": "${workspaceFolder}/server"
    }
.vscode/settings.json (vendored): 14 changes

@@ -1,16 +1,6 @@
{
  "editor.formatOnSave": true,
  "[javascript]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.tabSize": 2,
    "editor.formatOnSave": true
  },
  "[typescript]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.tabSize": 2,
    "editor.formatOnSave": true
  },
  "[css]": {
  "[javascript][typescript][css]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.tabSize": 2,
    "editor.formatOnSave": true
@@ -41,4 +31,4 @@
  "explorer.fileNesting.patterns": {
    "*.ts": "${capture}.spec.ts,${capture}.mock.ts"
  }
}
}
@@ -131,4 +131,4 @@ conduct enforcement ladder](https://github.com/mozilla/diversity).

For answers to common questions about this code of conduct, see the
FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.
available at https://www.contributor-covenant.org/translations.
Makefile: 71 changes

@@ -10,6 +10,15 @@ dev-update:
dev-scale:
	docker compose -f ./docker/docker-compose.dev.yml up --build -V --scale immich-server=3 --remove-orphans

stage:
	docker compose -f ./docker/docker-compose.staging.yml up --build -V --remove-orphans

pull-stage:
	docker compose -f ./docker/docker-compose.staging.yml pull

server-e2e-jobs:
	docker compose -f ./server/e2e/docker-compose.server-e2e.yml up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server --remove-orphans --build

.PHONY: e2e
e2e:
	docker compose -f ./e2e/docker-compose.yml up --build -V --remove-orphans
@@ -31,67 +40,7 @@ open-api-typescript:
	cd ./open-api && bash ./bin/generate-open-api.sh typescript

sql:
	npm --prefix server run sync:sql
	npm --prefix server run sql:generate

attach-server:
	docker exec -it docker_immich-server_1 sh

renovate:
	LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset

MODULES = e2e server web cli sdk

audit-%:
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
install-%:
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) i
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
		&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
format-%:
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
		&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
lint-%:
	npm --prefix $* run lint:fix
check-%:
	npm --prefix $* run check
check-web:
	npm --prefix web run check:typescript
	npm --prefix web run check:svelte
test-%:
	npm --prefix $* run test
test-e2e:
	docker compose -f ./e2e/docker-compose.yml build
	npm --prefix e2e run test
	npm --prefix e2e run test:web
test-medium:
	docker run \
		--rm \
		-v ./server/src:/usr/src/app/src \
		-v ./server/test:/usr/src/app/test \
		-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
		-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
		-e NODE_ENV=development \
		immich-server:latest \
		-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
	docker exec -it immich_server /bin/sh -c "npm run test:medium"

build-all: $(foreach M,$(MODULES),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(MODULES),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(MODULES),test-$M) ;

clean:
	find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
	find . -name "dist" -type d -prune -exec rm -rf '{}' +
	find . -name "build" -type d -prune -exec rm -rf '{}' +
	find . -name "svelte-kit" -type d -prune -exec rm -rf '{}' +
	docker compose -f ./docker/docker-compose.dev.yml rm -v -f || true
	docker compose -f ./e2e/docker-compose.yml rm -v -f || true
88
README.md
@@ -1,7 +1,7 @@
<p align="center">
<br/>
<a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a>
<a href="https://discord.immich.app">
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
@@ -17,7 +17,6 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>

<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
@@ -30,11 +29,6 @@
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
</p>

## Disclaimer
@@ -44,36 +38,45 @@
- ⚠️ **Do not use the app as the only way to store your photos and videos.**
- ⚠️ Always follow [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) backup plan for your precious photos and videos!

> [!NOTE]
> You can find the main documentation, including installation guides, at https://immich.app/.
## Content

## Links

- [Documentation](https://immich.app/docs)
- [About](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Roadmap](https://immich.app/roadmap)
- [Official Documentation](https://immich.app/docs)
- [Roadmap](https://github.com/orgs/immich-app/projects/1)
- [Demo](#demo)
- [Features](#features)
- [Translations](https://immich.app/docs/developer/translations)
- [Contributing](https://immich.app/docs/overview/support-the-project)
- [Introduction](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Contribution Guidelines](https://immich.app/docs/overview/support-the-project)
- [Support The Project](#support-the-project)

## Documentation

You can find the main documentation, including installation guides, at https://immich.app/.

## Demo

Access the demo [here](https://demo.immich.app). The demo is running on a Free-tier Oracle VM in Amsterdam with a 2.4Ghz quad-core ARM64 CPU and 24GB RAM.
You can access the web demo at https://demo.immich.app

For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`

### Login credentials
```bash title="Demo Credential"
The credential
email: demo@immich.app
password: demo
```

| Email | Password |
| --------------- | -------- |
| demo@immich.app | demo |
```
Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
```

## Activities


## Features

| Features | Mobile | Web |
| :------------------------------------------- | ------ | --- |
| :--------------------------------------------- | -------- | ----- |
| Upload and view videos and photos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Prevent duplication of assets | Yes | Yes |
@@ -93,7 +96,7 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| LivePhoto/MotionPhoto backup and playback | Yes | Yes |
| Support 360 degree image display | No | Yes |
| User-defined storage structure | Yes | Yes |
| Public Sharing | Yes | Yes |
| Public Sharing | No | Yes |
| Archive and Favorites | Yes | Yes |
| Global Map | Yes | Yes |
| Partner Sharing | Yes | Yes |
@@ -102,22 +105,31 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |

## Translations
## Support the project

Read more about translations [here](https://immich.app/docs/developer/translations).
I've committed to this project, and I will not stop. I will keep updating the docs, adding new features, and fixing bugs. But I can't do it alone. So I need your help to give me additional motivation to keep going.

<a href="https://hosted.weblate.org/engage/immich/">
<img src="https://hosted.weblate.org/widget/immich/immich/multi-auto.svg" alt="Translation status" />
As our hosts in the [selfhosted.show - In the episode 'The-organization-must-not-be-name is a Hostile Actor'](https://selfhosted.show/79?t=1418) said, this is a massive undertaking of what the team and I are doing. And I would love to someday be able to do this full-time, and I am asking for your help to make that happen.

If you feel like this is the right cause and the app is something you are seeing yourself using for a long time, please consider supporting the project with the option below.

### Donation

- [Monthly donation](https://github.com/sponsors/immich-app) via GitHub Sponsors
- [One-time donation](https://github.com/sponsors/immich-app?frequency=one-time&sponsor=alextran1502) via GitHub Sponsors
- [Liberapay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
- ZCash: u1smm4wvqegcp46zss2jf5xptchgeczp4rx7a0wu3mermf2wxahm26yyz5w9mw3f2p4emwlljxjumg774kgs8rntt9yags0whnzane4n67z4c7gppq4yyvcj404ne3r769prwzd9j8ntvqp44fa6d67sf7rmcfjmds3gmeceff4u8e92rh38nd30cr96xw6vfhk6scu4ws90ldzupr3sz

## Contributors

<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>

## Repository activity



## Star history
## Star History

<a href="https://star-history.com/#immich-app/immich&Date">
<picture>
@@ -126,9 +138,3 @@ Read more about translations [here](https://immich.app/docs/developer/translatio
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=immich-app/immich&type=Date" width="100%" />
</picture>
</a>

## Contributors

<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>

@@ -2,4 +2,4 @@

## Reporting a Vulnerability

Please report security issues to `security@immich.app`
Please report security issues to `alex.tran1502@gmail.com`
1
cli/.eslintignore
Normal file
@@ -0,0 +1 @@
/dist
28
cli/.eslintrc.cjs
Normal file
@@ -0,0 +1,28 @@
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module',
tsconfigRootDir: __dirname,
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: ['plugin:@typescript-eslint/recommended', 'plugin:prettier/recommended', 'plugin:unicorn/recommended'],
root: true,
env: {
node: true,
},
ignorePatterns: ['.eslintrc.js'],
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
},
};
@@ -1 +0,0 @@
22.11.0
@@ -1,4 +1,4 @@
FROM node:22.11.0-alpine3.20@sha256:dc8ba2f61dd86c44e43eb25a7812ad03c5b1b224a19fc6f77e1eb9e5669f0b82 AS core
FROM node:20-alpine3.19@sha256:7e227295e96f5b00aa79555ae166f50610940d888fc2e321cf36304cbd17d7d6 as core

WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./
@@ -16,4 +16,4 @@ RUN npm run build

WORKDIR /import

ENTRYPOINT ["node", "/usr/src/app/dist"]
ENTRYPOINT ["node", "/usr/src/app/dist"]
@@ -4,18 +4,8 @@ Please see the [Immich CLI documentation](https://immich.app/docs/features/comma

# For developers

Before building the CLI, you must build the immich server and the open-api client. To build the server run the following in the server folder:

$ npm install
$ npm run build

Then, to build the open-api client run the following in the open-api folder:

$ ./bin/generate-open-api.sh

To run the Immich CLI from source, run the following in the cli folder:

$ npm install
$ npm run build
$ ts-node .

@@ -27,4 +17,3 @@ You can also build and install the CLI using

$ npm run build
$ npm install -g .
****

@@ -1,61 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});

export default [
{
ignores: ['eslint.config.mjs', 'dist'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},

languageOptions: {
globals: {
...globals.node,
},

parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',

parserOptions: {
project: 'tsconfig.json',
tsconfigRootDir: __dirname,
},
},

rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'object-shorthand': ['error', 'always'],
},
},
];
2233
cli/package-lock.json
generated
@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.31",
"version": "2.2.0",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -13,33 +13,30 @@
"cli"
],
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.9.0",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"@types/node": "^20.3.1",
"@typescript-eslint/eslint-plugin": "^7.0.0",
"@typescript-eslint/parser": "^7.0.0",
"@vitest/coverage-v8": "^1.2.2",
"byte-size": "^8.1.1",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.0.0",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
"globals": "^15.9.0",
"eslint-plugin-unicorn": "^52.0.0",
"glob": "^10.3.1",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"prettier-plugin-organize-imports": "^3.2.4",
"typescript": "^5.3.3",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.4.0",
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^1.2.2",
"yaml": "^2.3.1"
},
"scripts": {
@@ -62,11 +59,6 @@
"node": ">=20.0.0"
},
"dependencies": {
"fast-glob": "^3.3.2",
"fastq": "^1.17.1",
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.11.0"
}
}

@@ -1,201 +0,0 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { describe, expect, it, vi } from 'vitest';

import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';

import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';

vi.mock('@immich/sdk');

describe('getAlbumName', () => {
it('should return a non-undefined value', () => {
if (os.platform() === 'win32') {
// This is meaningless for Unix systems.
expect(getAlbumName(String.raw`D:\test\Filename.txt`, {} as UploadOptionsDto)).toBe('test');
}
expect(getAlbumName('D:/parentfolder/test/Filename.txt', {} as UploadOptionsDto)).toBe('test');
});

it('has higher priority to return `albumName` in `options`', () => {
expect(getAlbumName('/parentfolder/test/Filename.txt', { albumName: 'example' } as UploadOptionsDto)).toBe(
'example',
);
});
});

describe('uploadFiles', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const baseUrl = 'http://example.com';
const apiKey = 'key';
const retry = 3;

const fetchMocker = createFetchMock(vi);

beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);

// Defaults
vi.mocked(defaults).baseUrl = baseUrl;
vi.mocked(defaults).headers = { 'x-api-key': apiKey };

fetchMocker.enableMocks();
fetchMocker.resetMocks();
});

it('returns new assets when upload file is successful', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});

await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});

it('returns new assets when upload file retry is successful', async () => {
let counter = 0;
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
counter++;
if (counter < retry) {
throw new Error('Network error');
}

return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});

await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});

it('returns new assets when upload file retry is failed', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
throw new Error('Network error');
});

await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([]);
});
});

describe('checkForDuplicates', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const testFileChecksum = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'; // SHA1
const retry = 3;

beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);
});

it('checks duplicates', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});

await checkForDuplicates([testFilePath], { concurrency: 1 });

expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
{
checksum: testFileChecksum,
id: testFilePath,
},
],
},
});
});

it('returns duplicates when check duplicates is rejected', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Reject,
id: testFilePath,
assetId: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
reason: Reason.Duplicate,
},
],
});

await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
],
newFiles: [],
});
});

it('returns new assets when check duplicates is accepted', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});

await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});

it('returns results when check duplicates retry is successful', async () => {
let mocked = vi.mocked(checkBulkUpload);
for (let i = 1; i < retry; i++) {
mocked = mocked.mockRejectedValueOnce(new Error('Network error'));
}
mocked.mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});

await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});

it('returns results when check duplicates retry is failed', async () => {
vi.mocked(checkBulkUpload).mockRejectedValue(new Error('Network error'));

await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [],
});
});
});
@@ -1,8 +1,7 @@
import {
Action,
AssetBulkUploadCheckResult,
AssetMediaResponseDto,
AssetMediaStatus,
AssetFileUploadResponseDto,
addAssetsToAlbum,
checkBulkUpload,
createAlbum,
@@ -15,8 +14,8 @@ import { Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import os from 'node:os';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';

const s = (count: number) => (count === 1 ? '' : 's');
@@ -25,9 +24,9 @@ const s = (count: number) => (count === 1 ? '' : 's');
type AssetBulkUploadCheckResults = Array<AssetBulkUploadCheckResult & { id: string }>;
type Asset = { id: string; filepath: string };

export interface UploadOptionsDto {
interface UploadOptionsDto {
recursive?: boolean;
ignore?: string;
exclusionPatterns?: string[];
dryRun?: boolean;
skipHash?: boolean;
delete?: boolean;
@@ -76,7 +75,7 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
const files = await crawl({
pathsToCrawl,
recursive: options.recursive,
exclusionPattern: options.ignore,
exclusionPatterns: options.exclusionPatterns,
includeHidden: options.includeHidden,
extensions: [...image, ...video],
});
@@ -84,7 +83,7 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files;
};

export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) {
console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] };
@@ -100,50 +99,32 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
const newFiles: string[] = [];
const duplicates: Asset[] = [];

const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (filepaths: string[]) => {
const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
try {
// TODO refactor into a queue
for (const items of chunk(files, concurrency)) {
const dto = await Promise.all(items.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })));
const { results } = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });

for (const { id: filepath, assetId, action } of results as AssetBulkUploadCheckResults) {
if (action === Action.Accept) {
newFiles.push(filepath);
} else {
// rejects are always duplicates
duplicates.push({ id: assetId as string, filepath });
}
progressBar.increment();
}
progressBar.increment(filepaths.length);
return results;
},
{ concurrency, retry: 3 },
);

for (const items of chunk(files, concurrency)) {
await queue.push(items);
}
} finally {
progressBar.stop();
}

await queue.drained();

progressBar.stop();

console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);

// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to verify ${failedTasks.length} file${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}

return { newFiles, duplicates };
};

export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.');
return [];
@@ -160,7 +141,7 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo

if (dryRun) {
console.log(`Would have uploaded ${files.length} asset${s(files.length)} (${byteSize(totalSize)})`);
return files.map((filepath) => ({ id: '', filepath }));
return [];
}

const uploadProgress = new SingleBar(
@@ -177,56 +158,41 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo

const newAssets: Asset[] = [];

const queue = new Queue<string, AssetMediaResponseDto>(
async (filepath: string) => {
const stats = statsMap.get(filepath);
if (!stats) {
throw new Error(`Stats not found for ${filepath}`);
}
try {
for (const items of chunk(files, concurrency)) {
await Promise.all(
items.map(async (filepath) => {
const stats = statsMap.get(filepath) as Stats;
const response = await uploadFile(filepath, stats);

const response = await uploadFile(filepath, stats);
newAssets.push({ id: response.id, filepath });
if (response.status === AssetMediaStatus.Duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}
newAssets.push({ id: response.id, filepath });

uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
if (response.duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}

return response;
},
{ concurrency, retry: 3 },
);
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });

for (const filepath of files) {
await queue.push(filepath);
return response;
}),
);
}
} finally {
uploadProgress.stop();
}

await queue.drained();

uploadProgress.stop();

console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) {
console.log(`Skipped ${duplicateCount} duplicate asset${s(duplicateCount)} (${byteSize(duplicateSize)})`);
}

// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to upload ${failedTasks.length} asset${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}

return newAssets;
};

const uploadFile = async (input: string, stats: Stats): Promise<AssetMediaResponseDto> => {
const uploadFile = async (input: string, stats: Stats): Promise<AssetFileUploadResponseDto> => {
const { baseUrl, headers } = defaults;

const assetPath = path.parse(input);
@@ -259,7 +225,7 @@ const uploadFile = async (input: string, stats: Stats): Promise<AssetMediaRespon
formData.append('sidecarData', sidecarData);
}

const response = await fetch(`${baseUrl}/assets`, {
const response = await fetch(`${baseUrl}/asset/upload`, {
method: 'post',
redirect: 'error',
headers: headers as Record<string, string>,
@@ -278,7 +244,7 @@ const deleteFiles = async (files: string[], options: UploadOptionsDto): Promise<
}

if (options.dryRun) {
console.log(`Would have deleted ${files.length} local asset${s(files.length)}`);
console.log(`Would now have deleted assets, but skipped due to dry run`);
return;
}

@@ -319,7 +285,7 @@ const updateAlbums = async (assets: Asset[], options: UploadOptionsDto) => {
if (dryRun) {
// TODO print asset counts for new albums
console.log(`Would have created ${newAlbums.size} new album${s(newAlbums.size)}`);
console.log(`Would have updated albums of ${assets.length} asset${s(assets.length)}`);
console.log(`Would have updated ${assets.length} asset${s(assets.length)}`);
return;
}

@@ -379,9 +345,7 @@ const updateAlbums = async (assets: Asset[], options: UploadOptionsDto) => {
}
};

// `filepath` valid format:
// - Windows: `D:\\test\\Filename.txt` or `D:/test/Filename.txt`
// - Unix: `/test/Filename.txt`
export const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
return options.albumName ?? path.basename(path.dirname(filepath));
const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
const folderName = os.platform() === 'win32' ? filepath.split('\\').at(-2) : filepath.split('/').at(-2);
return options.albumName ?? folderName;
};

@@ -1,4 +1,4 @@
import { getMyUser } from '@immich/sdk';
import { getMyUserInfo } from '@immich/sdk';
import { existsSync } from 'node:fs';
import { mkdir, unlink } from 'node:fs/promises';
import { BaseOptions, connect, getAuthFilePath, logError, withError, writeAuthFile } from 'src/utils';
@@ -10,13 +10,13 @@ export const login = async (url: string, key: string, options: BaseOptions) => {

await connect(url, key);

const [error, user] = await withError(getMyUser());
const [error, userInfo] = await withError(getMyUserInfo());
if (error) {
logError(error, 'Failed to load user info');
process.exit(1);
}

console.log(`Logged in as ${user.email}`);
console.log(`Logged in as ${userInfo.email}`);

if (!existsSync(configDir)) {
// Create config folder if it doesn't exist

@@ -1,4 +1,4 @@
import { getAssetStatistics, getMyUser, getServerVersion, getSupportedMediaTypes } from '@immich/sdk';
import { getAssetStatistics, getMyUserInfo, getServerVersion, getSupportedMediaTypes } from '@immich/sdk';
import { BaseOptions, authenticate } from 'src/utils';

export const serverInfo = async (options: BaseOptions) => {
@@ -8,7 +8,7 @@ export const serverInfo = async (options: BaseOptions) => {
getServerVersion(),
getSupportedMediaTypes(),
getAssetStatistics({}),
getMyUser(),
getMyUserInfo(),
]);

console.log(`Server Info (via ${userInfo.email})`);

@@ -44,7 +44,7 @@ program
.description('Upload assets')
.usage('[paths...] [options]')
.addOption(new Option('-r, --recursive', 'Recursive').env('IMMICH_RECURSIVE').default(false))
.addOption(new Option('-i, --ignore <pattern>', 'Pattern to ignore').env('IMMICH_IGNORE_PATHS'))
.addOption(new Option('-i, --ignore [paths...]', 'Paths to ignore').env('IMMICH_IGNORE_PATHS').default([]))
.addOption(new Option('-h, --skip-hash', "Don't hash files before upload").env('IMMICH_SKIP_HASH').default(false))
.addOption(new Option('-H, --include-hidden', 'Include hidden folders').env('IMMICH_INCLUDE_HIDDEN').default(false))
.addOption(
@@ -60,8 +60,7 @@ program
.addOption(
new Option('-n, --dry-run', "Don't perform any actions, just show what will be done")
.env('IMMICH_DRY_RUN')
.default(false)
.conflicts('skipHash'),
.default(false),
)
.addOption(
new Option('-c, --concurrency <number>', 'Number of assets to upload at the same time')

131
cli/src/queue.ts
@@ -1,131 +0,0 @@
import * as fastq from 'fastq';
import { uniqueId } from 'lodash-es';

export type Task<T, R> = {
readonly id: string;
status: 'idle' | 'processing' | 'succeeded' | 'failed';
data: T;
error: unknown | undefined;
count: number;
// TODO: Could be useful to adding progress property.
// TODO: Could be useful to adding start_at/end_at/duration properties.
result: undefined | R;
};

export type QueueOptions = {
verbose?: boolean;
concurrency?: number;
retry?: number;
// TODO: Could be useful to adding timeout property for retry.
};

export type ComputedQueueOptions = Required<QueueOptions>;

export const defaultQueueOptions = {
concurrency: 1,
retry: 0,
verbose: false,
};

/**
* An in-memory queue that processes tasks in parallel with a given concurrency.
* @see {@link https://www.npmjs.com/package/fastq}
* @template T - The type of the worker task data.
* @template R - The type of the worker output data.
*/
export class Queue<T, R> {
private readonly queue: fastq.queueAsPromised<string, Task<T, R>>;
private readonly store = new Map<string, Task<T, R>>();
readonly options: ComputedQueueOptions;
readonly worker: (data: T) => Promise<R>;

/**
* Create a new queue.
* @param worker - The worker function that processes the task.
* @param options - The queue options.
*/
constructor(worker: (data: T) => Promise<R>, options?: QueueOptions) {
this.options = { ...defaultQueueOptions, ...options };
this.worker = worker;
this.store = new Map<string, Task<T, R>>();
this.queue = this.buildQueue();
}

get tasks(): Task<T, R>[] {
const tasks: Task<T, R>[] = [];
for (const task of this.store.values()) {
tasks.push(task);
}
return tasks;
}

getTask(id: string): Task<T, R> {
const task = this.store.get(id);
if (!task) {
throw new Error(`Task with id ${id} not found`);
}
return task;
}

/**
* Wait for the queue to be empty.
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to a `unhandledRejection`.
*/
async drained(): Promise<void> {
await this.queue.drain();
}

/**
* Add a task at the end of the queue.
* @see {@link https://www.npmjs.com/package/fastq}
* @param data
* @returns Promise<void> - A Promise that will be fulfilled (rejected) when the task is completed successfully (unsuccessfully).
* This promise could be ignored as it will not lead to a `unhandledRejection`.
*/
async push(data: T): Promise<Task<T, R>> {
const id = uniqueId();
const task: Task<T, R> = { id, status: 'idle', error: undefined, count: 0, data, result: undefined };
this.store.set(id, task);
return this.queue.push(id);
}

// TODO: Support more function delegation to fastq.

private buildQueue(): fastq.queueAsPromised<string, Task<T, R>> {
return fastq.promise((id: string) => {
const task = this.getTask(id);
return this.work(task);
}, this.options.concurrency);
}

private async work(task: Task<T, R>): Promise<Task<T, R>> {
task.count += 1;
task.error = undefined;
task.status = 'processing';
if (this.options.verbose) {
console.log('[task] processing:', task);
}
try {
task.result = await this.worker(task.data);
task.status = 'succeeded';
if (this.options.verbose) {
console.log('[task] succeeded:', task);
}
return task;
} catch (error) {
task.error = error;
task.status = 'failed';
if (this.options.verbose) {
console.log('[task] failed:', task);
}
if (this.options.retry > 0 && task.count < this.options.retry) {
if (this.options.verbose) {
console.log('[task] retry:', task);
}
return this.work(task);
}
return task;
}
}
}
@@ -1,5 +1,4 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { CrawlOptions, crawl } from 'src/utils';

interface Test {
@@ -10,10 +9,6 @@ interface Test {

const cwd = process.cwd();

const readContent = (path: string) => {
return readFileSync(path).toString();
};

const extensions = [
'.jpg',
'.jpeg',
@@ -71,7 +66,7 @@ const tests: Test[] = [
test: 'should exclude by file extension',
options: {
pathsToCrawl: ['/photos/'],
exclusionPattern: '**/*.tif',
exclusionPatterns: ['**/*.tif'],
},
files: {
'/photos/image.jpg': true,
@@ -82,7 +77,7 @@ const tests: Test[] = [
test: 'should exclude by file extension without case sensitivity',
options: {
pathsToCrawl: ['/photos/'],
exclusionPattern: '**/*.TIF',
exclusionPatterns: ['**/*.TIF'],
},
files: {
'/photos/image.jpg': true,
@@ -93,7 +88,7 @@ const tests: Test[] = [
test: 'should exclude by folder',
options: {
pathsToCrawl: ['/photos/'],
exclusionPattern: '**/raw/**',
exclusionPatterns: ['**/raw/**'],
recursive: true,
},
files: {
@@ -115,7 +110,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},

{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {
@@ -213,7 +218,7 @@ const tests: Test[] = [
test: 'should support ignoring full filename',
options: {
pathsToCrawl: ['/photos'],
exclusionPattern: '**/image2.jpg',
exclusionPatterns: ['**/image2.jpg'],
},
files: {
'/photos/image1.jpg': true,
@@ -225,7 +230,7 @@ const tests: Test[] = [
test: 'should support ignoring file extensions',
options: {
pathsToCrawl: ['/photos'],
exclusionPattern: '**/*.png',
exclusionPatterns: ['**/*.png'],
},
files: {
'/photos/image1.jpg': true,
@@ -238,7 +243,7 @@ const tests: Test[] = [
options: {
pathsToCrawl: ['/photos'],
recursive: true,
exclusionPattern: '**/raw/**',
exclusionPatterns: ['**/raw/**'],
},
files: {
'/photos/image1.jpg': true,
@@ -251,10 +256,9 @@ const tests: Test[] = [
{
test: 'should support ignoring absolute paths',
options: {
// Currently, fast-glob has some caveat when dealing with `/`.
pathsToCrawl: ['/*s'],
pathsToCrawl: ['/'],
recursive: true,
exclusionPattern: '/images/**',
exclusionPatterns: ['/images/**'],
},
files: {
'/photos/image1.jpg': true,
@@ -272,16 +276,14 @@ describe('crawl', () => {
describe('crawl', () => {
for (const { test, options, files } of tests) {
it(test, async () => {
// The file contents is the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, ''])));

const actual = await crawl({ ...options, extensions });
const expected = Object.entries(files)
.filter((entry) => entry[1])
.map(([file]) => file);

// Compare file's content instead of path since a file can be represent in multiple ways.
expect(actual.map((path) => readContent(path)).sort()).toEqual(expected.sort());
expect(actual.sort()).toEqual(expected.sort());
});
}
});

@@ -1,10 +1,9 @@
import { getMyUser, init, isHttpError } from '@immich/sdk';
import { convertPathToPattern, glob } from 'fast-glob';
import { defaults, getMyUserInfo, isHttpError } from '@immich/sdk';
import { glob } from 'glob';
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';
import { readFile, stat, writeFile } from 'node:fs/promises';
import { platform } from 'node:os';
import { join, resolve } from 'node:path';
import { join } from 'node:path';
import yaml from 'yaml';

export interface BaseOptions {
@@ -47,9 +46,10 @@ export const connect = async (url: string, key: string) => {
// noop
}

init({ baseUrl: url, apiKey: key });
defaults.baseUrl = url;
defaults.headers = { 'x-api-key': key };

const [error] = await withError(getMyUser());
const [error] = await withError(getMyUserInfo());
if (isHttpError(error)) {
logError(error, 'Failed to connect to server');
process.exit(1);
@@ -104,16 +104,11 @@ export interface CrawlOptions {
pathsToCrawl: string[];
recursive?: boolean;
includeHidden?: boolean;
exclusionPattern?: string;
exclusionPatterns?: string[];
extensions: string[];
}

const convertPathToPatternOnWin = (path: string) => {
return platform() === 'win32' ? convertPathToPattern(path) : path;
};

export const crawl = async (options: CrawlOptions): Promise<string[]> => {
const { extensions: extensionsWithPeriod, recursive, pathsToCrawl, exclusionPattern, includeHidden } = options;
const { extensions: extensionsWithPeriod, recursive, pathsToCrawl, exclusionPatterns, includeHidden } = options;
const extensions = extensionsWithPeriod.map((extension) => extension.replace('.', ''));

if (pathsToCrawl.length === 0) {
@@ -125,42 +120,45 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {

for await (const currentPath of pathsToCrawl) {
try {
const absolutePath = resolve(currentPath);
const stats = await stat(absolutePath);
const stats = await stat(currentPath);
if (stats.isFile() || stats.isSymbolicLink()) {
crawledFiles.push(absolutePath);
crawledFiles.push(currentPath);
} else {
patterns.push(convertPathToPatternOnWin(absolutePath));
patterns.push(currentPath);
}
} catch (error: any) {
if (error.code === 'ENOENT') {
patterns.push(convertPathToPatternOnWin(currentPath));
patterns.push(currentPath);
} else {
throw error;
}
}
}

if (patterns.length === 0) {
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}

const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern;
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
if (recursive) {
searchPattern = searchPattern + '/**/';
}

searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;

const globbedFiles = await glob(searchPattern, {
absolute: true,
nocase: true,
nodir: true,
dot: includeHidden,
ignore: exclusionPatterns,
});

const globbedFiles = await glob(searchPatterns, {
absolute: true,
caseSensitiveMatch: false,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});
globbedFiles.push(...crawledFiles);
return globbedFiles.sort();
return [...crawledFiles, ...globbedFiles].sort();
};

export const sha1 = (filepath: string) => {

37
cli/src/version.ts
Normal file
@@ -0,0 +1,37 @@
import { version } from '../package.json';

export interface ICliVersion {
major: number;
minor: number;
patch: number;
}

export class CliVersion implements ICliVersion {
constructor(
public readonly major: number,
public readonly minor: number,
public readonly patch: number,
) {}

toString() {
return `${this.major}.${this.minor}.${this.patch}`;
}

toJSON() {
const { major, minor, patch } = this;
return { major, minor, patch };
}

static fromString(version: string): CliVersion {
const regex = /v?(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)/i;
const matchResult = version.match(regex);
if (matchResult) {
const [, major, minor, patch] = matchResult.map(Number);
return new CliVersion(major, minor, patch);
} else {
throw new Error(`Invalid version format: ${version}`);
}
}
}

export const cliVersion = CliVersion.fromString(version);
@@ -2,7 +2,6 @@ import { defineConfig } from 'vite';
import tsconfigPaths from 'vite-tsconfig-paths';

export default defineConfig({
resolve: { alias: { src: '/src' } },
build: {
rollupOptions: {
input: 'src/index.ts',

38
deployment/.gitignore
vendored
@@ -1,38 +0,0 @@
# OpenTofu

# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc

# Terragrunt

# terragrunt cache directories
**/.terragrunt-cache/*

# Terragrunt debug output file (when using `--terragrunt-debug` option)
# See: https://terragrunt.gruntwork.io/docs/reference/cli-options/#terragrunt-debug
terragrunt-debug.tfvars.json
@@ -1,38 +0,0 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.

provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.45.0"
constraints = "4.45.0"
hashes = [
"h1:/CGpnYMkLRDmqn4iAsh/jg7ELZ6QExUw03VdjKZyK5M=",
"h1:82C/ryqwQvxhBINYOOyF5ZzPW/k4zJ/RYT13eCdPgEc=",
"h1:8Wu1D7ZwbLGdHakLRAzoAJ5VqZ8I14qzkPv1OGNfIlg=",
"h1:CVq0CAibeueOuiNk0UQtwZvMLMof33n1BgskFPOymrk=",
"h1:FSS5Kq+L+CX1zARy8PhaF8edBFNgsLtds4Uo8MwJiK8=",
"h1:L4qsorLII7f8xSFmv6JOoWfLWDunWQEpK964Bxk7mtM=",
"h1:StO3PV5PDskSCnhoHhWHOPxu6hbzJUQggfLgOSkvhwg=",
"h1:Tjo+Er9ets5YrTRIdP9LBmi4p89nL/W+A7r8a1MM9nI=",
"h1:XIwT+AWvks1LTytePM9zls+O8ItxoqCfPOgHwuH9ivQ=",
"h1:aOXn/zuM1+5GGy/SSRx8q4EYCSTFE9Tr0twHPIf5/KE=",
"h1:lb+YcuZ4guYd8zE51vgSnDsRAD9IV00Z15l1i1X52s8=",
"h1:pYwNXGjfXA2rUEmotGMLWgmavT9D2rdHnV3TpuIK3ko=",
"h1:q1qrnPq6KkljwBrugCwzb7f0SVP4Lzkfh+EOLARY9V8=",
"h1:v9sL4cZLTV5Gu2004DDyy7209gT0JmudBCAD0WCr/JE=",
"zh:00be2a6adc76615a368491c7a026098103b6286deb31e3cfb037365dd39f095f",
"zh:05bd072e6119f7a5abff05c6064001f745473119a956586cf77ae843cf55d666",
"zh:228bbe61345c4e8e0bc6b698b4b9652abff65662ee72ede2aecb4c3efb91b243",
"zh:2948aeefe71ba041c94082cf931ecc95510d93af0a61d0a287880f5b9d24b11a",
"zh:5dfc2c5e95843ca54957212ee3ecb7ff06f2cf60bfd6ca278b5249fd70ac18f5",
"zh:69922cb45559b0b0544b9c2d31ed2d0fac9121faa75bc2f523484785b45d8e2b",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:9d83a0cbf72327286f7dbd63cd4af89059c648163fe6ed21b1df768e0518d445",
"zh:a8e1982945822c7d7aaa6ba8602c7247d1a3fad15d612f30eb323491a637bf8d",
"zh:c6d41ebd69ddb23e3dad49a0ebf1da5a9c7d8706a4f55d953115d371f407928b",
"zh:d03e5442b12846c2737f099d30cd23d9f85a0c6d65437ccb44819f9a6c4e1d7f",
"zh:d446f2e1186b35037aea03b0e27d8b032d2f069f194f84b3f0e2907b3a79a955",
"zh:e4d7549a4c856524e01f3dd4d69f57119ea205f7a0fa38dcfe154475b4ae9258",
"zh:e64b8915cb9686f85e77115bd674f2faf4f29880688067d7d0f1376566fdb3b0",
"zh:f046efdc55e6385cdd69baaa06a929bef9fe6809d373b0d2d6c7df8f8c23eddc",
]
}
@@ -1,11 +0,0 @@
terraform {
backend "pg" {}
required_version = "~> 1.7"

required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.45.0"
}
}
}
@@ -1,14 +0,0 @@
resource "cloudflare_pages_domain" "immich_app_release_domain" {
account_id = var.cloudflare_account_id
project_name = data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_name
domain = "immich.app"
}

resource "cloudflare_record" "immich_app_release_domain" {
name = "immich.app"
proxied = true
ttl = 1
type = "CNAME"
content = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}
@@ -1,3 +0,0 @@
provider "cloudflare" {
api_token = data.terraform_remote_state.api_keys_state.outputs.terraform_key_cloudflare_docs
}
@@ -1,27 +0,0 @@
data "terraform_remote_state" "api_keys_state" {
backend = "pg"

config = {
conn_str = var.tf_state_postgres_conn_str
schema_name = "prod_cloudflare_api_keys"
}
}

data "terraform_remote_state" "cloudflare_account" {
backend = "pg"

config = {
conn_str = var.tf_state_postgres_conn_str
schema_name = "prod_cloudflare_account"
}
}

data "terraform_remote_state" "cloudflare_immich_app_docs" {
backend = "pg"

config = {
conn_str = var.tf_state_postgres_conn_str
schema_name = "prod_cloudflare_immich_app_docs_${var.prefix_name}"
}
}

@@ -1,20 +0,0 @@
terraform {
source = "."

extra_arguments custom_vars {
commands = get_terraform_commands_that_need_vars()
}
}

include {
path = find_in_parent_folders("state.hcl")
}

remote_state {
backend = "pg"

config = {
conn_str = get_env("TF_STATE_POSTGRES_CONN_STR")
schema_name = "prod_cloudflare_immich_app_docs_release"
}
}
@@ -1,4 +0,0 @@
variable "cloudflare_account_id" {}
variable "tf_state_postgres_conn_str" {}

variable "prefix_name" {}
@@ -1,38 +0,0 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.

provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.45.0"
constraints = "4.45.0"
hashes = [
"h1:/CGpnYMkLRDmqn4iAsh/jg7ELZ6QExUw03VdjKZyK5M=",
"h1:82C/ryqwQvxhBINYOOyF5ZzPW/k4zJ/RYT13eCdPgEc=",
"h1:8Wu1D7ZwbLGdHakLRAzoAJ5VqZ8I14qzkPv1OGNfIlg=",
"h1:CVq0CAibeueOuiNk0UQtwZvMLMof33n1BgskFPOymrk=",
"h1:FSS5Kq+L+CX1zARy8PhaF8edBFNgsLtds4Uo8MwJiK8=",
"h1:L4qsorLII7f8xSFmv6JOoWfLWDunWQEpK964Bxk7mtM=",
"h1:StO3PV5PDskSCnhoHhWHOPxu6hbzJUQggfLgOSkvhwg=",
"h1:Tjo+Er9ets5YrTRIdP9LBmi4p89nL/W+A7r8a1MM9nI=",
"h1:XIwT+AWvks1LTytePM9zls+O8ItxoqCfPOgHwuH9ivQ=",
"h1:aOXn/zuM1+5GGy/SSRx8q4EYCSTFE9Tr0twHPIf5/KE=",
"h1:lb+YcuZ4guYd8zE51vgSnDsRAD9IV00Z15l1i1X52s8=",
"h1:pYwNXGjfXA2rUEmotGMLWgmavT9D2rdHnV3TpuIK3ko=",
"h1:q1qrnPq6KkljwBrugCwzb7f0SVP4Lzkfh+EOLARY9V8=",
"h1:v9sL4cZLTV5Gu2004DDyy7209gT0JmudBCAD0WCr/JE=",
"zh:00be2a6adc76615a368491c7a026098103b6286deb31e3cfb037365dd39f095f",
"zh:05bd072e6119f7a5abff05c6064001f745473119a956586cf77ae843cf55d666",
"zh:228bbe61345c4e8e0bc6b698b4b9652abff65662ee72ede2aecb4c3efb91b243",
"zh:2948aeefe71ba041c94082cf931ecc95510d93af0a61d0a287880f5b9d24b11a",
"zh:5dfc2c5e95843ca54957212ee3ecb7ff06f2cf60bfd6ca278b5249fd70ac18f5",
"zh:69922cb45559b0b0544b9c2d31ed2d0fac9121faa75bc2f523484785b45d8e2b",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:9d83a0cbf72327286f7dbd63cd4af89059c648163fe6ed21b1df768e0518d445",
"zh:a8e1982945822c7d7aaa6ba8602c7247d1a3fad15d612f30eb323491a637bf8d",
"zh:c6d41ebd69ddb23e3dad49a0ebf1da5a9c7d8706a4f55d953115d371f407928b",
"zh:d03e5442b12846c2737f099d30cd23d9f85a0c6d65437ccb44819f9a6c4e1d7f",
"zh:d446f2e1186b35037aea03b0e27d8b032d2f069f194f84b3f0e2907b3a79a955",
"zh:e4d7549a4c856524e01f3dd4d69f57119ea205f7a0fa38dcfe154475b4ae9258",
"zh:e64b8915cb9686f85e77115bd674f2faf4f29880688067d7d0f1376566fdb3b0",
"zh:f046efdc55e6385cdd69baaa06a929bef9fe6809d373b0d2d6c7df8f8c23eddc",
]
}
@@ -1,11 +0,0 @@
terraform {
backend "pg" {}
required_version = "~> 1.7"

required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.45.0"
}
}
}
@@ -1,26 +0,0 @@
resource "cloudflare_pages_domain" "immich_app_branch_domain" {
account_id = var.cloudflare_account_id
project_name = local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_name : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_name
domain = "${var.prefix_name}.${local.deploy_domain_prefix}.immich.app"
}

resource "cloudflare_record" "immich_app_branch_subdomain" {
name = "${var.prefix_name}.${local.deploy_domain_prefix}.immich.app"
proxied = true
ttl = 1
type = "CNAME"
content = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}

output "immich_app_branch_subdomain" {
value = cloudflare_record.immich_app_branch_subdomain.hostname
}

output "immich_app_branch_pages_hostname" {
value = cloudflare_record.immich_app_branch_subdomain.content
}

output "pages_project_name" {
value = cloudflare_pages_domain.immich_app_branch_domain.project_name
}
@@ -1,7 +0,0 @@
locals {
domain_name = "immich.app"
preview_prefix = contains(["branch", "pr"], var.prefix_event_type) ? "preview" : ""
archive_prefix = contains(["release"], var.prefix_event_type) ? "archive" : ""
deploy_domain_prefix = coalesce(local.preview_prefix, local.archive_prefix)
is_release = contains(["release"], var.prefix_event_type)
}
@@ -1,3 +0,0 @@
provider "cloudflare" {
api_token = data.terraform_remote_state.api_keys_state.outputs.terraform_key_cloudflare_docs
}
@@ -1,17 +0,0 @@
data "terraform_remote_state" "api_keys_state" {
backend = "pg"

config = {
conn_str = var.tf_state_postgres_conn_str
schema_name = "prod_cloudflare_api_keys"
}
}

data "terraform_remote_state" "cloudflare_account" {
backend = "pg"

config = {
conn_str = var.tf_state_postgres_conn_str
schema_name = "prod_cloudflare_account"
}
}
@@ -1,24 +0,0 @@
terraform {
source = "."

extra_arguments custom_vars {
commands = get_terraform_commands_that_need_vars()
}
}

include {
path = find_in_parent_folders("state.hcl")
}

locals {
prefix_name = get_env("TF_VAR_prefix_name")
}

remote_state {
backend = "pg"

config = {
conn_str = get_env("TF_STATE_POSTGRES_CONN_STR")
schema_name = "prod_cloudflare_immich_app_docs_${local.prefix_name}"
}
}
@@ -1,5 +0,0 @@
variable "cloudflare_account_id" {}
variable "tf_state_postgres_conn_str" {}

variable "prefix_name" {}
variable "prefix_event_type" {}
@@ -1,20 +0,0 @@
locals {
cloudflare_account_id = get_env("CLOUDFLARE_ACCOUNT_ID")
cloudflare_api_token = get_env("CLOUDFLARE_API_TOKEN")

tf_state_postgres_conn_str = get_env("TF_STATE_POSTGRES_CONN_STR")
}

remote_state {
backend = "pg"

config = {
conn_str = local.tf_state_postgres_conn_str
}
}

inputs = {
cloudflare_account_id = local.cloudflare_account_id
cloudflare_api_token = local.cloudflare_api_token
tf_state_postgres_conn_str = local.tf_state_postgres_conn_str
}
@@ -4,54 +4,51 @@
|
||||
|
||||
name: immich-dev
|
||||
|
||||
x-server-build: &server-common
|
||||
image: immich-server-dev:latest
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: server/Dockerfile
|
||||
target: dev
|
||||
restart: always
|
||||
volumes:
|
||||
- ../server:/usr/src/app
|
||||
- ../open-api:/usr/src/open-api
|
||||
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
|
||||
- ${UPLOAD_LOCATION}/photos/upload:/usr/src/app/upload/upload
|
||||
- /usr/src/app/node_modules
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 1048576
|
||||
hard: 1048576
|
||||
|
||||
services:
|
||||
immich-server:
|
||||
container_name: immich_server
|
||||
command: ['/usr/src/app/bin/immich-dev']
|
||||
image: immich-server-dev:latest
|
||||
# extends:
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: server/Dockerfile
|
||||
target: dev
|
||||
restart: always
|
||||
volumes:
|
||||
- ../server:/usr/src/app
|
||||
- ../open-api:/usr/src/open-api
|
||||
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
|
||||
- ${UPLOAD_LOCATION}/photos/upload:/usr/src/app/upload/upload
|
||||
- /usr/src/app/node_modules
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
IMMICH_REPOSITORY: immich-app/immich
|
||||
IMMICH_REPOSITORY_URL: https://github.com/immich-app/immich
|
||||
IMMICH_SOURCE_REF: local
|
||||
IMMICH_SOURCE_COMMIT: af2efbdbbddc27cd06142f22253ccbbbbeec1f55
|
||||
IMMICH_SOURCE_URL: https://github.com/immich-app/immich/commit/af2efbdbbddc27cd06142f22253ccbbbbeec1f55
|
||||
IMMICH_BUILD: '9654404849'
|
||||
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
|
||||
IMMICH_BUILD_IMAGE: development
|
||||
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
|
||||
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
|
||||
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
|
||||
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
|
||||
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 1048576
|
||||
hard: 1048576
|
||||
command: ['/usr/src/app/bin/immich-dev', 'immich']
|
||||
<<: *server-common
|
||||
ports:
|
||||
- 3001:3001
|
||||
- 9230:9230
|
||||
- 9231:9231
|
||||
depends_on:
|
||||
- redis
|
||||
- database
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
immich-microservices:
|
||||
container_name: immich_microservices
|
||||
command: ['/usr/src/app/bin/immich-dev', 'microservices']
|
||||
<<: *server-common
|
||||
# extends:
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
ports:
|
||||
- 9231:9230
|
||||
depends_on:
|
||||
- database
|
||||
- immich-server
|
||||
|
||||
immich-web:
|
||||
container_name: immich_web
|
||||
@@ -66,7 +63,6 @@ services:
|
||||
- 24678:24678
|
||||
volumes:
|
||||
- ../web:/usr/src/app
|
||||
- ../i18n:/usr/src/i18n
|
||||
- ../open-api/:/usr/src/open-api/
|
||||
- /usr/src/app/node_modules
|
||||
ulimits:
|
||||
@@ -98,14 +94,10 @@ services:
|
||||
depends_on:
|
||||
- database
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
redis:
|
||||
container_name: immich_redis
|
||||
image: redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
|
||||
healthcheck:
|
||||
test: redis-cli ping || exit 1
|
||||
image: redis:6.2-alpine@sha256:3fcb624d83a9c478357f16dc173c58ded325ccc5fd2a4375f3916c04cc579f70
|
||||
|
||||
database:
|
||||
container_name: immich_postgres
|
||||
@@ -116,34 +108,12 @@ services:
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||
POSTGRES_USER: ${DB_USERNAME}
|
||||
POSTGRES_DB: ${DB_DATABASE_NAME}
|
||||
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||
volumes:
|
||||
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
|
||||
ports:
|
||||
- 5432:5432
|
||||
healthcheck:
|
||||
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
|
||||
interval: 5m
|
||||
start_interval: 30s
|
||||
start_period: 5m
|
||||
command:
|
||||
[
|
||||
'postgres',
|
||||
'-c',
|
||||
'shared_preload_libraries=vectors.so',
|
||||
'-c',
|
||||
'search_path="$$user", public, vectors',
|
||||
'-c',
|
||||
'logging_collector=on',
|
||||
'-c',
|
||||
'max_wal_size=2GB',
|
||||
'-c',
|
||||
'shared_buffers=512MB',
|
||||
'-c',
|
||||
'wal_compression=on',
|
||||
]
|
||||
|
||||
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
|
||||
|
||||
# set IMMICH_METRICS=true in .env to enable metrics
|
||||
# immich-prometheus:
|
||||
# container_name: immich_prometheus
|
||||
# ports:
|
||||
|
||||
@@ -1,28 +1,39 @@
|
||||
name: immich-prod
|
||||
|
||||
x-server-build: &server-common
|
||||
image: immich-server:latest
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: server/Dockerfile
|
||||
volumes:
|
||||
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
|
||||
services:
|
||||
immich-server:
|
||||
container_name: immich_server
|
||||
image: immich-server:latest
|
||||
# extends:
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: server/Dockerfile
|
||||
volumes:
|
||||
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
command: ['start.sh', 'immich']
|
||||
<<: *server-common
|
||||
ports:
|
||||
- 2283:2283
|
||||
- 2283:3001
|
||||
depends_on:
|
||||
- redis
|
||||
- database
|
||||
restart: always
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
immich-microservices:
|
||||
container_name: immich_microservices
|
||||
command: ['start.sh', 'microservices']
|
||||
<<: *server-common
|
||||
# extends:
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
depends_on:
|
||||
- redis
|
||||
- database
|
||||
- immich-server
|
||||
|
||||
immich-machine-learning:
|
||||
container_name: immich_machine_learning
|
||||
@@ -35,21 +46,15 @@ services:
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
|
||||
ports:
|
||||
- 3003:3003
|
||||
volumes:
|
||||
- model-cache:/cache
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
redis:
|
||||
container_name: immich_redis
|
||||
image: redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
|
||||
healthcheck:
|
||||
test: redis-cli ping || exit 1
|
||||
image: redis:6.2-alpine@sha256:3fcb624d83a9c478357f16dc173c58ded325ccc5fd2a4375f3916c04cc579f70
|
||||
restart: always
|
||||
|
||||
database:
|
||||
@@ -61,40 +66,17 @@ services:
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||
POSTGRES_USER: ${DB_USERNAME}
|
||||
POSTGRES_DB: ${DB_DATABASE_NAME}
|
||||
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||
volumes:
|
||||
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
|
||||
ports:
|
||||
- 5432:5432
|
||||
healthcheck:
|
||||
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
|
||||
interval: 5m
|
||||
start_interval: 30s
|
||||
start_period: 5m
|
||||
command:
|
||||
[
|
||||
'postgres',
|
||||
'-c',
|
||||
'shared_preload_libraries=vectors.so',
|
||||
'-c',
|
||||
'search_path="$$user", public, vectors',
|
||||
'-c',
|
||||
'logging_collector=on',
|
||||
'-c',
|
||||
'max_wal_size=2GB',
|
||||
'-c',
|
||||
'shared_buffers=512MB',
|
||||
'-c',
|
||||
'wal_compression=on',
|
||||
]
|
||||
restart: always
|
||||
|
||||
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
|
||||
# set IMMICH_METRICS=true in .env to enable metrics
|
||||
immich-prometheus:
|
||||
container_name: immich_prometheus
|
||||
ports:
|
||||
- 9090:9090
|
||||
image: prom/prometheus@sha256:378f4e03703557d1c6419e6caccf922f96e6d88a530f7431d66a4c4f4b1000fe
|
||||
image: prom/prometheus@sha256:4f6c47e39a9064028766e8c95890ed15690c30f00c4ba14e7ce6ae1ded0295b1
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
- prometheus-data:/prometheus
|
||||
@@ -106,7 +88,7 @@ services:
|
||||
command: ['./run.sh', '-disable-reporting']
|
||||
ports:
|
||||
- 3000:3000
|
||||
image: grafana/grafana:11.3.0-ubuntu@sha256:51587e148ac0214d7938e7f3fe8512182e4eb6141892a3ffb88bba1901b49285
|
||||
image: grafana/grafana:10.4.1-ubuntu@sha256:65e0e7d0f0b001cb0478bce5093bff917677dc308dd27a0aa4b3ac38e4fd877c
|
||||
volumes:
|
||||
- grafana-data:/var/lib/grafana
|
||||
|
||||
|
||||
@@ -12,23 +12,35 @@ services:
|
||||
immich-server:
|
||||
container_name: immich_server
|
||||
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
|
||||
# extends:
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
command: ['start.sh', 'immich']
|
||||
volumes:
|
||||
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
|
||||
- ${UPLOAD_LOCATION}:/usr/src/app/upload
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
ports:
|
||||
- '2283:2283'
|
||||
- 2283:3001
|
||||
depends_on:
|
||||
- redis
|
||||
- database
|
||||
restart: always
|
||||
|
||||
immich-microservices:
|
||||
container_name: immich_microservices
|
||||
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
|
||||
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/hardware-transcoding
|
||||
# file: hwaccel.transcoding.yml
|
||||
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||
command: ['start.sh', 'microservices']
|
||||
volumes:
|
||||
- ${UPLOAD_LOCATION}:/usr/src/app/upload
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
env_file:
|
||||
- .env
|
||||
depends_on:
|
||||
- redis
|
||||
- database
|
||||
restart: always
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
immich-machine-learning:
|
||||
container_name: immich_machine_learning
|
||||
@@ -43,48 +55,21 @@ services:
|
||||
env_file:
|
||||
- .env
|
||||
restart: always
|
||||
healthcheck:
|
||||
disable: false
|
||||
|
||||
redis:
|
||||
container_name: immich_redis
|
||||
image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
|
||||
healthcheck:
|
||||
test: redis-cli ping || exit 1
|
||||
image: registry.hub.docker.com/library/redis:6.2-alpine@sha256:51d6c56749a4243096327e3fb964a48ed92254357108449cb6e23999c37773c5
|
||||
restart: always
|
||||
|
||||
database:
|
||||
container_name: immich_postgres
|
||||
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
|
||||
image: registry.hub.docker.com/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
|
||||
environment:
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||
POSTGRES_USER: ${DB_USERNAME}
|
||||
POSTGRES_DB: ${DB_DATABASE_NAME}
|
||||
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||
volumes:
|
||||
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
|
||||
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
|
||||
interval: 5m
|
||||
start_interval: 30s
|
||||
start_period: 5m
|
||||
command:
|
||||
[
|
||||
'postgres',
|
||||
'-c',
|
||||
'shared_preload_libraries=vectors.so',
|
||||
'-c',
|
||||
'search_path="$$user", public, vectors',
|
||||
'-c',
|
||||
'logging_collector=on',
|
||||
'-c',
|
||||
'max_wal_size=2GB',
|
||||
'-c',
|
||||
'shared_buffers=512MB',
|
||||
'-c',
|
||||
'wal_compression=on',
|
||||
]
|
||||
restart: always
|
||||
|
||||
volumes:
|
||||
|
||||
@@ -2,20 +2,18 @@
|
||||
|
||||
# The location where your uploaded files are stored
|
||||
UPLOAD_LOCATION=./library
|
||||
# The location where your database files are stored
|
||||
DB_DATA_LOCATION=./postgres
|
||||
|
||||
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
|
||||
# TZ=Etc/UTC
|
||||
|
||||
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
|
||||
IMMICH_VERSION=release
|
||||
|
||||
# Connection secret for postgres. You should change it to a random password
|
||||
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
|
||||
DB_PASSWORD=postgres
|
||||
|
||||
# The values below this line do not need to be changed
|
||||
###################################################################################
|
||||
DB_HOSTNAME=immich_postgres
|
||||
DB_USERNAME=postgres
|
||||
DB_DATABASE_NAME=immich
|
||||
DB_DATA_LOCATION=./postgres
|
||||
|
||||
REDIS_HOSTNAME=immich_redis
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
version: "3.8"
|
||||
|
||||
# Configurations for hardware-accelerated machine learning
|
||||
|
||||
# If using Unraid or another platform that doesn't allow multiple Compose files,
|
||||
# you can inline the config for a backend by copying its contents
|
||||
# you can inline the config for a backend by copying its contents
|
||||
# into the immich-machine-learning service in the docker-compose.yml file.
|
||||
|
||||
# See https://immich.app/docs/features/ml-hardware-acceleration for info on usage.
|
||||
@@ -28,7 +30,7 @@ services:
|
||||
|
||||
openvino:
|
||||
device_cgroup_rules:
|
||||
- 'c 189:* rmw'
|
||||
- "c 189:* rmw"
|
||||
devices:
|
||||
- /dev/dri:/dev/dri
|
||||
volumes:
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
version: "3.8"
|
||||
|
||||
# Configurations for hardware-accelerated transcoding
|
||||
|
||||
# If using Unraid or another platform that doesn't allow multiple Compose files,
|
||||
@@ -51,4 +53,5 @@ services:
|
||||
volumes:
|
||||
- /usr/lib/wsl:/usr/lib/wsl
|
||||
environment:
|
||||
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
|
||||
- LIBVA_DRIVER_NAME=d3d12
|
||||
|
||||
@@ -3,10 +3,10 @@ global:
|
||||
evaluation_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: immich_api
|
||||
- job_name: immich_server
|
||||
static_configs:
|
||||
- targets: ['immich-server:8081']
|
||||
|
||||
|
||||
- job_name: immich_microservices
|
||||
static_configs:
|
||||
- targets: ['immich-server:8082']
|
||||
- targets: ['immich-microservices:8081']
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
LOG_LEVEL="${IMMICH_LOG_LEVEL:='info'}"
|
||||
|
||||
logDebug() {
|
||||
if [ "$LOG_LEVEL" = "debug" ] || [ "$LOG_LEVEL" = "verbose" ]; then
|
||||
echo "DEBUG: $1" >&2
|
||||
fi
|
||||
}
|
||||
|
||||
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
|
||||
logDebug "cgroup v2 detected."
|
||||
if [ -f /sys/fs/cgroup/cpu.max ]; then
|
||||
read -r quota period </sys/fs/cgroup/cpu.max
|
||||
if [ "$quota" = "max" ]; then
|
||||
logDebug "No CPU limits set."
|
||||
unset quota period
|
||||
fi
|
||||
else
|
||||
logDebug "/sys/fs/cgroup/cpu.max not found."
|
||||
fi
|
||||
else
|
||||
logDebug "cgroup v1 detected."
|
||||
|
||||
if [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then
|
||||
quota=$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)
|
||||
period=$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)
|
||||
|
||||
if [ "$quota" = "-1" ]; then
|
||||
logDebug "No CPU limits set."
|
||||
unset quota period
|
||||
fi
|
||||
else
|
||||
logDebug "/sys/fs/cgroup/cpu/cpu.cfs_quota_us or /sys/fs/cgroup/cpu/cpu.cfs_period_us not found."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "${quota:-}" ] && [ -n "${period:-}" ]; then
|
||||
cpus=$((quota / period))
|
||||
if [ "$cpus" -eq 0 ]; then
|
||||
cpus=1
|
||||
fi
|
||||
else
|
||||
cpus=$(grep -c ^processor /proc/cpuinfo)
|
||||
fi
|
||||
|
||||
echo "$cpus"
|
||||
@@ -1 +0,0 @@
|
||||
22.11.0
|
||||
@@ -1,17 +1,17 @@
|
||||
# Website
|
||||
|
||||
This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
|
||||
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
|
||||
|
||||
### Installation
|
||||
|
||||
```
|
||||
$ npm install
|
||||
$ yarn
|
||||
```
|
||||
|
||||
### Local Development
|
||||
|
||||
```
|
||||
$ npm run start
|
||||
$ yarn start
|
||||
```
|
||||
|
||||
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
|
||||
@@ -19,7 +19,7 @@ This command starts a local development server and opens up a browser window. Mo
|
||||
### Build
|
||||
|
||||
```
|
||||
$ npm run build
|
||||
$ yarn build
|
||||
```
|
||||
|
||||
This command generates static content into the `build` directory and can be served using any static contents hosting service.
|
||||
@@ -29,13 +29,13 @@ This command generates static content into the `build` directory and can be serv
|
||||
Using SSH:
|
||||
|
||||
```
|
||||
$ USE_SSH=true npm run deploy
|
||||
$ USE_SSH=true yarn deploy
|
||||
```
|
||||
|
||||
Not using SSH:
|
||||
|
||||
```
|
||||
$ GIT_USER=<Your GitHub username> npm run deploy
|
||||
$ GIT_USER=<Your GitHub username> yarn deploy
|
||||
```
|
||||
|
||||
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
|
||||
|
||||
@@ -94,7 +94,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
|
||||
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
|
||||
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
|
||||
|
||||
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
|
||||
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
|
||||
|
||||
Cheer!
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
|
||||
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
|
||||
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
|
||||
|
||||
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
|
||||
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
|
||||
|
||||
Cheer!
|
||||
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
---
|
||||
title: The Immich core team goes full-time
|
||||
authors: [alextran]
|
||||
tags: [update, announcement, FUTO]
|
||||
date: 2024-05-01T00:00
|
||||
---
|
||||
|
||||
**Immich is joining [FUTO](https://futo.org/)!**
|
||||
|
||||
Since the beginning of this adventure, my goal has always been to create a better world for my children. Memories are priceless, and privacy should not be a luxury. However, building quality open source has its challenges. Over the past two years, it has taken significant dedication, time, and effort.
|
||||
|
||||
Recently, a company in Austin, Texas, called FUTO contacted the team. FUTO strives to develop quality and sustainable open software. They build software alternatives that focus on giving control to users. From their mission statement:
|
||||
|
||||
“Computers should belong to you, the people. We develop and fund technology to give them back.”
|
||||
|
||||
FUTO loved Immich and wanted to see if we’d consider working with them to take the project to the next level. In short, FUTO offered to:
|
||||
|
||||
- Pay the core team to work on Immich full-time
|
||||
- Let us keep full autonomy about the project’s direction and leadership
|
||||
- Continue to license Immich under AGPL
|
||||
- Keep Immich’s development direction with no paywalled features
|
||||
- Keep Immich “built for the people” (no ads, data mining/selling, or alternative motives)
|
||||
- Provide us with financial, technical, legal, and administrative support
|
||||
|
||||
After careful deliberation, the team decided that FUTO’s vision closely aligns with our own: to build a better future by providing a polished, performant, and privacy-preserving open-source software solution for photo and video management delivered in a sustainable way.
|
||||
|
||||
Immich’s future has never looked brighter, and we look forward to realizing our vision for Immich as part of FUTO.
|
||||
|
||||
If you have more questions, we’ll host a Q&A live stream on May 9th at 3PM UTC (10AM CST). [You can ask questions here](https://www.live-ask.com/event/01HWP2SB99A1K8EXFBDKZ5Z9CF), and the stream will be live [here on our YouTube channel](https://youtube.com/live/cwz2iZwYpgg).
|
||||
|
||||
Cheers,
|
||||
|
||||
The Immich Team
|
||||
|
||||
---
|
||||
|
||||
## FAQs
|
||||
|
||||
### What is FUTO?
|
||||
|
||||
[https://futo.org/what-is-futo/](https://futo.org/what-is-futo/)
|
||||
|
||||
### Will the license change?
|
||||
|
||||
No. Immich will continue to be licensed under AGPL without a CLA.
|
||||
|
||||
### Will Immich continue to be free?
|
||||
|
||||
Yes. The Immich source code will remain freely available under the AGPL license.
|
||||
|
||||
### Is Immich getting VC funding?
|
||||
|
||||
No. Venture capital implies investment in a business, often with the expectation of a future payout (exit plan). Immich is neither a business that can be acquired nor comes with a money-making exit plan.
|
||||
|
||||
### I am currently supporting Immich through GitHub sponsors. What will happen to my donation?
|
||||
|
||||
Effective immediately, all donations to the Immich organization will be canceled. In the future, we will offer an optional, modest payment option instead. Thank you to everyone who donated to help us get this far!
|
||||
|
||||
### How is funding sustainable?
|
||||
|
||||
Immich and FUTO believe a sustainable future requires a model that does not rely on users-as-a-product. To this end, FUTO advocates that users pay for good, open software. In keeping with this model, we will adopt a purchase price. This means we no longer accept donations, but — _without limiting features for those who do not pay_ — we will soon allow you to purchase Immich through a modest payment. We encourage you to pay for the high-quality software you use to foster a healthy software culture where developers build great applications without hidden motives for their users.
|
||||
|
||||
### When does this change take effect?
|
||||
|
||||
This change takes effect immediately.
|
||||
|
||||
### What will change?
|
||||
|
||||
The following things will change as Immich joins FUTO:
|
||||
|
||||
- The brand, logo, and other Immich trademarks will be transferred to FUTO.
|
||||
- We will stop all donations to the project.
|
||||
- The core team can now dedicate our full attention to Immich
|
||||
- Before the end of the year, we plan to have a roadmap for what it will take to get Immich to a stable release.
|
||||
- Bugs will be squashed, and features will be delivered faster.
|
||||
@@ -1,91 +0,0 @@
|
||||
---
|
||||
title: Licensing announcement - Purchase a license to support Immich
|
||||
authors: [alextran]
|
||||
tags: [update, announcement, FUTO]
|
||||
date: 2024-07-18T00:00
|
||||
---
|
||||
|
||||
Hello everybody,
|
||||
|
||||
Firstly, on behalf of the Immich team, I'd like to thank everybody for your continuous support of Immich since the very first day! Your contributions, encouragement, and community engagement have helped bring Immich to its current state. The team and I are forever grateful for that.
|
||||
|
||||
Since our [last announcement of the core team joining FUTO to work on Immich full-time](https://immich.app/blog/2024/immich-core-team-goes-fulltime), one of the goals of our new position is to foster a healthy relationship between the developers and the users. We believe that this enables us to create great software, establish transparent policies and build trust.
|
||||
|
||||
We want to build a great software application that brings value to you and your loved ones' lives. We are not using you as a product, i.e., selling or tracking your data. We are not putting annoying ads into our software. We respect your privacy. We want to be compensated for the hard work we put in to build Immich for you.
|
||||
|
||||
With those notes, we have enabled a way for you to financially support the continued development of Immich, ensuring the software can move forward and will be maintained, by offering a lifetime license of the software. We think if you like and use software, you should pay for it, but _we're never going to force anyone to pay or try to limit Immich for those who don't._
|
||||
|
||||
There are two types of license that you can choose to purchase: **Server License** and **Individual License**.
|
||||
|
||||
### Server License
|
||||
|
||||
This is a lifetime license costing **$99.99**. The license is applied to the whole server. You and all users that use your server are licensed.
|
||||
|
||||
### Individual License
|
||||
|
||||
This is a lifetime license costing **$24.99**. The license is applied to a single user, and can be used on any server they choose to connect to.
|
||||
|
||||
<img
|
||||
width="837"
|
||||
alt="license-social-gh"
|
||||
src="https://github.com/user-attachments/assets/241932ed-ef3b-44ec-a9e2-ee80754e0cca"
|
||||
/>
|
||||
|
||||
You can purchase the license on [our page - https://buy.immich.app](https://buy.immich.app).
|
||||
|
||||
Starting with release `v1.109.0` you can purchase and enter your purchased license key directly in the app.
|
||||
|
||||
<img
|
||||
width="1414"
|
||||
alt="license-page-gh"
|
||||
src="https://github.com/user-attachments/assets/364fc32a-f6ef-4594-9fea-28d5a26ad77c"
|
||||
/>
|
||||
|
||||
## Thank you
|
||||
|
||||
Thank you again for your support, this will help create a strong foundation and stability for the Immich team to continue developing and maintaining the project that you love to use.
|
||||
|
||||
<p align="center">
|
||||
<img
|
||||
src="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExbjY2eWc5Y2F0ZW56MmR4aWE0dDhzZXlidXRmYWZyajl1bWZidXZpcyZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/87CKDqErVfMqY/giphy.gif"
|
||||
width="550"
|
||||
title="SUPPORT THE PROJECT!"
|
||||
/>
|
||||
</p>
|
||||
|
||||
<br />
|
||||
<br />
|
||||
|
||||
Cheers! 🎉
|
||||
|
||||
Immich team
|
||||
|
||||
# FAQ
|
||||
|
||||
### 1. Where can I purchase a license?
|
||||
|
||||
There are several places where you can purchase the license from
|
||||
|
||||
- [https://buy.immich.app](https://buy.immich.app)
|
||||
- [https://pay.futo.org](https://pay.futo.org/)
|
||||
- or directly from the app.
|
||||
|
||||
### 2. Do I need both _Individual License_ and _Server License_?
|
||||
|
||||
No,
|
||||
|
||||
If you are the admin and the sole user, or your instance has less than a total of 4 users, you can buy the **Individual License** for each user.
|
||||
|
||||
If your instance has more than 4 users, it is more cost-effective to buy the **Server License**, which will license all the users on your instance.
|
||||
|
||||
### 3. What do I do if I don't pay?
|
||||
|
||||
You can continue using Immich without any restriction.
|
||||
|
||||
### 4. Will there be any paywalled features?
|
||||
|
||||
No, there will never be any paywalled features.
|
||||
|
||||
### 5. Where can I get support regarding payment issues?
|
||||
|
||||
You can email us with your `orderId` and your email address `billing@futo.org` or on our Discord server.
|
||||
@@ -1,78 +0,0 @@
|
||||
---
|
||||
title: Immich Update - July 2024
|
||||
authors: [alextran]
|
||||
date: 2024-07-01T00:00
|
||||
tags: [update, v1.106.0]
|
||||
---
|
||||
|
||||
Hello everybody! Alex from Immich here and I am back with another development progress update for the project.
|
||||
|
||||
Summer has returned once again, and the night sky is filled with stars, thank you for **38_000 shining stars** you have sent to our [GitHub repo](https://github.com/immich-app/immich)! Since the last announcement several core contributors have started full time. Everything is going great with development, PRs get merged with _brrrrrrr_ rate, conversation exchange between team members is on a new high, we met and are working with the great engineers at FUTO. The spirit is high and we have a lot of things brewing that we think you will like.
|
||||
|
||||
Let's go over some of the updates we had since the last post.
|
||||
|
||||
### Container consolidation
|
||||
|
||||
Reduced the number of total containers from 5 to 4 by making the microservices thread get spawned directly in the server container. Woohoo, remember when Immich had 7 containers?
|
||||
|
||||
### Email notifications
|
||||
|
||||

|
||||
|
||||
We added email notifications to the app with SMTP settings that you can configure for the following events
|
||||
|
||||
- A new account is created for you.
|
||||
- You are added to a shared album.
|
||||
- New media is added to an album.
|
||||
|
||||
### Versioned docs
|
||||
|
||||
You can now jump back into the past or take a peek at the unreleased version of the documentation by selecting the version on the website.
|
||||
|
||||

|
||||
|
||||
### Similarity deduplication
|
||||
|
||||
With more machine learning and CLIP magic, we now have similarity deduplication built into the application where it will search for closely similar images and let you decide what to do with them; i.e keep or trash.
|
||||
|
||||

|
||||
|
||||
### Permanent URL for asset on the web
|
||||
|
||||
The detail view for an asset now has a permanent URL so you can easily share them with your loved ones.
|
||||
|
||||
### Web app translations
|
||||
|
||||
We now have a public Weblate project which the community can use to translate the webapp to their native languages. We are planning to port the mobile app translation to this platform as well. If you would like to contribute, you can take a look [here](https://hosted.weblate.org/projects/immich/immich/). We're already close to 50% translations -- we really appreciate everyone contributing to that!
|
||||
|
||||

|
||||
|
||||
### Read-only/Editor mode on shared album
|
||||
|
||||
As the owner of the album, you can choose if the shared user can edit the album or to only view the content of the album without any modification.
|
||||
|
||||

|
||||
|
||||
### Better video thumbnails
|
||||
|
||||
Immich now tries to find a descriptive video thumbnail instead of simply using the first frame. No more black images for thumbnails!
|
||||
|
||||
### Public Roadmap
|
||||
|
||||
We now have a [public roadmap](https://immich.app/roadmap), giving you a high-level overview of things the team is working on. The first goal of this roadmap is to bring Immich to a stable release, which is expected sometime later this year. Some of the highlights include
|
||||
|
||||
- Auto stacking - Auto stacking of burst photos
|
||||
- Basic editor - Basic photo editing capabilities
|
||||
- Workflows - Automate tasks with workflows
|
||||
- Fine grained access controls - Granular access controls for users and api keys
|
||||
- Better background backups - Rework background backups to be more reliable
|
||||
- Private/locked photos - Private assets with extra protections
|
||||
|
||||
Beyond the items in the roadmap, we have _many many_ more ideas for Immich. The team and I hope that you are enjoying the application, find it helpful in your life and we have nothing but the intention of building out great software for you all!
|
||||
|
||||
Have an amazing Summer or Winter for those in the southern hemisphere! :D
|
||||
|
||||
Until next time,
|
||||
|
||||
Cheers!
|
||||
Alex
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
The admin password can be reset by running the [reset-admin-password](/docs/administration/server-commands.md) command on the immich-server.
|
||||
|
||||
### How can I see a list of all users in Immich?
|
||||
### How can I see list of all users in Immich?
|
||||
|
||||
You can see the list of all users by running [list-users](/docs/administration/server-commands.md) Command on the Immich-server.
|
||||
|
||||
@@ -24,69 +24,37 @@ You can see the list of all users by running [list-users](/docs/administration/s
|
||||
|
||||
### I cannot log into the application after an update. What can I do?
|
||||
|
||||
Verify that the mobile app and server are both running the same version (major and minor).
|
||||
First, verify that the mobile app and server are both running the same version (major and minor).
|
||||
|
||||
:::note
|
||||
App store updates sometimes take longer because the stores (Google Play Store and Apple App Store)
|
||||
need to approve the update first, and it can take some time.
|
||||
App store updates sometimes take longer because the stores (Google play store and Apple app store)
|
||||
need to approve the update first which may take some time.
|
||||
:::
|
||||
|
||||
If you still cannot log in to the app, try the following:
|
||||
If you still cannot login to the app, try the following:
|
||||
|
||||
- Check the mobile logs
|
||||
- Make sure login credentials are correct by logging in on the web app
|
||||
|
||||
### Why does foreground backup stop when I navigate away from the app? Shouldn't it transfer the job to background backup?
|
||||
|
||||
Foreground backup and background backup are two separate mechanisms. They don't communicate or interact with each other.
|
||||
|
||||
Foreground backup is controlled by the user's action, while background backup is controlled by your device's operating system. When the app is put in the background, the invocation of background tasks is delegated to the device's operating system scheduler. It decides when the background task can run and how long it can run.
|
||||
|
||||
The behaviors differ based on your device manufacturer and operating system, but most are related to battery-saving policies.
|
||||
|
||||
### Why is background backup on iOS not working?
|
||||
|
||||
On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below:
|
||||
|
||||
- Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`.
|
||||
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
|
||||
- Use the Immich app more often.
|
||||
|
||||
### Why are features not working with a self-signed cert or mTLS?
|
||||
|
||||
Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
|
||||
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
|
||||
|
||||
---
|
||||
|
||||
## Assets
|
||||
|
||||
### Does Immich change the file?
|
||||
|
||||
No, Immich does not modify the original files.
|
||||
All edited metadata is saved in companion `.xmp` sidecar files and the database.
|
||||
However, Immich will delete original files that have been trashed when the trash is emptied in the Immich UI.
|
||||
|
||||
### Why do my file names appear as a random string in the file manager?
|
||||
|
||||
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names. To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
|
||||
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
|
||||
|
||||
### Can I add my existing photo library?
|
||||
|
||||
Yes, with an [External Library](/docs/features/libraries.md).
|
||||
|
||||
### What happens to existing files after I choose a new [Storage Template](/docs/administration/storage-template.mdx)?
|
||||
|
||||
Template changes will only apply to _new_ assets. To retroactively apply the template to previously uploaded assets, run the Storage Migration Job, available on the [Jobs](/docs/administration/jobs-workers/#jobs) page.
|
||||
Template changes will only apply to _new_ assets. To retroactively apply the template to previously uploaded assets, run the Storage Migration Job, available on the [Jobs](/docs/administration/jobs.md) page.
|
||||
|
||||
### Why are only photos and not videos being uploaded to Immich?
|
||||
|
||||
This often happens when using a reverse proxy (such as Nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also, check the disk space of your reverse proxy. In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
|
||||
This often happens when using a reverse proxy (such as nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also check the disk space of your reverse proxy, in some cases proxies cache requests to disk before passing them on, and if disk space runs out the request fails.
|
||||
|
||||
### Why are some photos stored in the file system with the wrong date?
|
||||
|
||||
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc., the job may not have run automatically the first time.
|
||||
There are a few different scenarios that can lead to this situation. The solution is to run the storage migration job again. The job is only _automatically_ run once per asset, after upload. If metadata extraction originally failed, the jobs were cleared/cancelled, etc. the job may not have run automatically the first time.
|
||||
|
||||
### How can I hide photos from the timeline?
|
||||
|
||||
@@ -100,27 +68,23 @@ See [Backup and Restore](/docs/administration/backup-and-restore.md).
|
||||
|
||||
No, it currently does not. There is an [open feature request on GitHub](https://github.com/immich-app/immich/discussions/4348).
|
||||
|
||||
### Does Immich support the filtering of NSFW images?
|
||||
### Does Immich support filtering of NSFW images?
|
||||
|
||||
No, it currently does not. There is an [open feature request on Github](https://github.com/immich-app/immich/discussions/2451).
|
||||
|
||||
### Why are there so many thumbnail generation jobs?
|
||||
|
||||
There are three thumbnail jobs for each asset:
|
||||
There are three thubmanil jobs for each asset:
|
||||
|
||||
- Blurred (thumbhash)
|
||||
- Preview (Webp)
|
||||
- Thumbnail (Jpeg)
|
||||
- Small (webp)
|
||||
- Large (jpeg)
|
||||
|
||||
Also, there are additional jobs for person (face) thumbnails.
|
||||
|
||||
### Why do files from WhatsApp not appear with the correct date?
|
||||
|
||||
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, not the order of arrival on the device. [See #3527](https://github.com/immich-app/immich/issues/3527).
|
||||
|
||||
### What happens if an asset exists in more than one account?
|
||||
|
||||
There are no requirements for assets to be unique across users. If multiple users upload the same image, it is processed as if it were a distinct asset, and jobs run and thumbnails are generated accordingly.
|
||||
There are no requirements for assets to be unique across users. If multiple users upload the same image they are processed as if they were distinct assets and jobs run and thumbnails are generated accordingly.
|
||||
|
||||
### Why do HDR videos appear pale in Immich player but look normal after download?
|
||||
|
||||
@@ -132,17 +96,50 @@ Immich always keeps your original files. Alongside that, it generates a transcod
|
||||
|
||||
### How can I delete transcoded videos without deleting the original?
|
||||
|
||||
The transcoded version of an asset can be deleted by setting a transcode policy that makes it unnecessary and then running a transcoding job for that asset. This can be done on a per-asset basis by starting a transcoding job for a single asset with the _Refresh encoded videos_ button in the asset viewer options or for all assets by running transcoding jobs for all assets from the administration page.
|
||||
The transcoded version of an asset can be deleted by setting a transcode policy that makes it unnecessary, then running a transcoding job for that asset. This can be done on a per-asset basis by starting a transcoding job for a single asset with the _Refresh encoded videos_ button in the asset viewer options, or for all assets by running transcoding jobs for all assets from the administration page.
|
||||
|
||||
To update the transcode policy, navigate to Administration > Video Transcoding Settings > Transcoding Policy and select a policy from the drop-down. This policy will determine whether an existing transcode will be deleted or overwritten in the transcoding job. If a video should be transcoded according to this policy, an existing transcode is overwritten. If not, then it is deleted.
|
||||
|
||||
:::note
|
||||
For example, say you have existing transcodes with the policy "Videos higher than normal resolution or not in the desired format" and switch to a narrower policy: "Videos not in the desired format." If an asset was only transcoded due to its resolution, running a transcoding job for it will delete the existing transcode. This is because resolution is no longer part of the transcode policy and the transcode is unnecessary. Likewise, if you set the policy to "Don't transcode any videos" and run transcoding jobs for all assets, this will delete all existing transcodes as they are unnecessary.
|
||||
For example, say you have existing transcodes with the policy "Videos higher than normal resolution or not in the desired format" and switch to a narrower policy: "Videos not in the desired format". If an asset was only transcoded due to its resolution, then running a transcoding job for it will now delete the existing transcode. This is because resolution is no longer part of the transcode policy and the transcode is unnecessary as a result. Likewise, if you set the policy to "Don't transcode any videos" and run transcoding jobs for all assets, this will delete all existing transcodes as they are all unnecessary.
|
||||
:::
|
||||
|
||||
### Is it possible to compress images during backup?
|
||||
|
||||
No. Our design principle is that the original assets should always be untouched.
|
||||
No. Our golden rule is that the original assets should always be untouched, so we don't think this feature is a good fit for Immich.
|
||||
|
||||
### How can I move all data (photos, persons, albums) from one user to another?
|
||||
|
||||
This is not officially supported, but can be accomplished with some database updates. You can do this on the command line (in the PostgreSQL container using the `psql` command), or you can add for example an [Adminer](https://www.adminer.org/) container to the `docker-compose.yml` file, so that you can use a web-interface.
|
||||
|
||||
:::warning
|
||||
This is an advanced operation. If you can't do it with the steps described here, this is not for you.
|
||||
:::
|
||||
|
||||
<details>
|
||||
<summary>Steps</summary>
|
||||
|
||||
1. **MAKE A BACKUP** - See [backup and restore](/docs/administration/backup-and-restore.md).
|
||||
|
||||
2. Find the id of both the 'source' and the 'destination' user (it's the id column in the users table)
|
||||
|
||||
3. Three tables need to be updated:
|
||||
|
||||
```sql
|
||||
// reassign albums
|
||||
UPDATE albums SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
|
||||
|
||||
// reassign people
|
||||
UPDATE person SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
|
||||
|
||||
// reassign assets
|
||||
UPDATE assets SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>'
|
||||
AND CHECKSUM NOT IN (SELECT CHECKSUM FROM assets WHERE "ownerId" = '<destinationId>');
|
||||
```
|
||||
|
||||
4. There might be left-over assets in the 'source' user's library if they are skipped by the last query because of duplicate checksums. These are probably duplicates anyway, and can probably be removed.
|
||||
|
||||
</details>
|
||||
|
||||
---
|
||||
|
||||
@@ -162,36 +159,23 @@ No, not yet. For updates on this planned feature, follow the [GitHub discussion]
|
||||
|
||||
### Can I add an external library while keeping the existing album structure?
|
||||
|
||||
We haven't implemented an official mechanism for creating albums from external libraries, but there are some [workarounds from the community](https://github.com/immich-app/immich/discussions/4279) to help you achieve that.
|
||||
We haven't put in an official mechanism to create albums from external libraries at the moment, but there are some [workarounds from the community](https://github.com/immich-app/immich/discussions/4279) to help you achieve that.
|
||||
|
||||
### What happens to duplicates in external libraries?
|
||||
|
||||
Duplicate checking only exists for upload libraries, using the file hash. Furthermore, duplicate checking is not global, but _per library_. Therefore, a situation where the same file appears twice in the timeline is possible, especially for external libraries.
|
||||
|
||||
### Why are my edits to files not being saved in read-only external libraries?
|
||||
|
||||
Images in read-write external libraries (the default) can be edited as normal.
|
||||
In read-only libraries (`:ro` in the `docker-compose.yml`), Immich is unable to create the `.xmp` sidecar files to store edited file metadata.
|
||||
For this reason, the metadata (timestamp, location, description, star rating, etc.) cannot be edited for files in read-only external libraries.
|
||||
|
||||
### How are deletions of files handled in external libraries?
|
||||
|
||||
Immich will attempt to delete original files that have been trashed when the trash is emptied.
|
||||
In read-write external libraries (the default), Immich will delete the original file.
|
||||
In read-only libraries (`:ro` in the `docker-compose.yml`), files can still be trashed in the UI.
|
||||
However, when the trash is emptied, the files will re-appear in the main timeline since Immich is unable to delete the original file.
|
||||
|
||||
---
|
||||
|
||||
## Machine Learning
|
||||
|
||||
### How does smart search work?
|
||||
|
||||
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
|
||||
Immich uses CLIP models, for more information about CLIP and its capabilities read about it [here](https://openai.com/research/clip).
|
||||
|
||||
### How does facial recognition work?
|
||||
|
||||
See [How Facial Recognition Works](/docs/features/facial-recognition#How-Facial-Recognition-Works) for details.
|
||||
For face detection and recognition, Immich uses [InsightFace models](https://github.com/deepinsight/insightface/tree/master/model_zoo).
|
||||
|
||||
### How can I disable machine learning?
|
||||
|
||||
@@ -205,27 +189,33 @@ However, disabling all jobs will not disable the machine learning service itself
|
||||
|
||||
### I'm getting errors about models being corrupt or failing to download. What do I do?
|
||||
|
||||
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Hugging Face][huggingface] and place them in the cache folder.
|
||||
You can delete the model cache volume, which is where models are downloaded to. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Huggingface](https://huggingface.co/immich-app) and place them in the cache folder.
|
||||
|
||||
### Why did Immich decide to remove object detection?
|
||||
|
||||
The feature added keywords to images for metadata search, but wasn't used for smart search. Smart search made it unnecessary as it isn't limited to exact keywords. Combined with it causing crashes on some devices, using many dependencies and causing user confusion as to how search worked, it was better to remove the job altogether.
|
||||
For more info see [here](https://github.com/immich-app/immich/pull/5903)
|
||||
|
||||
### Can I use a custom CLIP model?
|
||||
|
||||
No, this is not supported. Only models listed in the [Hugging Face][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
|
||||
No, this is not supported. Only models listed in the [Huggingface](https://huggingface.co/immich-app) page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
|
||||
|
||||
### I want to be able to search in other languages besides English. How can I do that?
|
||||
|
||||
You can change to a multilingual CLIP model. See [here](/docs/features/smart-search#CLIP-model) for instructions.
|
||||
You can change to a multilingual model listed [here](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) by going to Administration > Machine Learning Settings > Smart Search and replacing the name of the model. Be sure to re-run Smart Search on all assets after this change. You can then search in over 100 languages.
|
||||
|
||||
### Does Immich support Facial Recognition for videos?
|
||||
:::note
|
||||
Feel free to make a feature request if there's a model you want to use that isn't in [Immich Huggingface list](https://huggingface.co/immich-app).
|
||||
:::
|
||||
|
||||
Immich's machine learning feature operates on the generated thumbnail. If a face is visible in the video's thumbnail it will be picked up by facial recognition.
|
||||
### Does Immich support Facial Recognition for videos ?
|
||||
|
||||
Immich's machine learning feature operate on the generated thumbnail. If a face is visible in the video's thumbnail it will be picked up by facial recognition.
|
||||
Scanning the entire video for faces may be implemented in the future.
|
||||
|
||||
### Does Immich have animal recognition?
|
||||
|
||||
No.
|
||||
:::tip
|
||||
You can use [Smart Search](/docs/features/smart-search.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
|
||||
:::
|
||||
|
||||
### I'm getting a lot of "faces" that aren't faces, what can I do?
|
||||
|
||||
@@ -234,19 +224,14 @@ to increase the bar for what the algorithm considers a "core face" for that pers

### The immich_model-cache volume takes up a lot of space, what could be the problem?

If you installed several models and chose not to use some of them, it might be worth deleting the old models that are in immich_model-cache. To do this you can mount the model cache and remove the undesired models.
If you installed several models and chose not to use some of them, it might be worth deleting the old models that are in immich_model-cache.

<details>
<summary>Steps</summary>
To do this you can run:

```bash
docker run -it --rm -v immich_model-cache:/mnt-models alpine sh
cd /mnt-models
ls clip/ facial-recognition/
# rm -r clip/ABC facial-recognition/DEF # delete unused models
```

</details>
- `docker run -it --rm -v immich_model-cache:/mnt ubuntu bash`
- `cd mnt`
- `ls`
- and delete unused models with `rm -r <model_name>`.
---

@@ -254,7 +239,7 @@ ls clip/ facial-recognition/

### Why is Immich slow on low-memory systems like the Raspberry Pi?

Immich optionally uses transcoding and machine learning for several features. However, these can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this, host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.

### Can I lower CPU and RAM usage?
@@ -263,18 +248,13 @@ The initial backup is the most intensive due to the number of jobs running. The

- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
  - For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
  - It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
  - You _must_ re-run the Face Detection job for all images after this for facial recognition on new images to work properly.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for how you can disable machine learning.

### Can I limit CPU and RAM usage?
### Can I limit the amount of CPU and RAM usage?

By default, a container has no resource constraints and can use as much of a given resource as the host's kernel scheduler allows. To limit this, you can add the following to the `docker-compose.yml` block of any containers that you want to have limited resources.

<details>
<summary>docker-compose.yml</summary>
```yaml
deploy:
  resources:
@@ -285,11 +265,8 @@
      memory: '1G'
```

</details>

For more details, you can look at the [original docker docs](https://docs.docker.com/config/containers/resource_constraints/) or use this [guide](https://www.baeldung.com/ops/docker-memory-limit).

Note that memory constraints work by terminating the container, so this can introduce instability if set too low.
### How can I boost machine learning speed?

:::note
@@ -299,16 +276,17 @@ This advice improves throughput, not latency. This is to say that it will make S

You can increase throughput by increasing the job concurrency for machine learning jobs (Smart Search, Face Detection). With higher concurrency, the host will work on more assets in parallel. You can do this by navigating to Administration > Settings > Job Settings and increasing concurrency as needed.

:::danger
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Storage speed and latency can quickly become the limiting factor beyond this, particularly when using HDDs.
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Beyond this, note that storage speed and latency may quickly become the limiting factor; particularly when using HDDs.

The concurrency can be increased more comfortably with a GPU, but should still not be above 16 in most cases.
Do not exaggerate with the amount of jobs because you're probably thoroughly overloading the server.

Do not exaggerate with the job concurrency because you're probably thoroughly overloading the server.
More detail can be found [here](https://discord.com/channels/979116623879368755/994044917355663450/1174711719994605708)
:::
### My server shows Server Status Offline | Version Unknown. What can I do?
### Why is Immich using so much of my CPU?

You need to enable WebSockets on your reverse proxy.
When a large amount of assets are uploaded to Immich it makes sense that the CPU and RAM will be heavily used due to machine learning work and creating image thumbnails.
Once this process completes, the percentage of CPU usage will drop to around 3-5% usage
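For nginx, a minimal sketch of the WebSocket-related directives looks like the following; the `<backend_url>` placeholder follows the nginx examples elsewhere in these docs, and a real proxy block will carry more settings than this:

```nginx
location / {
    proxy_pass http://<backend_url>:2283;
    # Forward the WebSocket upgrade handshake so the web UI can connect
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
}
```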
---

@@ -318,12 +296,6 @@ You need to enable WebSockets on your reverse proxy.

Immich components are typically deployed using docker. To see logs for deployed docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
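For example, assuming the default container name from the compose file, tailing the server logs looks like this:

```bash
# Follow the last 100 lines of the server log (container name assumes the default compose file)
docker logs --tail 100 --follow immich_server
```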
### How can I reduce the log verbosity of Redis?

To decrease Redis logs, you can add the following line to the `redis:` section of the `docker-compose.yml`:

`command: redis-server --loglevel warning`
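As a sketch, the line goes under the `redis:` service in your compose file; the image tag here is illustrative, not a requirement:

```yaml
  redis:
    image: redis:6.2-alpine # illustrative; keep whatever image your setup uses
    # Log only warnings and errors instead of the default verbosity
    command: redis-server --loglevel warning
```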
### How can I run Immich as a non-root user?

You can change the user in the container by setting the `user` argument in `docker-compose.yml` for each service.
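A minimal sketch, assuming UID/GID 1000; pick the IDs that own your volume mounts:

```yaml
services:
  immich-server:
    # Run the container process as a non-root user:group
    user: "1000:1000"
```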
@@ -333,35 +305,28 @@ You may need to add mount points or docker volumes for the following internal co

- `immich-machine-learning:/.cache`
- `redis:/data`

The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.

:::note Docker Compose Volumes
The Docker Compose top-level volume element does not support non-root access; all of the above volumes must be local volume mounts.
:::
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
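For instance, a local bind mount for the machine-learning cache could look like this; the host path is an assumption, not a required location:

```yaml
  immich-machine-learning:
    volumes:
      # Local bind mount instead of a top-level named volume (non-root friendly)
      - ./model-cache:/cache
```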
For a further hardened system, you can add the following block to every container except for `immich_postgres`.
<details>
<summary>docker-compose.yml</summary>

```yaml
security_opt:
  # Prevent escalation of privileges after the container is started
  # Prevent escalation of privileges after container is started
  - no-new-privileges:true
cap_drop:
  # Prevent access to raw network traffic
  - NET_RAW
```

</details>
### How can I purge data from Immich?
### How can I **purge** data from Immich?

Data for Immich comes in two forms:

1. **Metadata** stored in a Postgres database, located in the `DB_DATA_LOCATION` folder (previously the `pg_data` Docker volume).
1. **Metadata** stored in a postgres database, persisted via the `pg_data` volume
2. **Files** (originals, thumbs, profile, etc.), stored in the `UPLOAD_LOCATION` folder, more [info](/docs/administration/backup-and-restore#asset-types-and-storage-locations).

To remove the **Metadata**, you can stop Immich and delete the volume.

:::warning
This will destroy your database and reset your instance, meaning that you start from scratch.
:::
@@ -370,16 +335,13 @@ This will destroy your database and reset your instance, meaning that you start

```bash
docker compose down -v
```

After removing the containers and volumes, there are a few directories that need to be deleted to reset Immich to a new installation. Once they are deleted, Immich can be started back up and will be a fresh installation.

- `DB_DATA_LOCATION` contains the database, media info, and settings.
- `UPLOAD_LOCATION` contains all the media uploaded to Immich.

:::note Portainer
If you use Portainer, bring down the stack in Portainer. Go into the volumes section
and remove all the volumes related to Immich, then restart the stack.
:::

After removing the containers and volumes, the **Files** should be removed from the `UPLOAD_LOCATION` to provide a clean start.
### Why does the machine learning service report workers crashing?

:::note
@@ -394,54 +356,6 @@ If it mentions SIGILL (note the lack of a K) or error code 132, it most likely m

If your version of Immich is below 1.92.0 and the crash occurs after logs about tracing or exporting a model, consider either upgrading or disabling the Tag Objects job.
## Database

### Why does Immich log migration errors on startup?

### Why am I getting database ownership errors?

If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem.
NTFS and exFAT/FAT32 filesystems are not supported. See [here](/docs/install/environment-variables#supported-filesystems) for more details.
### How can I verify the integrity of my database?

If you installed Immich using v1.104.0 or later, you likely have database checksums enabled by default. You can check this by running the following command.
A result of `on` means that checksums are enabled.

<details>
<summary>Check if checksums are enabled</summary>

```bash
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="show data_checksums"
 data_checksums
----------------
 on
(1 row)
```

</details>
If checksums are enabled, you can check the status of the database with the following command. A normal result is all zeroes.

<details>
<summary>Check for database corruption</summary>

```bash
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
  datname  | checksum_failures | checksum_last_failure
-----------+-------------------+-----------------------
 postgres  |                 0 |
 immich    |                 0 |
 template1 |                 0 |
 template0 |                 0 |
(4 rows)
```

</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected.
The damaged database dump can be used to manually recover any changes made since the last backup, if needed.
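One possible salvage sequence, assuming the default `immich_postgres` container and the `postgres` superuser (adapt names and paths to your setup); note that `zero_damaged_pages` discards damaged pages, so only use it for this salvage dump:

```bash
# Allow reads to skip damaged pages (salvage mode)
docker exec -t immich_postgres psql --username=postgres --command="ALTER SYSTEM SET zero_damaged_pages = on;"
docker exec -t immich_postgres psql --username=postgres --command="SELECT pg_reload_conf();"
# Take the salvage dump
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | gzip > "/path/to/backup/damaged-dump.sql.gz"
# Revert the setting once the dump succeeds
docker exec -t immich_postgres psql --username=postgres --command="ALTER SYSTEM RESET zero_damaged_pages;"
docker exec -t immich_postgres psql --username=postgres --command="SELECT pg_reload_conf();"
```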
The causes of possible corruption are many, but can include unexpected poweroffs or unmounts, use of a network share for Postgres data, or a poor storage medium such as an SD card or failing HDD/SSD.

[huggingface]: https://huggingface.co/immich-app

Sometimes Immich logs errors such as "duplicate key value violates unique constraint" or "column (...) of relation (...) already exists". Because of Immich's container structure, this error can be seen when both immich and immich-microservices start at the same time and attempt to migrate or create the database structure. Since the database migration is run sequentially and inside of transactions, this error message does not cause harm to your installation of Immich and can safely be ignored. If needed, you can manually restart Immich by running `docker restart immich immich-microservices`.
@@ -15,86 +15,91 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
:::

:::caution
It is not recommended to directly back up the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups

Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.

#### Restoring

We hope to make restoring simpler in future versions; for now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then please follow the steps in the following section for restoring the database.

### Manual Backup and Restore

The recommended way to back up and restore the Immich database is to use the `pg_dumpall` command.
<Tabs>
<TabItem value="Linux system" label="Linux system" default>
<TabItem value="Linux system based Backup" label="Linux system based Backup" default>

```bash title='Backup'
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | gzip > "/path/to/backup/dump.sql.gz"
```

```bash title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
sleep 10 # Wait for Postgres server to start up
gunzip < "/path/to/backup/dump.sql.gz" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```

</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
|
||||
<TabItem value="Windows system based Backup" label="Windows system based Backup">
|
||||
|
||||
```powershell title='Backup'
|
||||
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | Set-Content -Encoding utf8 "C:\path\to\backup\dump.sql"
|
||||
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres > "\path\to\backup\dump.sql"
|
||||
```
|
||||
|
||||
```powershell title='Restore'
|
||||
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
|
||||
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
|
||||
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
|
||||
docker compose pull # Update to latest version of Immich (if desired)
|
||||
docker compose create # Create Docker containers for Immich apps without running them
|
||||
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
|
||||
docker compose pull # Update to latest version of Immich (if desired)
|
||||
docker compose create # Create Docker containers for Immich apps without running them.
|
||||
docker start immich_postgres # Start Postgres server
|
||||
sleep 10 # Wait for Postgres server to start up
|
||||
# Check the database user if you deviated from the default
|
||||
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
|
||||
docker compose up -d # Start remainder of Immich apps
|
||||
sleep 10 # Wait for Postgres server to start up
|
||||
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
|
||||
docker compose up -d # Start remainder of Immich apps
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database.
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).

:::tip
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored.
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:

```yaml
services:
  ...
  backup:
    container_name: immich_db_dumper
    image: prodrigestivill/postgres-backup-local:14
    env_file:
      - .env
    environment:
      POSTGRES_HOST: database
      POSTGRES_CLUSTER: 'TRUE'
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      SCHEDULE: "@daily"
      POSTGRES_EXTRA_OPTS: '--clean --if-exists'
      BACKUP_DIR: /db_dumps
    volumes:
      - ./db_dumps:/db_dumps
    depends_on:
      - database
```

Then you can restore with the same command but pointed at the latest dump.

```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz | docker exec -i immich_postgres psql --username=postgres
```

:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.

Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
:::
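For the `DB_SKIP_MIGRATIONS` tip above, one way to set the variable is directly on the server service in the compose file; a minimal sketch:

```yaml
services:
  immich-server:
    environment:
      # Temporary: skip startup migrations while restoring; remove after the restore completes
      DB_SKIP_MIGRATIONS: 'true'
```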
## Filesystem

Immich stores two types of content in the filesystem: (1) original, unmodified assets (photos and videos), and (2) generated content. Only the original content needs to be backed up, which is stored in the following folders:
Immich stores two types of content in the filesystem: (1) original, unmodified content, and (2) generated content. Only the original content needs to be backed-up, which includes the following folders:

1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile`

:::caution
If you moved some of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup path to match your setup.
:::

### Asset Types and Storage Locations

Some storage locations are impacted by the Storage Template. See below for more details.
@@ -103,8 +108,7 @@ Some storage locations are impacted by the Storage Template. See below for more
<TabItem value="Storage Template Off (Default)." label="Storage Template Off (Default)." default>

:::note
The `UPLOAD_LOCATION/library` folder is not used by default on new machines running version 1.92.0. It is used only if the system administrator activated the storage template engine;
for more info, read the [release notes](https://github.com/immich-app/immich/releases/tag/v1.92.0#:~:text=the%20partner%E2%80%99s%20assets.-,Hardening%20storage%20template).
`UPLOAD_LOCATION/library` folder is not used by default on new machines running version 1.92.0. These are if the system administrator activated the storage template engine, for [more info](https://github.com/immich-app/immich/releases/tag/v1.92.0#:~:text=the%20partner%E2%80%99s%20assets.-,Hardening%20storage%20template).
:::

**1. User-Specific Folders:**
@@ -116,28 +120,16 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele

- **Source Assets:**
  - Original assets uploaded through the browser interface & mobile & CLI.
  - Stored in `UPLOAD_LOCATION/upload/<userID>`.
  - Stored in `/library/upload/<userID>`.
- **Avatar Images:**
  - User profile images.
  - Stored in `UPLOAD_LOCATION/profile/<userID>`.
  - Stored in `/library/profile/<userID>`.
- **Thumbs Images:**
  - Preview images (small thumbnails and large previews) for each asset and thumbnails for recognized faces.
  - Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
  - Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
  - Stored in `/library/thumbs/<userID>`.
- **Encoded Assets:**
  - Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
  - Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
- **Postgres**
  - The Immich database containing all the information to allow the system to function properly.
    **Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
  - Stored in `DB_DATA_LOCATION`.

:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
  - By default, unless otherwise specified re-encoded video assets for wider compatibility.
  - Stored in `/library/encoded-video/<userID>`.

</TabItem>
<TabItem value="Storage Template On" label="Storage Template On">
|
||||
@@ -145,51 +137,40 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
|
||||
:::note
|
||||
If you choose to activate the storage template engine, it will move all assets to `UPLOAD_LOCATION/library/<userID>`.
|
||||
|
||||
When you turn off the storage template engine, it will leave the assets in `UPLOAD_LOCATION/library/<userID>` and will not return them to `UPLOAD_LOCATION/upload`.
|
||||
**New assets** will be saved to `UPLOAD_LOCATION/upload`.
|
||||
When you turn off the storage template engine, it will leave the assets in `UPLOAD_LOCATION/library/<userID>` and will not return them to `/library/upload`.
|
||||
**New assets** will be saved to `/library/upload`.
|
||||
:::
|
||||
|
||||
**1. User-Specific Folders:**
|
||||
|
||||
- Each user has a unique string representing them.
|
||||
- The administrator can set a Storage Label for a user, which will be used instead of `<userID>` for the `library/` folder.
|
||||
- The Admin has a default storage label of `admin`.
|
||||
- You can find your user ID and Storage Label in Account Account Settings -> Account -> User ID.
|
||||
- The main user is "Admin" (but only for `UPLOAD_LOCATION/library`)
|
||||
- Other users have different string identifiers.
|
||||
- You can find your user ID in Account Account Settings -> Account -> User ID.
|
||||
|
||||
**2. Asset Types and Storage Locations:**
|
||||
|
||||
- **Source Assets:**
|
||||
- Original assets uploaded through the browser interface, mobile, and CLI.
|
||||
- Original assets uploaded through the browser interface & mobile & CLI.
|
||||
- Stored in `UPLOAD_LOCATION/library/<userID>`.
|
||||
- **Avatar Images:**
|
||||
- User profile images.
|
||||
- Stored in `UPLOAD_LOCATION/profile/<userID>`.
|
||||
- Stored in `/library/profile/<userID>`.
|
||||
- **Thumbs Images:**
|
||||
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
|
||||
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
|
||||
- Stored in `/library/thumbs/<userID>`.
|
||||
- **Encoded Assets:**
|
||||
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
|
||||
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
|
||||
- By default, unless otherwise specified re-encoded video assets for wider compatibility .
|
||||
- Stored in `/library/encoded-video/<userID>`.
|
||||
- **Files in Upload Queue (Mobile):**
|
||||
- Files uploaded through mobile apps.
|
||||
- Temporarily located in `UPLOAD_LOCATION/upload/<userID>`.
|
||||
- Temporarily located in `/library/upload/<userID>`.
|
||||
- Transferred to `UPLOAD_LOCATION/library/<userID>` upon successful upload.
|
||||
- **Postgres**
|
||||
|
||||
- The Immich database containing all the information to allow the system to function properly.
|
||||
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
|
||||
- Stored in `DB_DATA_LOCATION`.
|
||||
|
||||
:::danger
|
||||
A backup of this folder does not constitute a backup of your database!
|
||||
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
|
||||
</Tabs>
|
||||
|
||||
:::danger
|
||||
Do not touch the files inside these folders under any circumstances except taking a backup. Changing or removing an asset can cause untracked and missing files.
|
||||
Do not touch the files inside these folders under any circumstances except taking a backup, changing or removing an asset can cause untracked and missing files.
|
||||
You can think of it as App-Which-Must-Not-Be-Named, the only access to viewing, changing and deleting assets is only through the mobile or browser interface.
|
||||
:::
|
||||
|
||||
@@ -1,21 +0,0 @@

# Email Notifications

Immich supports the option to send notifications via Email for the following events:

- Creating a new user
- Notifying a user when they get added to a shared album
- Informing other users about the addition of new assets to a shared album

## SMTP settings

You can access the settings panel from the web at `Administration -> Settings -> Notification settings`.

Under Email, enter the required details to connect with an SMTP server.

You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.

## User's notifications settings

Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:

<img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />
BIN docs/docs/administration/img/admin-jobs.png (new file; after: 1.1 MiB)
BIN (image removed; before: 79 KiB)
BIN (image removed; before: 55 KiB)
BIN (image removed; before: 11 KiB)
BIN (image removed; before: 18 KiB)
BIN docs/docs/administration/img/oauth-settings.png (new file; after: 57 KiB)
BIN (image removed; before: 2.7 KiB)
BIN (image removed; before: 109 KiB)
@@ -1,55 +0,0 @@

# Jobs and Workers

## Workers

### Architecture

The `immich-server` container contains multiple workers:

- `api`: responds to API requests for data and files for the web and mobile app.
- `microservices`: handles most other work, such as thumbnail generation and video encoding, in the form of _jobs_. Simply put, a job is a request to process data in the background.

## Split workers

If you prefer to throttle or distribute the workers, you can do this using the [environment variables](/docs/install/environment-variables) to specify which container should pick up which tasks.

For example, for a simple setup with one container for the Web/API and one for all other microservices, you can do the following:

Copy the entire `immich-server` block as a new service and make the following changes to the **copy**:

```diff
- immich-server:
-   container_name: immich_server
  ...
-   ports:
-     - 2283:2283
+ immich-microservices:
+   container_name: immich_microservices
```

Once you have two copies of the immich-server service, make the following changes to each one. This will allow one container to only serve the web UI and API, and the other one to handle all other tasks.

```diff
 services:
   immich-server:
     ...
+    environment:
+      IMMICH_WORKERS_INCLUDE: 'api'

   immich-microservices:
     ...
+    environment:
+      IMMICH_WORKERS_EXCLUDE: 'api'
```

## Jobs

When a new asset is uploaded it kicks off a series of jobs, which include metadata extraction, thumbnail generation, machine learning tasks, and storage template migration, if enabled. To view the status of a job navigate to the Administration -> Jobs page.

Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed.

:::info
Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::

<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />
docs/docs/administration/jobs.md (new file, 13 lines)
@@ -0,0 +1,13 @@

# Jobs

The `immich-server` responds to API requests for data and files for the web and mobile app. To do this quickly and reliably, it offloads most other work to `immich-microservices` in the form of _jobs_. Simply put, a job is a request to process data in the background. Jobs are picked up automatically by microservices containers.

When a new asset is uploaded it kicks off a series of jobs, which include metadata extraction, thumbnail generation, machine learning tasks, and storage template migration, if enabled. To view the status of a job navigate to the Administration -> Jobs page.

Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed.

:::info
Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::

<img src={require('./img/admin-jobs.png').default} width="80%" title="Admin jobs" />
@@ -3,7 +3,7 @@

This page contains details about using OAuth in Immich.

:::tip
Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobile Redirect URI](#mobile-redirect-uri) for an alternative solution.
Unable to set `app.immich:/` as a valid redirect URI? See [Mobile Redirect URI](#mobile-redirect-uri) for an alternative solution.
:::

## Overview

@@ -11,7 +11,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil

Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:

- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Authelia](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)

@@ -30,7 +30,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured

The **Sign-in redirect URIs** should include:

- `app.immich:///oauth-callback` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `app.immich:/` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `http://DOMAIN:PORT/auth/login` - for logging in with OAuth from the Web Client
- `http://DOMAIN:PORT/user-settings` - for manually linking OAuth in the Web Client
@@ -38,7 +38,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured

Mobile

- `app.immich:///oauth-callback` (You **MUST** include this for iOS and Android mobile apps to work properly)
- `app.immich:/` (You **MUST** include this for iOS and Android mobile apps to work properly)

Localhost

@@ -52,8 +52,8 @@ Before enabling OAuth in Immich, a new client application needs to be configured

Hostname

- `https://immich.example.com/auth/login`
- `https://immich.example.com/user-settings`
- `https://immich.example.com/auth/login`)
- `https://immich.example.com/user-settings`)
## Enable OAuth

@@ -96,80 +96,22 @@ When Auto Launch is enabled, the login page will automatically redirect the user

## Mobile Redirect URI

The redirect URI for the mobile app is `app.immich:///oauth-callback`, which is a [Custom Scheme](https://developer.apple.com/documentation/xcode/defining-a-custom-url-scheme-for-your-app). If this custom scheme is an invalid redirect URI for your OAuth Provider, you can work around this by doing the following:
The redirect URI for the mobile app is `app.immich:/`, which is a [Custom Scheme](https://developer.apple.com/documentation/xcode/defining-a-custom-url-scheme-for-your-app). If this custom scheme is an invalid redirect URI for your OAuth Provider, you can work around this by doing the following:

1. Configure an http(s) endpoint to forward requests to `app.immich:///oauth-callback` (a sketch follows below)
1. Configure an http(s) endpoint to forwards requests to `app.immich:/`
2. Whitelist the new endpoint as a valid redirect URI with your provider.
3. Specify the new endpoint as the `Mobile Redirect URI Override` in the OAuth settings.

With these steps in place, you should be able to use OAuth from the [Mobile App](/docs/features/mobile-app.mdx) without a custom scheme redirect URI.

:::info
Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to forward requests to `app.immich:///oauth-callback`, and can be used for step 1.
Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to forward requests to `app.immich:/`, and can be used for step 1.
:::
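If you would rather host the forwarding endpoint from step 1 yourself instead of using the built-in route, a hypothetical nginx sketch could look like this; the path is an arbitrary choice, not an Immich requirement:

```nginx
# Redirect the provider's callback to the app's custom scheme, preserving query parameters
location = /oauth/mobile-redirect {
    return 307 app.immich:///oauth-callback$is_args$args;
}
```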
## Example Configuration

<details>
<summary>Authentik Example</summary>

### Authentik Example

Here's an example of OAuth configured for Authentik:

Configuration of Authorised redirect URIs (Authentik OAuth2/OpenID Provider)

<img src={require('./img/authentik-redirect-uris-example.webp').default} width='70%' title="Authentik authorised redirect URIs" />

Configuration of OAuth in Immich System Settings

| Setting                      | Value                                                                               |
| ---------------------------- | ----------------------------------------------------------------------------------- |
| Issuer URL                   | `https://example.immich.app/application/o/immich/.well-known/openid-configuration` |
| Client ID                    | AFCj2rM1f4rps**\*\*\*\***\***\*\*\*\***lCLEum6hH9...                                |
| Client Secret                | 0v89FXkQOWO\***\*\*\*\*\***\*\*\***\*\*\*\*\***mprbvXD549HH6s1iw...                 |
| Scope                        | openid email profile                                                                |
| Signing Algorithm            | RS256                                                                               |
| Storage Label Claim          | preferred_username                                                                  |
| Storage Quota Claim          | immich_quota                                                                        |
| Default Storage Quota (GiB)  | 0 (0 for unlimited quota)                                                           |
| Button Text                  | Sign in with Authentik (optional)                                                   |
| Auto Register                | Enabled (optional)                                                                  |
| Auto Launch                  | Enabled (optional)                                                                  |
| Mobile Redirect URI Override | Disable                                                                             |
| Mobile Redirect URI          |                                                                                     |

</details>

<details>
<summary>Google Example</summary>

### Google Example

Here's an example of OAuth configured for Google:

Configuration of Authorised redirect URIs (Google Console)

<img src={require('./img/google-redirect-uris-example.webp').default} width='50%' title="Google authorised redirect URIs" />

Configuration of OAuth in Immich System Settings

| Setting                      | Value                                                                         |
| ---------------------------- | ----------------------------------------------------------------------------- |
| Issuer URL                   | `https://accounts.google.com`                                                 |
| Client ID                    | 7\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***vuls.apps.googleusercontent.com  |
| Client Secret                | G\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***OO                               |
| Scope                        | openid email profile                                                          |
| Signing Algorithm            | RS256                                                                         |
| Storage Label Claim          | preferred_username                                                            |
| Storage Quota Claim          | immich_quota                                                                  |
| Default Storage Quota (GiB)  | 0 (0 for unlimited quota)                                                     |
| Button Text                  | Sign in with Google (optional)                                                |
| Auto Register                | Enabled (optional)                                                            |
| Auto Launch                  | Enabled                                                                       |
| Mobile Redirect URI Override | Enabled (required)                                                            |
| Mobile Redirect URI          | `https://example.immich.app/api/oauth/mobile-redirect`                        |

</details>

[oidc]: https://openid.net/connect/
@@ -5,17 +5,15 @@ While not officially recommended, it is possible to run Immich using a pre-exist

By default, Immich expects superuser permission on the Postgres database and requires certain extensions to be installed. This guide outlines the steps required to prepare a pre-existing Postgres server to be used by Immich.

:::tip
Running with a pre-existing Postgres server can unlock powerful administrative features, including logical replication and streaming write-ahead log backups using programs like pgBackRest or Barman.
Running with a pre-existing Postgres server can unlock powerful administrative features, including logical replication, data page checksums, and streaming write-ahead log backups using programs like pgBackRest or Barman.
:::

## Prerequisites

You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
You must install pgvecto.rs using their [instructions](https://docs.pgvecto.rs/getting-started/installation.html). After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
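For example, the resulting line in `postgresql.conf` might look like this; the `pg_stat_statements` entry stands in for whatever libraries you already load:

```
# postgresql.conf - load pgvecto.rs alongside any existing preloaded libraries
shared_preload_libraries = 'pg_stat_statements, vectors.so'
```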
:::note
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.

Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. For example, if your Immich version uses the dedicated database image `tensorchord/pgvecto-rs:pg14-v0.2.1`, you must install pgvecto.rs `>= 0.2.1, < 0.3.0`.
:::
## Specifying the connection URL

@@ -52,7 +50,8 @@ ALTER DATABASE <immichdatabasename> OWNER TO <immichdbusername>;

```sql
CREATE EXTENSION vectors;
CREATE EXTENSION earthdistance CASCADE;
ALTER DATABASE <immichdatabasename> SET search_path TO "$user", public, vectors;
ALTER SCHEMA vectors OWNER TO <immichdbusername>;
GRANT USAGE ON SCHEMA vectors TO <immichdbusername>;
ALTER DEFAULT PRIVILEGES IN SCHEMA vectors GRANT SELECT ON TABLES TO <immichdbusername>;
COMMIT;
```

@@ -64,6 +63,4 @@ When installing a new version of pgvecto.rs, you will need to manually update th

#### Permission denied for view

If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;`.

[vectors-install]: https://docs.pgvecto.rs/getting-started/installation.html
If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat to <immichdbusername>;`.
@@ -1,9 +1,5 @@

# Repair Page

:::warning
This feature is currently disabled and will be reworked in the near future.
:::

The repair page is designed to give information to the system administrator about files that are not tracked, or offline paths.

## Natural State

@@ -22,7 +18,7 @@ In any other situation, there are 3 different options that can appear:

- MATCHES - These files are matched by their checksums.

- OFFLINE PATHS - These files are the result of manually deleting files from immich or a failed file move in the past (losing track of a file).
- OFFLINE PATHS - These files are the result of manually deleting files in the upload library or a failed file move in the past (losing track of a file).

- UNTRACKED FILES - These files are not tracked by the application. They can be the result of failed moves, interrupted uploads, or left behind due to a bug.
@@ -40,26 +40,6 @@ server {

```nginx
}
```

#### Compatibility with Let's Encrypt

In the event that your nginx configuration includes a section for Let's Encrypt, it's likely that you have a segment similar to the following:

```nginx
location ~ /.well-known {
    ...
}
```

This particular `location` directive can inadvertently prevent mobile clients from reaching the `/.well-known/immich` path, which is crucial for discovery. The usual error message for this case is: "Your app major version is not compatible with the server". To remedy this, you should introduce an additional location block specifically for this path, ensuring that requests are correctly proxied to the Immich server:

```nginx
location = /.well-known/immich {
    proxy_pass http://<backend_url>:2283;
}
```

By doing so, you'll maintain the functionality of Let's Encrypt while allowing mobile clients to access the necessary Immich path without obstruction.
### Caddy example config

As an alternative to nginx, you can also use [Caddy](https://caddyserver.com/) as a reverse proxy (with automatic HTTPS configuration). Below is an example config.
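The diff elides the config itself; as a rough sketch, a Caddyfile reverse-proxying to Immich typically amounts to something like the following, with the domain and upstream as placeholders:

```
immich.example.com {
    # Caddy obtains and renews the TLS certificate automatically
    reverse_proxy http://<snip>:2283
}
```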
@@ -84,43 +64,3 @@ Below is an example config for Apache2 site configuration.

```
  ProxyPreserveHost On
</VirtualHost>
```
### Traefik Proxy example config

The example below is for Traefik version 3.

The most important change is to increase the `respondingTimeouts` of the entrypoint used by Immich, in this example the entrypoint `websecure` for port `443`. By default it is set to 60s, which leads to video uploads stopping after 1 minute (Error Code 499). With this config, uploads will only fail after 10 minutes, which is enough in most cases. Increase it if needed.

`traefik.yaml`

```yaml
[...]
entryPoints:
  websecure:
    address: :443
    # this section needs to be added
    transport:
      respondingTimeouts:
        readTimeout: 600s
        idleTimeout: 600s
        writeTimeout: 600s
```

The second part is in the `docker-compose.yml` file where Immich is defined. Add the Traefik-specific labels like in the example.

`docker-compose.yml`

```yaml
services:
  immich-server:
    [...]
    labels:
      traefik.enable: true
      # increase readingTimeouts for the entrypoint used here
      traefik.http.routers.immich.entrypoints: websecure
      traefik.http.routers.immich.rule: Host(`immich.your-domain.com`)
      traefik.http.services.immich.loadbalancer.server.port: 2283
```

Keep in mind that Traefik needs to communicate with the network that Immich is in, which is usually done by adding the Traefik network to the `immich-server` service.