Compare commits


1 Commit

Author: Alex Tran
SHA1: ad3e92fff0
Message: feat(server): license verification
Date: 2024-06-05 01:56:37 -05:00
1799 changed files with 54027 additions and 157177 deletions


@@ -4,7 +4,6 @@
design/
docker/
!docker/scripts
docs/
e2e/
fastlane/
@@ -22,7 +21,6 @@ open-api/typescript-sdk/node_modules/
server/coverage/
server/node_modules/
server/upload/
server/src/queries
server/dist/
server/www/
@@ -30,4 +28,3 @@ web/node_modules/
web/coverage/
web/.svelte-kit
web/build/
web/.env


@@ -1,13 +1,11 @@
title: "[Feature] feature-name-goes-here"
title: "[Feature] <feature-name-goes-here>"
labels: ["feature"]
body:
- type: markdown
attributes:
value: |
Please use this form to request a new feature for Immich.
Stick to a single feature per request. If you list multiple features at once,
your request will be closed.
Please use this form to request a new feature for Immich
- type: checkboxes
attributes:

.github/FUNDING.yml vendored (1 line changed)

@@ -1 +0,0 @@
custom: ['https://buy.immich.app']


@@ -83,6 +83,7 @@ body:
2.
3.
...
render: bash
validations:
required: true


@@ -1,14 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: I have a question or need support
url: https://discord.immich.app
- name: I have a question or need support
url: https://discord.gg/D8JsnBEuKb
about: We use GitHub for tracking bugs, please check out our Discord channel for freaky fast support.
- name: 📷 My photo or video has a date, time, or timezone problem
url: https://github.com/immich-app/immich/discussions/12650
about: Upload a sample file to this discussion and we will take a look
- name: 🌟 Feature request
- name: Feature Request
url: https://github.com/immich-app/immich/discussions/new?category=feature-request
about: Please use our GitHub Discussion for making feature requests.
- name: 🫣 I'm unsure where to go
url: https://discord.immich.app
- name: I'm unsure where to go
url: https://discord.gg/D8JsnBEuKb
about: If you are unsure where to go, then joining our Discord is recommended; just ask!

.github/dependabot.yml vendored (new file, 7 lines)

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/labeler.yml vendored (38 lines changed)

@@ -1,38 +0,0 @@
cli:
- changed-files:
- any-glob-to-any-file:
- cli/src/**
documentation:
- changed-files:
- any-glob-to-any-file:
- docs/blob/**
- docs/docs/**
- docs/src/**
- docs/static/**
🖥web:
- changed-files:
- any-glob-to-any-file:
- web/src/**
- web/static/**
📱mobile:
- changed-files:
- any-glob-to-any-file:
- mobile/lib/**
- mobile/test/**
🗄server:
- changed-files:
- any-glob-to-any-file:
- server/src/**
- server/test/**
🧠machine-learning:
- changed-files:
- any-glob-to-any-file:
- machine-learning/app/**
changelog:translation:
- head-branch: ['^chore/translations$']

.github/release.yml vendored (40 lines changed)

@@ -1,33 +1,41 @@
changelog:
categories:
- title: 🚨 Breaking Changes
- title: ⚠️ Breaking Changes
labels:
- changelog:breaking-change
- breaking-change
- title: 🫥 Deprecated Changes
- title: 🗄️ Server
labels:
- changelog:deprecated
- 🗄server
- title: 🔒 Security
- title: 📱 Mobile
labels:
- changelog:security
- 📱mobile
- title: 🚀 Features
- title: 🖥️ Web
labels:
- changelog:feature
- 🖥web
- title: 🌟 Enhancements
- title: 🧠 Machine Learning
labels:
- changelog:enhancement
- 🧠machine-learning
- title: 🐛 Bug fixes
- title: ⚡ CLI
labels:
- changelog:bugfix
- cli
- title: 📚 Documentation
- title: 📓 Documentation
labels:
- changelog:documentation
- documentation
- title: 🌐 Translations
- title: 🔨 Maintenance
labels:
- changelog:translation
- deployment
- dependencies
- renovate
- maintenance
- tech-debt
- title: Other changes
labels:
- "*"


@@ -16,28 +16,10 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android
needs: pre-job
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_run == 'true' }}
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }}
runs-on: macos-14
steps:


@@ -1,17 +1,16 @@
name: CLI Build
on:
workflow_dispatch:
push:
branches: [main]
paths:
- 'cli/**'
- '.github/workflows/cli.yml'
- "cli/**"
- ".github/workflows/cli.yml"
pull_request:
branches: [main]
paths:
- 'cli/**'
- '.github/workflows/cli.yml'
release:
types: [published]
- "cli/**"
- ".github/workflows/cli.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -22,7 +21,7 @@ permissions:
jobs:
publish:
name: CLI Publish
name: Publish
runs-on: ubuntu-latest
defaults:
run:
@@ -33,8 +32,8 @@ jobs:
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
registry-url: 'https://registry.npmjs.org'
node-version: "20.x"
registry-url: "https://registry.npmjs.org"
- name: Prepare SDK
run: npm ci --prefix ../open-api/typescript-sdk/
- name: Build SDK
@@ -42,7 +41,7 @@ jobs:
- run: npm ci
- run: npm run build
- run: npm publish
if: ${{ github.event_name == 'release' }}
if: ${{ github.event_name == 'workflow_dispatch' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
@@ -56,10 +55,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
uses: docker/setup-qemu-action@v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.7.1
uses: docker/setup-buildx-action@v3.3.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -84,15 +83,15 @@ jobs:
images: |
name=ghcr.io/${{ github.repository_owner }}/immich-cli
tags: |
type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'release' }}
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'workflow_dispatch' }}
type=raw,value=latest,enable=${{ github.event_name == 'workflow_dispatch' }}
- name: Build and push image
uses: docker/build-push-action@v6.9.0
uses: docker/build-push-action@v5.3.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name == 'release' }}
push: ${{ github.event_name == 'workflow_dispatch' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.metadata.outputs.tags }}


@@ -22,7 +22,7 @@ concurrency:
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@@ -48,7 +48,7 @@ jobs:
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- cleanup-images
strategy:
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
uses: stumpylog/image-cleaner-action/untagged@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"


@@ -17,206 +17,56 @@ permissions:
packages: write
jobs:
pre-job:
build_and_push:
name: Build and Push
runs-on: ubuntu-latest
outputs:
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
server:
- 'server/**'
- 'openapi/**'
- 'web/**'
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ["", "-cuda", "-openvino", "-armnn"]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: [""]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ubuntu-latest
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- platforms: linux/amd64,linux/arm64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64,linux/arm64
device: cpu
- platforms: linux/amd64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
device: cuda
suffix: -cuda
- platforms: linux/amd64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
device: openvino
suffix: -openvino
- platforms: linux/arm64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/arm64
device: armnn
suffix: -armnn
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.7.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.9.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
build_and_push_server:
name: Build and Push Server
runs-on: ubuntu-latest
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
strategy:
fail-fast: false
matrix:
include:
- platforms: linux/amd64,linux/arm64
- image: immich-server
context: .
file: server/Dockerfile
platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
uses: docker/setup-qemu-action@v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.7.1
uses: docker/setup-buildx-action@v3.3.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -243,8 +93,8 @@ jobs:
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
name=ghcr.io/${{ github.repository_owner }}/${{matrix.image}}
name=altran1502/${{matrix.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
@@ -261,50 +111,20 @@ jobs:
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ matrix.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.9.0
uses: docker/build-push-action@v5.3.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
context: ${{ matrix.context }}
file: ${{ matrix.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{matrix.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
success-check-server:
name: Docker Build & Push Server Success
needs: [build_and_push_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
success-check-ml:
name: Docker Build & Push ML Success
needs: [build_and_push_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}


@@ -2,8 +2,12 @@ name: Docs build
on:
push:
branches: [main]
paths:
- "docs/**"
pull_request:
branches: [main]
paths:
- "docs/**"
release:
types: [published]
@@ -12,27 +16,7 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
docs:
- 'docs/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@@ -42,11 +26,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './docs/.nvmrc'
- name: Run npm install
run: npm ci


@@ -7,32 +7,13 @@ on:
jobs:
checks:
name: Docs Deploy Checks
runs-on: ubuntu-latest
outputs:
parameters: ${{ steps.parameters.outputs.result }}
artifact: ${{ steps.get-artifact.outputs.result }}
steps:
- if: ${{ github.event.workflow_run.conclusion != 'success' }}
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
if (!matchArtifact) {
console.log("No artifact found with the name docs-build-output, build job was skipped")
return { found: false };
}
return { found: true, id: matchArtifact.id };
- if: ${{ github.event.workflow_run.conclusion == 'failure' }}
run: echo 'The triggering workflow failed' && exit 1
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@v7
@@ -92,10 +73,9 @@ jobs:
return parameters;
deploy:
name: Docs Deploy
runs-on: ubuntu-latest
needs: checks
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
if: ${{ fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -118,11 +98,18 @@ jobs:
uses: actions/github-script@v7
with:
script: |
let artifact = ${{ needs.checks.outputs.artifact }};
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: artifact.id,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
let fs = require('fs');
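
The hunk ends before the script completes. The usual conclusion of this `actions/github-script` download pattern (an assumption based on the standard GitHub docs example, not shown in this diff) writes the downloaded zip into the workspace:

```typescript
// Hypothetical continuation of the script above; `download` and `fs` are the
// variables already in scope. This mirrors the documented github-script
// artifact-download example and is not part of the actual diff.
fs.writeFileSync(
  `${process.env.GITHUB_WORKSPACE}/docs-build-output.zip`,
  Buffer.from(download.data),
);
```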


@@ -5,7 +5,6 @@ on:
jobs:
deploy:
name: Docs Destroy
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -23,7 +22,7 @@ jobs:
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy -refresh=false"
tg_command: "destroy"
- name: Comment
uses: actions-cool/maintain-one-comment@v3


@@ -1,52 +0,0 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})


@@ -1,21 +0,0 @@
name: PR Label Validation
on:
pull_request_target:
types: [opened, labeled, unlabeled, synchronize]
jobs:
validate-release-label:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 1
use_regex: true
labels: "changelog:.*"
add_comment: true


@@ -1,12 +0,0 @@
name: "Pull Request Labeler"
on:
- pull_request_target
jobs:
labeler:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5

.github/workflows/pr-require-label.yml vendored (new file, 13 lines)

@@ -0,0 +1,13 @@
name: Enforce PR labels
on:
pull_request:
types: [labeled, unlabeled, opened, edited, synchronize]
jobs:
enforce-label:
name: Enforce label
runs-on: ubuntu-latest
steps:
- if: toJson(github.event.pull_request.labels) == '[]'
run: exit 1


@@ -29,17 +29,10 @@ jobs:
ref: ${{ steps.push-tag.outputs.commit_long_sha }}
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
token: ${{ secrets.ORG_RELEASE_TOKEN }}
- name: Install Poetry
run: pipx install poetry
@@ -51,8 +44,10 @@ jobs:
id: push-tag
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
author_name: Alex The Bot
author_email: alex.tran1502@gmail.com
default_author: user_info
message: 'Version ${{ env.IMMICH_VERSION }}'
tag: ${{ env.IMMICH_VERSION }}
push: true


@@ -19,7 +19,7 @@ jobs:
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
with:
node-version-file: './open-api/typescript-sdk/.nvmrc'
node-version: '20.x'
registry-url: 'https://registry.npmjs.org'
- name: Install deps
run: npm ci


@@ -10,27 +10,8 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
@@ -56,10 +37,6 @@ jobs:
run: dart format lib/ --set-exit-if-changed
working-directory: ./mobile
- name: Run dart custom_lint
run: dart run custom_lint
working-directory: ./mobile
# Enable after riverpod generator migration is completed
# - name: Run dart custom lint
# run: dart run custom_lint


@@ -10,48 +10,8 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run_web: ${{ steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_cli: ${{ steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e: ${{ steps.found_paths.outputs.e2e == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_mobile: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
web:
- 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**'
server:
- 'server/**'
cli:
- 'cli/**'
- 'open-api/typescript-sdk/**'
e2e:
- 'e2e/**'
mobile:
- 'mobile/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests:
name: Test & Lint Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
name: Server
runs-on: ubuntu-latest
defaults:
run:
@@ -61,11 +21,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Run npm install
run: npm ci
@@ -81,14 +36,12 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
- name: Run small tests & coverage
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
cli-unit-tests:
name: Unit Test CLI
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
name: CLI
runs-on: ubuntu-latest
defaults:
run:
@@ -101,7 +54,7 @@ jobs:
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
node-version: 20
- name: Setup typescript-sdk
run: npm ci && npm run build
@@ -126,44 +79,8 @@ jobs:
run: npm run test:cov
if: ${{ !cancelled() }}
cli-unit-tests-win:
name: Unit Test CLI (Windows)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
runs-on: windows-latest
defaults:
run:
working-directory: ./cli
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
- name: Setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
- name: Install deps
run: npm ci
# Skip linter & formatter in Windows test.
- name: Run tsc
run: npm run check
if: ${{ !cancelled() }}
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
web-unit-tests:
name: Test & Lint Web
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_web == 'true' }}
name: Web
runs-on: ubuntu-latest
defaults:
run:
@@ -173,11 +90,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './web/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
@@ -205,10 +117,8 @@ jobs:
run: npm run test:cov
if: ${{ !cancelled() }}
e2e-tests-lint:
name: End-to-End Lint
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e == 'true' }}
e2e-tests:
name: End-to-End Tests
runs-on: ubuntu-latest
defaults:
run:
@@ -217,17 +127,24 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
node-version: 20
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Run setup cli
run: npm ci && npm run build
working-directory: ./cli
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
@@ -244,58 +161,8 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_server_cli == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Run setup cli
run: npm ci && npm run build
working-directory: ./cli
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
- name: Install Playwright Browsers
run: npx playwright install --with-deps chromium
if: ${{ !cancelled() }}
- name: Docker build
@@ -306,51 +173,12 @@ jobs:
run: npm run test
if: ${{ !cancelled() }}
e2e-tests-web:
name: End-to-End Tests (Web)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_web == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
- name: Install Playwright Browsers
run: npx playwright install --with-deps chromium
if: ${{ !cancelled() }}
- name: Docker build
run: docker compose build
if: ${{ !cancelled() }}
- name: Run e2e tests (web)
run: npx playwright test
if: ${{ !cancelled() }}
mobile-unit-tests:
name: Unit Test Mobile
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
name: Mobile
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -364,9 +192,7 @@ jobs:
run: flutter test -j 1
ml-unit-tests:
name: Unit Test ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
name: Machine Learning
runs-on: ubuntu-latest
defaults:
run:
@@ -415,11 +241,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Install server dependencies
run: npm --prefix=server ci
@@ -470,11 +291,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Install server dependencies
run: npm ci

.gitignore vendored (2 lines changed)

@@ -21,5 +21,3 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*

.gitmodules vendored (2 lines changed)

@@ -1,6 +1,6 @@
[submodule "mobile/.isar"]
path = mobile/.isar
url = https://github.com/isar/isar
[submodule "e2e/test-assets"]
[submodule "server/test/assets"]
path = e2e/test-assets
url = https://github.com/immich-app/test-assets

.vscode/launch.json vendored (8 lines changed)

@@ -5,8 +5,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9231,
"name": "Immich API Server",
"port": 9230,
"name": "Immich Server",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
},
@@ -14,8 +14,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9230,
"name": "Immich Workers",
"port": 9231,
"name": "Immich Microservices",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
}


@@ -131,4 +131,4 @@ conduct enforcement ladder](https://github.com/mozilla/diversity).
For answers to common questions about this code of conduct, see the
FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.
available at https://www.contributor-covenant.org/translations.


@@ -10,6 +10,12 @@ dev-update:
dev-scale:
docker compose -f ./docker/docker-compose.dev.yml up --build -V --scale immich-server=3 --remove-orphans
stage:
docker compose -f ./docker/docker-compose.staging.yml up --build -V --remove-orphans
pull-stage:
docker compose -f ./docker/docker-compose.staging.yml pull
.PHONY: e2e
e2e:
docker compose -f ./e2e/docker-compose.yml up --build -V --remove-orphans
@@ -35,63 +41,3 @@ sql:
attach-server:
docker exec -it docker_immich-server_1 sh
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
install-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) i
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
format-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
lint-%:
npm --prefix $* run lint:fix
check-%:
npm --prefix $* run check
check-web:
npm --prefix web run check:typescript
npm --prefix web run check:svelte
test-%:
npm --prefix $* run test
test-e2e:
docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(MODULES),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(MODULES),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(MODULES),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name "dist" -type d -prune -exec rm -rf '{}' +
find . -name "build" -type d -prune -exec rm -rf '{}' +
find . -name "svelte-kit" -type d -prune -exec rm -rf '{}' +
docker compose -f ./docker/docker-compose.dev.yml rm -v -f || true
docker compose -f ./e2e/docker-compose.yml rm -v -f || true


@@ -1,7 +1,7 @@
<p align="center">
<br/>
<a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a>
<a href="https://discord.immich.app">
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
@@ -17,8 +17,8 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
@@ -31,10 +31,7 @@
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
</p>
## Disclaimer
@@ -44,36 +41,45 @@
- ⚠️ **Do not use the app as the only way to store your photos and videos.**
- ⚠️ Always follow the [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) backup plan for your precious photos and videos!
> [!NOTE]
> You can find the main documentation, including installation guides, at https://immich.app/.
## Content
## Links
- [Documentation](https://immich.app/docs)
- [About](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Roadmap](https://immich.app/roadmap)
- [Official Documentation](https://immich.app/docs)
- [Roadmap](https://github.com/orgs/immich-app/projects/1)
- [Demo](#demo)
- [Features](#features)
- [Translations](https://immich.app/docs/developer/translations)
- [Contributing](https://immich.app/docs/overview/support-the-project)
- [Introduction](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Contribution Guidelines](https://immich.app/docs/overview/support-the-project)
## Documentation
You can find the main documentation, including installation guides, at https://immich.app/.
## Demo
Access the demo [here](https://demo.immich.app). The demo runs on a free-tier Oracle VM in Amsterdam with a 2.4 GHz quad-core ARM64 CPU and 24 GB RAM.
You can access the web demo at https://demo.immich.app
For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`
### Login credentials
```bash title="Demo Credential"
The credential
email: demo@immich.app
password: demo
```
| Email | Password |
| --------------- | -------- |
| demo@immich.app | demo |
```
Spec: Free-tier Oracle VM - Amsterdam - 2.4 GHz quad-core ARM64 CPU, 24 GB RAM
```
## Activities
![Activities](https://repobeats.axiom.co/api/embed/9e86d9dc3ddd137161f2f6d2e758d7863b1789cb.svg "Repobeats analytics image")
## Features
| Features | Mobile | Web |
| :------------------------------------------- | ------ | --- |
| :--------------------------------------------- | -------- | ----- |
| Upload and view videos and photos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Prevent duplication of assets | Yes | Yes |
@@ -93,7 +99,7 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| LivePhoto/MotionPhoto backup and playback | Yes | Yes |
| Support 360 degree image display | No | Yes |
| User-defined storage structure | Yes | Yes |
| Public Sharing | Yes | Yes |
| Public Sharing | No | Yes |
| Archive and Favorites | Yes | Yes |
| Global Map | Yes | Yes |
| Partner Sharing | Yes | Yes |
@@ -102,22 +108,14 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations
## Contributors
Read more about translations [here](https://immich.app/docs/developer/translations).
<a href="https://hosted.weblate.org/engage/immich/">
<img src="https://hosted.weblate.org/widget/immich/immich/multi-auto.svg" alt="Translation status" />
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>
## Repository activity
![Activities](https://repobeats.axiom.co/api/embed/9e86d9dc3ddd137161f2f6d2e758d7863b1789cb.svg "Repobeats analytics image")
## Star history
## Star History
<a href="https://star-history.com/#immich-app/immich&Date">
<picture>
@@ -126,9 +124,3 @@ Read more about translations [here](https://immich.app/docs/developer/translatio
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=immich-app/immich&type=Date" width="100%" />
</picture>
</a>
## Contributors
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>


@@ -2,4 +2,4 @@
## Reporting a Vulnerability
Please report security issues to `security@immich.app`
Please report security issues to `alex.tran1502@gmail.com`

cli/.eslintignore (new file, 1 line)

@@ -0,0 +1 @@
/dist

cli/.eslintrc.cjs (new file, 28 lines)

@@ -0,0 +1,28 @@
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module',
tsconfigRootDir: __dirname,
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: ['plugin:@typescript-eslint/recommended', 'plugin:prettier/recommended', 'plugin:unicorn/recommended'],
root: true,
env: {
node: true,
},
ignorePatterns: ['.eslintrc.js'],
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
},
};


@@ -1 +1 @@
22.11.0
20.14


@@ -1,4 +1,4 @@
FROM node:22.10.0-alpine3.20@sha256:fc95a044b87e95507c60c1f8c829e5d98ddf46401034932499db370c494ef0ff AS core
FROM node:20-alpine3.19@sha256:696ae41fb5880949a15ade7879a2deae93b3f0723f757bdb5b8a9e4a744ce27f as core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./
@@ -16,4 +16,4 @@ RUN npm run build
WORKDIR /import
ENTRYPOINT ["node", "/usr/src/app/dist"]
ENTRYPOINT ["node", "/usr/src/app/dist"]


@@ -4,18 +4,8 @@ Please see the [Immich CLI documentation](https://immich.app/docs/features/comma
# For developers
Before building the CLI, you must build the immich server and the open-api client. To build the server, run the following in the server folder:
$ npm install
$ npm run build
Then, to build the open-api client, run the following in the open-api folder:
$ ./bin/generate-open-api.sh
To run the Immich CLI from source, run the following in the cli folder:
$ npm install
$ npm run build
$ ts-node .
@@ -27,4 +17,3 @@ You can also build and install the CLI using
$ npm run build
$ npm install -g .
****


@@ -1,61 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: ['eslint.config.mjs', 'dist'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
parserOptions: {
project: 'tsconfig.json',
tsconfigRootDir: __dirname,
},
},
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'object-shorthand': ['error', 'always'],
},
},
];

cli/package-lock.json generated (2353 lines changed; diff suppressed because it is too large)


@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.28",
"version": "2.2.0",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -13,33 +13,29 @@
"cli"
],
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.8.1",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"@types/node": "^20.3.1",
"@typescript-eslint/eslint-plugin": "^7.0.0",
"@typescript-eslint/parser": "^7.0.0",
"@vitest/coverage-v8": "^1.2.2",
"byte-size": "^8.1.1",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.0.0",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
"globals": "^15.9.0",
"eslint-plugin-unicorn": "^53.0.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"prettier-plugin-organize-imports": "^3.2.4",
"typescript": "^5.3.3",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.4.0",
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^1.2.2",
"yaml": "^2.3.1"
},
"scripts": {
@@ -63,10 +59,9 @@
},
"dependencies": {
"fast-glob": "^3.3.2",
"fastq": "^1.17.1",
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.11.0"
"node": "20.14.0"
}
}


@@ -1,201 +0,0 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';
import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
vi.mock('@immich/sdk');
describe('getAlbumName', () => {
it('should return a non-undefined value', () => {
if (os.platform() === 'win32') {
// This is meaningless for Unix systems.
expect(getAlbumName(String.raw`D:\test\Filename.txt`, {} as UploadOptionsDto)).toBe('test');
}
expect(getAlbumName('D:/parentfolder/test/Filename.txt', {} as UploadOptionsDto)).toBe('test');
});
it('has higher priority to return `albumName` in `options`', () => {
expect(getAlbumName('/parentfolder/test/Filename.txt', { albumName: 'example' } as UploadOptionsDto)).toBe(
'example',
);
});
});
describe('uploadFiles', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const baseUrl = 'http://example.com';
const apiKey = 'key';
const retry = 3;
const fetchMocker = createFetchMock(vi);
beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);
// Defaults
vi.mocked(defaults).baseUrl = baseUrl;
vi.mocked(defaults).headers = { 'x-api-key': apiKey };
fetchMocker.enableMocks();
fetchMocker.resetMocks();
});
it('returns new assets when upload file is successful', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});
it('returns new assets when upload file retry is successful', async () => {
let counter = 0;
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
counter++;
if (counter < retry) {
throw new Error('Network error');
}
return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});
it('returns new assets when upload file retry is failed', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
throw new Error('Network error');
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([]);
});
});
describe('checkForDuplicates', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const testFileChecksum = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'; // SHA1
const retry = 3;
beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);
});
it('checks duplicates', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await checkForDuplicates([testFilePath], { concurrency: 1 });
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
{
checksum: testFileChecksum,
id: testFilePath,
},
],
},
});
});
it('returns duplicates when check duplicates is rejected', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Reject,
id: testFilePath,
assetId: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
reason: Reason.Duplicate,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
],
newFiles: [],
});
});
it('returns new assets when check duplicates is accepted', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});
it('returns results when check duplicates retry is successful', async () => {
let mocked = vi.mocked(checkBulkUpload);
for (let i = 1; i < retry; i++) {
mocked = mocked.mockRejectedValueOnce(new Error('Network error'));
}
mocked.mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});
it('returns results when check duplicates retry is failed', async () => {
vi.mocked(checkBulkUpload).mockRejectedValue(new Error('Network error'));
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [],
});
});
});
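
The `sha1` helper these tests depend on lives in `src/utils` and is outside this diff. A minimal sketch of what such a helper could look like (an assumption, not the project's actual implementation) that reproduces `testFileChecksum` above:

```typescript
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';

// Stream the file through a SHA-1 hash and resolve with the hex digest.
export const sha1 = (filepath: string): Promise<string> =>
  new Promise((resolve, reject) => {
    const hash = createHash('sha1');
    createReadStream(filepath)
      .on('error', reject)
      .on('data', (chunk) => hash.update(chunk))
      .on('end', () => resolve(hash.digest('hex')));
  });

// The SHA-1 of a file containing "test" is
// a94a8fe5ccb19ba61c4c0873d391e987982fbbd3, matching testFileChecksum above.
```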


@@ -15,8 +15,8 @@ import { Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import os from 'node:os';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
const s = (count: number) => (count === 1 ? '' : 's');
@@ -25,7 +25,7 @@ const s = (count: number) => (count === 1 ? '' : 's');
type AssetBulkUploadCheckResults = Array<AssetBulkUploadCheckResult & { id: string }>;
type Asset = { id: string; filepath: string };
export interface UploadOptionsDto {
interface UploadOptionsDto {
recursive?: boolean;
ignore?: string;
dryRun?: boolean;
@@ -84,7 +84,7 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files;
};
export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) {
console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] };
@@ -100,50 +100,32 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
const newFiles: string[] = [];
const duplicates: Asset[] = [];
const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (filepaths: string[]) => {
const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
try {
// TODO refactor into a queue
for (const items of chunk(files, concurrency)) {
const dto = await Promise.all(items.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })));
const { results } = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
for (const { id: filepath, assetId, action } of results as AssetBulkUploadCheckResults) {
if (action === Action.Accept) {
newFiles.push(filepath);
} else {
// rejects are always duplicates
duplicates.push({ id: assetId as string, filepath });
}
progressBar.increment();
}
progressBar.increment(filepaths.length);
return results;
},
{ concurrency, retry: 3 },
);
for (const items of chunk(files, concurrency)) {
await queue.push(items);
}
} finally {
progressBar.stop();
}
await queue.drained();
progressBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to verify ${failedTasks.length} file${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}
return { newFiles, duplicates };
};
export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.');
return [];
@@ -177,52 +159,37 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
const newAssets: Asset[] = [];
const queue = new Queue<string, AssetMediaResponseDto>(
async (filepath: string) => {
const stats = statsMap.get(filepath);
if (!stats) {
throw new Error(`Stats not found for ${filepath}`);
}
try {
for (const items of chunk(files, concurrency)) {
await Promise.all(
items.map(async (filepath) => {
const stats = statsMap.get(filepath) as Stats;
const response = await uploadFile(filepath, stats);
const response = await uploadFile(filepath, stats);
newAssets.push({ id: response.id, filepath });
if (response.status === AssetMediaStatus.Duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}
newAssets.push({ id: response.id, filepath });
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
if (response.status === AssetMediaStatus.Duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}
return response;
},
{ concurrency, retry: 3 },
);
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
for (const filepath of files) {
await queue.push(filepath);
return response;
}),
);
}
} finally {
uploadProgress.stop();
}
await queue.drained();
uploadProgress.stop();
console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) {
console.log(`Skipped ${duplicateCount} duplicate asset${s(duplicateCount)} (${byteSize(duplicateSize)})`);
}
// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to upload ${failedTasks.length} asset${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}
return newAssets;
};
@@ -379,9 +346,7 @@ const updateAlbums = async (assets: Asset[], options: UploadOptionsDto) => {
}
};
// `filepath` valid format:
// - Windows: `D:\\test\\Filename.txt` or `D:/test/Filename.txt`
// - Unix: `/test/Filename.txt`
export const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
return options.albumName ?? path.basename(path.dirname(filepath));
const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
const folderName = os.platform() === 'win32' ? filepath.split('\\').at(-2) : filepath.split('/').at(-2);
return options.albumName ?? folderName;
};
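The two implementations differ in how they handle separators: `path.dirname`/`path.basename` follow the host platform's rules, while the manual split hard-codes them. A hypothetical illustration (example paths only, using the explicit `posix`/`win32` variants so it runs anywhere):

```typescript
import path from 'node:path';

// POSIX-style paths resolve with forward slashes.
console.log(path.posix.basename(path.posix.dirname('/test/Filename.txt'))); // 'test'

// Windows-style paths: path.win32 accepts both `\` and `/` separators.
console.log(path.win32.basename(path.win32.dirname(String.raw`D:\test\Filename.txt`))); // 'test'
console.log(path.win32.basename(path.win32.dirname('D:/test/Filename.txt'))); // 'test'
```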

View File

@@ -1,131 +0,0 @@
import * as fastq from 'fastq';
import { uniqueId } from 'lodash-es';
export type Task<T, R> = {
readonly id: string;
status: 'idle' | 'processing' | 'succeeded' | 'failed';
data: T;
error: unknown | undefined;
count: number;
// TODO: Could be useful to add a progress property.
// TODO: Could be useful to add start_at/end_at/duration properties.
result: undefined | R;
};
export type QueueOptions = {
verbose?: boolean;
concurrency?: number;
retry?: number;
// TODO: Could be useful to add a timeout property for retry.
};
export type ComputedQueueOptions = Required<QueueOptions>;
export const defaultQueueOptions = {
concurrency: 1,
retry: 0,
verbose: false,
};
/**
* An in-memory queue that processes tasks in parallel with a given concurrency.
* @see {@link https://www.npmjs.com/package/fastq}
* @template T - The type of the worker task data.
* @template R - The type of the worker output data.
*/
export class Queue<T, R> {
private readonly queue: fastq.queueAsPromised<string, Task<T, R>>;
private readonly store = new Map<string, Task<T, R>>();
readonly options: ComputedQueueOptions;
readonly worker: (data: T) => Promise<R>;
/**
* Create a new queue.
* @param worker - The worker function that processes the task.
* @param options - The queue options.
*/
constructor(worker: (data: T) => Promise<R>, options?: QueueOptions) {
this.options = { ...defaultQueueOptions, ...options };
this.worker = worker;
this.store = new Map<string, Task<T, R>>();
this.queue = this.buildQueue();
}
get tasks(): Task<T, R>[] {
const tasks: Task<T, R>[] = [];
for (const task of this.store.values()) {
tasks.push(task);
}
return tasks;
}
getTask(id: string): Task<T, R> {
const task = this.store.get(id);
if (!task) {
throw new Error(`Task with id ${id} not found`);
}
return task;
}
/**
* Wait for the queue to be empty.
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise can be ignored, as it will not lead to an `unhandledRejection`.
*/
async drained(): Promise<void> {
await this.queue.drain();
}
/**
* Add a task at the end of the queue.
* @see {@link https://www.npmjs.com/package/fastq}
* @param data - The worker task data.
* @returns Promise<Task<T, R>> - A Promise that is fulfilled once the task has been processed, whether it succeeded or failed.
* This promise can be ignored, as it will not lead to an `unhandledRejection`.
*/
async push(data: T): Promise<Task<T, R>> {
const id = uniqueId();
const task: Task<T, R> = { id, status: 'idle', error: undefined, count: 0, data, result: undefined };
this.store.set(id, task);
return this.queue.push(id);
}
// TODO: Delegate more of the fastq API through this class.
private buildQueue(): fastq.queueAsPromised<string, Task<T, R>> {
return fastq.promise((id: string) => {
const task = this.getTask(id);
return this.work(task);
}, this.options.concurrency);
}
private async work(task: Task<T, R>): Promise<Task<T, R>> {
task.count += 1;
task.error = undefined;
task.status = 'processing';
if (this.options.verbose) {
console.log('[task] processing:', task);
}
try {
task.result = await this.worker(task.data);
task.status = 'succeeded';
if (this.options.verbose) {
console.log('[task] succeeded:', task);
}
return task;
} catch (error) {
task.error = error;
task.status = 'failed';
if (this.options.verbose) {
console.log('[task] failed:', task);
}
if (this.options.retry > 0 && task.count < this.options.retry) {
if (this.options.verbose) {
console.log('[task] retry:', task);
}
return this.work(task);
}
return task;
}
}
}
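A hypothetical usage sketch of this queue (illustrative file names and worker; wrapped in an async function since `drained()` must be awaited): enqueue a few items with bounded concurrency and retries, wait for the queue to drain, then inspect failures.

```typescript
const main = async () => {
  // Illustrative worker: pretend to hash a file. Two tasks run at a time;
  // with retry: 3, each task is attempted up to three times before failing.
  const queue = new Queue<string, string>(async (filepath) => `sha1-of-${filepath}`, {
    concurrency: 2,
    retry: 3,
  });

  // Enqueue work; per the docs above, the per-task promise can be ignored.
  for (const filepath of ['a.jpg', 'b.jpg', 'c.jpg']) {
    void queue.push(filepath);
  }

  // Wait for all tasks to be processed, then inspect the outcomes.
  await queue.drained();
  const failed = queue.tasks.filter((task) => task.status === 'failed');
  console.log(`${failed.length} task(s) failed after up to 3 attempts`);
};
```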

View File

@@ -1,5 +1,4 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { CrawlOptions, crawl } from 'src/utils';
interface Test {
@@ -10,10 +9,6 @@ interface Test {
const cwd = process.cwd();
const readContent = (path: string) => {
return readFileSync(path).toString();
};
const extensions = [
'.jpg',
'.jpeg',
@@ -115,7 +110,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {
@@ -251,8 +256,7 @@ const tests: Test[] = [
{
test: 'should support ignoring absolute paths',
options: {
// Currently, fast-glob has some caveats when dealing with `/`.
pathsToCrawl: ['/*s'],
pathsToCrawl: ['/'],
recursive: true,
exclusionPattern: '/images/**',
},
@@ -272,16 +276,14 @@ describe('crawl', () => {
describe('crawl', () => {
for (const { test, options, files } of tests) {
it(test, async () => {
// The file contents are the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, ''])));
const actual = await crawl({ ...options, extensions });
const expected = Object.entries(files)
.filter((entry) => entry[1])
.map(([file]) => file);
// Compare the file's content instead of its path, since a file can be represented in multiple ways.
expect(actual.map((path) => readContent(path)).sort()).toEqual(expected.sort());
expect(actual.sort()).toEqual(expected.sort());
});
}
});

View File

@@ -1,9 +1,8 @@
import { getMyUser, init, isHttpError } from '@immich/sdk';
import { convertPathToPattern, glob } from 'fast-glob';
import { glob } from 'fast-glob';
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';
import { readFile, stat, writeFile } from 'node:fs/promises';
import { platform } from 'node:os';
import { join, resolve } from 'node:path';
import yaml from 'yaml';
@@ -107,11 +106,6 @@ export interface CrawlOptions {
exclusionPattern?: string;
extensions: string[];
}
const convertPathToPatternOnWin = (path: string) => {
return platform() === 'win32' ? convertPathToPattern(path) : path;
};
export const crawl = async (options: CrawlOptions): Promise<string[]> => {
const { extensions: extensionsWithPeriod, recursive, pathsToCrawl, exclusionPattern, includeHidden } = options;
const extensions = extensionsWithPeriod.map((extension) => extension.replace('.', ''));
@@ -130,32 +124,36 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
if (stats.isFile() || stats.isSymbolicLink()) {
crawledFiles.push(absolutePath);
} else {
patterns.push(convertPathToPatternOnWin(absolutePath));
patterns.push(absolutePath);
}
} catch (error: any) {
if (error.code === 'ENOENT') {
patterns.push(convertPathToPatternOnWin(currentPath));
patterns.push(currentPath);
} else {
throw error;
}
}
}
if (patterns.length === 0) {
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern;
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
if (recursive) {
searchPattern = searchPattern + '/**/';
}
const globbedFiles = await glob(searchPatterns, {
searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
absolute: true,
caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});
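For intuition, a small sketch of the per-directory pattern construction used by the newer code above (`buildSearchPatterns` is a made-up helper name and the inputs are hypothetical):

```typescript
// Mirrors the searchPatterns mapping: one glob per crawled directory,
// with '/**' appended only when crawling recursively.
const buildSearchPatterns = (dirs: string[], extensions: string[], recursive: boolean): string[] =>
  dirs.map((dir) => `${dir}${recursive ? '/**' : ''}/*.{${extensions.join(',')}}`);

console.log(buildSearchPatterns(['/photos', '/albums'], ['jpg', 'png'], true));
// => ['/photos/**/*.{jpg,png}', '/albums/**/*.{jpg,png}']
```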

View File

@@ -2,7 +2,6 @@ import { defineConfig } from 'vite';
import tsconfigPaths from 'vite-tsconfig-paths';
export default defineConfig({
resolve: { alias: { src: '/src' } },
build: {
rollupOptions: {
input: 'src/index.ts',

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.45.0"
constraints = "4.45.0"
version = "4.34.0"
constraints = "4.34.0"
hashes = [
"h1:/CGpnYMkLRDmqn4iAsh/jg7ELZ6QExUw03VdjKZyK5M=",
"h1:82C/ryqwQvxhBINYOOyF5ZzPW/k4zJ/RYT13eCdPgEc=",
"h1:8Wu1D7ZwbLGdHakLRAzoAJ5VqZ8I14qzkPv1OGNfIlg=",
"h1:CVq0CAibeueOuiNk0UQtwZvMLMof33n1BgskFPOymrk=",
"h1:FSS5Kq+L+CX1zARy8PhaF8edBFNgsLtds4Uo8MwJiK8=",
"h1:L4qsorLII7f8xSFmv6JOoWfLWDunWQEpK964Bxk7mtM=",
"h1:StO3PV5PDskSCnhoHhWHOPxu6hbzJUQggfLgOSkvhwg=",
"h1:Tjo+Er9ets5YrTRIdP9LBmi4p89nL/W+A7r8a1MM9nI=",
"h1:XIwT+AWvks1LTytePM9zls+O8ItxoqCfPOgHwuH9ivQ=",
"h1:aOXn/zuM1+5GGy/SSRx8q4EYCSTFE9Tr0twHPIf5/KE=",
"h1:lb+YcuZ4guYd8zE51vgSnDsRAD9IV00Z15l1i1X52s8=",
"h1:pYwNXGjfXA2rUEmotGMLWgmavT9D2rdHnV3TpuIK3ko=",
"h1:q1qrnPq6KkljwBrugCwzb7f0SVP4Lzkfh+EOLARY9V8=",
"h1:v9sL4cZLTV5Gu2004DDyy7209gT0JmudBCAD0WCr/JE=",
"zh:00be2a6adc76615a368491c7a026098103b6286deb31e3cfb037365dd39f095f",
"zh:05bd072e6119f7a5abff05c6064001f745473119a956586cf77ae843cf55d666",
"zh:228bbe61345c4e8e0bc6b698b4b9652abff65662ee72ede2aecb4c3efb91b243",
"zh:2948aeefe71ba041c94082cf931ecc95510d93af0a61d0a287880f5b9d24b11a",
"zh:5dfc2c5e95843ca54957212ee3ecb7ff06f2cf60bfd6ca278b5249fd70ac18f5",
"zh:69922cb45559b0b0544b9c2d31ed2d0fac9121faa75bc2f523484785b45d8e2b",
"h1:+W0+Xe1AUh7yvHjDbgR9T7CY1UbBC3Y6U7Eo+ucLnJM=",
"h1:2+1lKObDDdFZRluvROF3RKtXD66CFT3PfnHOvR6CmfA=",
"h1:7vluN2wmw8D9nI11YwTgoGv3hGDXlkt8xqQ4L/JABeQ=",
"h1:B0Urm8ZKTJ8cXzSCtEpJ+o+LsD8MXaD6LU59qVbh50Q=",
"h1:FpGLCm5oF12FaRti3E4iQJlkVbdCC7toyGVuH8og7KY=",
"h1:FunTmrCMDy+rom7YskY0WiL5/Y164zFrrD9xnBxU5NY=",
"h1:GrxZhEb+5HzmHF/BvZBdGKBJy6Wyjme0+ABVDz/63to=",
"h1:J36dda2K42/oTfHuZ4jKkW5+nI6BTWFRUvo60P17NJg=",
"h1:Kq0Wyn+j6zoQeghMYixbnfnyP9ZSIEJbOCzMbaCiAQQ=",
"h1:TKxunXCiS/z105sN/kBNFwU6tIKD67JKJ3ZKjwzoCuI=",
"h1:TR0URKFQxsRO5/v7bKm5hkD/CTTjsG7aVGllL/Mf25c=",
"h1:V+3Qs0Reb6r+8p4XjE5ZFDWYrOIN0x5SwORz4wvHOJ4=",
"h1:mZB3Ui7V/lPQMQK53eBOjIHcrul74252dT06Kgn3J+s=",
"h1:wJwZrIXxoki8omXLJ7XA7B1KaSrtcLMJp090fRtFRAc=",
"zh:02aa46743c1585ada8faa7db23af68ea614053a506f88f05d1090ff5e0e68076",
"zh:1e1a545e83e6457a0e15357b23139bc288fb4fbd5e9a5ddfedc95a6a0216b08c",
"zh:29eef2621e0b1501f620e615bf73b1b90d5417d745e38af63634bc03250faf87",
"zh:3c20989d7e1e141882e6091384bf85fdc83f70f3d29e3e047c493a07de992095",
"zh:3d39619379ba29c7ffb15196f0ea72a04c84cfcdf4b39ac42ac4cf4c19f3eae2",
"zh:805f4a2774e9279c590b8214aabe6df9dcc22bb995df2530513f2f78c647ce75",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:9d83a0cbf72327286f7dbd63cd4af89059c648163fe6ed21b1df768e0518d445",
"zh:a8e1982945822c7d7aaa6ba8602c7247d1a3fad15d612f30eb323491a637bf8d",
"zh:c6d41ebd69ddb23e3dad49a0ebf1da5a9c7d8706a4f55d953115d371f407928b",
"zh:d03e5442b12846c2737f099d30cd23d9f85a0c6d65437ccb44819f9a6c4e1d7f",
"zh:d446f2e1186b35037aea03b0e27d8b032d2f069f194f84b3f0e2907b3a79a955",
"zh:e4d7549a4c856524e01f3dd4d69f57119ea205f7a0fa38dcfe154475b4ae9258",
"zh:e64b8915cb9686f85e77115bd674f2faf4f29880688067d7d0f1376566fdb3b0",
"zh:f046efdc55e6385cdd69baaa06a929bef9fe6809d373b0d2d6c7df8f8c23eddc",
"zh:8af716f8655a57aa986861a8a7fa1d724594a284bd77c870eaea4db5f8b9732d",
"zh:a3d13c93b4e6ee6004782debaa9a17f990f2fe8ec8ba545c232818bb6064aba9",
"zh:bfa136acf82d3719473c0064446cc16d1b0303d98b06f55f503b7abeebceadb1",
"zh:ca6cf9254ae5436f2efbc01a0e3f7e4aa3c08b45182037b3eb3eb9539b2f7aec",
"zh:cba32d5de02674004e0a5955bd5222016d9991ca0553d4bd3bea517cd9def6ab",
"zh:d22c8cd527c6d0e84567f57be5911792e2fcd5969e3bba3747489f18bb16705b",
"zh:e4eeede9b3e72cdadd6cc252d4cbcf41baee6ecfd12bacd927e2dcbe733ab210",
"zh:facdaa787a69f86203cd3cc6922baea0b4a18bd9c36b0a8162e2e88ef6c90655",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.45.0"
version = "4.34.0"
}
}
}

View File

@@ -9,6 +9,6 @@ resource "cloudflare_record" "immich_app_release_domain" {
proxied = true
ttl = 1
type = "CNAME"
content = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
value = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.45.0"
constraints = "4.45.0"
version = "4.34.0"
constraints = "4.34.0"
hashes = [
"h1:/CGpnYMkLRDmqn4iAsh/jg7ELZ6QExUw03VdjKZyK5M=",
"h1:82C/ryqwQvxhBINYOOyF5ZzPW/k4zJ/RYT13eCdPgEc=",
"h1:8Wu1D7ZwbLGdHakLRAzoAJ5VqZ8I14qzkPv1OGNfIlg=",
"h1:CVq0CAibeueOuiNk0UQtwZvMLMof33n1BgskFPOymrk=",
"h1:FSS5Kq+L+CX1zARy8PhaF8edBFNgsLtds4Uo8MwJiK8=",
"h1:L4qsorLII7f8xSFmv6JOoWfLWDunWQEpK964Bxk7mtM=",
"h1:StO3PV5PDskSCnhoHhWHOPxu6hbzJUQggfLgOSkvhwg=",
"h1:Tjo+Er9ets5YrTRIdP9LBmi4p89nL/W+A7r8a1MM9nI=",
"h1:XIwT+AWvks1LTytePM9zls+O8ItxoqCfPOgHwuH9ivQ=",
"h1:aOXn/zuM1+5GGy/SSRx8q4EYCSTFE9Tr0twHPIf5/KE=",
"h1:lb+YcuZ4guYd8zE51vgSnDsRAD9IV00Z15l1i1X52s8=",
"h1:pYwNXGjfXA2rUEmotGMLWgmavT9D2rdHnV3TpuIK3ko=",
"h1:q1qrnPq6KkljwBrugCwzb7f0SVP4Lzkfh+EOLARY9V8=",
"h1:v9sL4cZLTV5Gu2004DDyy7209gT0JmudBCAD0WCr/JE=",
"zh:00be2a6adc76615a368491c7a026098103b6286deb31e3cfb037365dd39f095f",
"zh:05bd072e6119f7a5abff05c6064001f745473119a956586cf77ae843cf55d666",
"zh:228bbe61345c4e8e0bc6b698b4b9652abff65662ee72ede2aecb4c3efb91b243",
"zh:2948aeefe71ba041c94082cf931ecc95510d93af0a61d0a287880f5b9d24b11a",
"zh:5dfc2c5e95843ca54957212ee3ecb7ff06f2cf60bfd6ca278b5249fd70ac18f5",
"zh:69922cb45559b0b0544b9c2d31ed2d0fac9121faa75bc2f523484785b45d8e2b",
"h1:+W0+Xe1AUh7yvHjDbgR9T7CY1UbBC3Y6U7Eo+ucLnJM=",
"h1:2+1lKObDDdFZRluvROF3RKtXD66CFT3PfnHOvR6CmfA=",
"h1:7vluN2wmw8D9nI11YwTgoGv3hGDXlkt8xqQ4L/JABeQ=",
"h1:B0Urm8ZKTJ8cXzSCtEpJ+o+LsD8MXaD6LU59qVbh50Q=",
"h1:FpGLCm5oF12FaRti3E4iQJlkVbdCC7toyGVuH8og7KY=",
"h1:FunTmrCMDy+rom7YskY0WiL5/Y164zFrrD9xnBxU5NY=",
"h1:GrxZhEb+5HzmHF/BvZBdGKBJy6Wyjme0+ABVDz/63to=",
"h1:J36dda2K42/oTfHuZ4jKkW5+nI6BTWFRUvo60P17NJg=",
"h1:Kq0Wyn+j6zoQeghMYixbnfnyP9ZSIEJbOCzMbaCiAQQ=",
"h1:TKxunXCiS/z105sN/kBNFwU6tIKD67JKJ3ZKjwzoCuI=",
"h1:TR0URKFQxsRO5/v7bKm5hkD/CTTjsG7aVGllL/Mf25c=",
"h1:V+3Qs0Reb6r+8p4XjE5ZFDWYrOIN0x5SwORz4wvHOJ4=",
"h1:mZB3Ui7V/lPQMQK53eBOjIHcrul74252dT06Kgn3J+s=",
"h1:wJwZrIXxoki8omXLJ7XA7B1KaSrtcLMJp090fRtFRAc=",
"zh:02aa46743c1585ada8faa7db23af68ea614053a506f88f05d1090ff5e0e68076",
"zh:1e1a545e83e6457a0e15357b23139bc288fb4fbd5e9a5ddfedc95a6a0216b08c",
"zh:29eef2621e0b1501f620e615bf73b1b90d5417d745e38af63634bc03250faf87",
"zh:3c20989d7e1e141882e6091384bf85fdc83f70f3d29e3e047c493a07de992095",
"zh:3d39619379ba29c7ffb15196f0ea72a04c84cfcdf4b39ac42ac4cf4c19f3eae2",
"zh:805f4a2774e9279c590b8214aabe6df9dcc22bb995df2530513f2f78c647ce75",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:9d83a0cbf72327286f7dbd63cd4af89059c648163fe6ed21b1df768e0518d445",
"zh:a8e1982945822c7d7aaa6ba8602c7247d1a3fad15d612f30eb323491a637bf8d",
"zh:c6d41ebd69ddb23e3dad49a0ebf1da5a9c7d8706a4f55d953115d371f407928b",
"zh:d03e5442b12846c2737f099d30cd23d9f85a0c6d65437ccb44819f9a6c4e1d7f",
"zh:d446f2e1186b35037aea03b0e27d8b032d2f069f194f84b3f0e2907b3a79a955",
"zh:e4d7549a4c856524e01f3dd4d69f57119ea205f7a0fa38dcfe154475b4ae9258",
"zh:e64b8915cb9686f85e77115bd674f2faf4f29880688067d7d0f1376566fdb3b0",
"zh:f046efdc55e6385cdd69baaa06a929bef9fe6809d373b0d2d6c7df8f8c23eddc",
"zh:8af716f8655a57aa986861a8a7fa1d724594a284bd77c870eaea4db5f8b9732d",
"zh:a3d13c93b4e6ee6004782debaa9a17f990f2fe8ec8ba545c232818bb6064aba9",
"zh:bfa136acf82d3719473c0064446cc16d1b0303d98b06f55f503b7abeebceadb1",
"zh:ca6cf9254ae5436f2efbc01a0e3f7e4aa3c08b45182037b3eb3eb9539b2f7aec",
"zh:cba32d5de02674004e0a5955bd5222016d9991ca0553d4bd3bea517cd9def6ab",
"zh:d22c8cd527c6d0e84567f57be5911792e2fcd5969e3bba3747489f18bb16705b",
"zh:e4eeede9b3e72cdadd6cc252d4cbcf41baee6ecfd12bacd927e2dcbe733ab210",
"zh:facdaa787a69f86203cd3cc6922baea0b4a18bd9c36b0a8162e2e88ef6c90655",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.45.0"
version = "4.34.0"
}
}
}

View File

@@ -9,7 +9,7 @@ resource "cloudflare_record" "immich_app_branch_subdomain" {
proxied = true
ttl = 1
type = "CNAME"
content = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
value = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}
@@ -18,7 +18,7 @@ output "immich_app_branch_subdomain" {
}
output "immich_app_branch_pages_hostname" {
value = cloudflare_record.immich_app_branch_subdomain.content
value = cloudflare_record.immich_app_branch_subdomain.value
}
output "pages_project_name" {

View File

@@ -26,32 +26,16 @@ services:
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
environment:
IMMICH_REPOSITORY: immich-app/immich
IMMICH_REPOSITORY_URL: https://github.com/immich-app/immich
IMMICH_SOURCE_REF: local
IMMICH_SOURCE_COMMIT: af2efbdbbddc27cd06142f22253ccbbbbeec1f55
IMMICH_SOURCE_URL: https://github.com/immich-app/immich/commit/af2efbdbbddc27cd06142f22253ccbbbbeec1f55
IMMICH_BUILD: '9654404849'
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
IMMICH_BUILD_IMAGE: development
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
hard: 1048576
ports:
- 3001:3001
- 9230:9230
- 9231:9231
depends_on:
- redis
- database
healthcheck:
disable: false
immich-web:
container_name: immich_web
@@ -66,7 +50,6 @@ services:
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
- /usr/src/app/node_modules
ulimits:
@@ -98,12 +81,10 @@ services:
depends_on:
- database
restart: unless-stopped
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
image: redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
@@ -122,28 +103,13 @@ services:
ports:
- 5432:5432
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
# ports:

View File

@@ -16,13 +16,11 @@ services:
env_file:
- .env
ports:
- 2283:2283
- 2283:3001
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -35,19 +33,15 @@ services:
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports:
- 3003:3003
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
image: redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -67,34 +61,19 @@ services:
ports:
- 5432:5432
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
immich-prometheus:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:378f4e03703557d1c6419e6caccf922f96e6d88a530f7431d66a4c4f4b1000fe
image: prom/prometheus@sha256:5c435642ca4d8427ca26f4901c11114023004709037880cd7860d5b7176aa731
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -106,7 +85,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.3.0-ubuntu@sha256:51587e148ac0214d7938e7f3fe8512182e4eb6141892a3ffb88bba1901b49285
image: grafana/grafana:11.0.0-ubuntu@sha256:02e99d1ee0b52dc9d3000c7b5314e7a07e0dfd69cc49bb3f8ce323491ed3406b
volumes:
- grafana-data:/var/lib/grafana

View File

@@ -16,19 +16,16 @@ services:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
- 2283:3001
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -43,12 +40,10 @@ services:
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
image: docker.io/redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -62,29 +57,13 @@ services:
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
volumes:

View File

@@ -12,7 +12,6 @@ DB_DATA_LOCATION=./postgres
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres
# The values below this line do not need to be changed

View File

@@ -51,4 +51,5 @@ services:
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
- LIBVA_DRIVER_NAME=d3d12

View File

@@ -3,10 +3,10 @@ global:
evaluation_interval: 15s
scrape_configs:
- job_name: immich_api
- job_name: immich_server
static_configs:
- targets: ['immich-server:8081']
- job_name: immich_microservices
static_configs:
- targets: ['immich-server:8082']
- targets: ['immich-microservices:8081']

View File

@@ -1,49 +0,0 @@
#!/bin/sh
set -eu
LOG_LEVEL="${IMMICH_LOG_LEVEL:='info'}"
logDebug() {
if [ "$LOG_LEVEL" = "debug" ] || [ "$LOG_LEVEL" = "verbose" ]; then
echo "DEBUG: $1" >&2
fi
}
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
logDebug "cgroup v2 detected."
if [ -f /sys/fs/cgroup/cpu.max ]; then
read -r quota period </sys/fs/cgroup/cpu.max
if [ "$quota" = "max" ]; then
logDebug "No CPU limits set."
unset quota period
fi
else
logDebug "/sys/fs/cgroup/cpu.max not found."
fi
else
logDebug "cgroup v1 detected."
if [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then
quota=$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)
period=$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)
if [ "$quota" = "-1" ]; then
logDebug "No CPU limits set."
unset quota period
fi
else
logDebug "/sys/fs/cgroup/cpu/cpu.cfs_quota_us or /sys/fs/cgroup/cpu/cpu.cfs_period_us not found."
fi
fi
if [ -n "${quota:-}" ] && [ -n "${period:-}" ]; then
cpus=$((quota / period))
if [ "$cpus" -eq 0 ]; then
cpus=1
fi
else
cpus=$(grep -c ^processor /proc/cpuinfo)
fi
echo "$cpus"

View File

@@ -1 +1 @@
22.11.0
20.14

View File

@@ -94,7 +94,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
Cheer!

View File

@@ -142,7 +142,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
Cheer!

View File

@@ -1,7 +1,7 @@
---
title: The Immich core team goes full-time
authors: [alextran]
tags: [update, announcement, FUTO]
tags: [update, announcement, futo]
date: 2024-05-01T00:00
---

View File

@@ -1,91 +0,0 @@
---
title: Licensing announcement - Purchase a license to support Immich
authors: [alextran]
tags: [update, announcement, FUTO]
date: 2024-07-18T00:00
---
Hello everybody,
Firstly, on behalf of the Immich team, I'd like to thank everybody for your continuous support of Immich since the very first day! Your contributions, encouragement, and community engagement have helped bring Immich to its current state. The team and I are forever grateful for that.
Since our [last announcement of the core team joining FUTO to work on Immich full-time](https://immich.app/blog/2024/immich-core-team-goes-fulltime), one of the goals of our new position is to foster a healthy relationship between the developers and the users. We believe that this enables us to create great software, establish transparent policies and build trust.
We want to build a great software application that brings value to you and your loved ones' lives. We are not using you as a product, i.e., selling or tracking your data. We are not putting annoying ads into our software. We respect your privacy. We want to be compensated for the hard work we put in to build Immich for you.
With those notes, we have enabled a way for you to financially support the continued development of Immich, ensuring the software can move forward and will be maintained, by offering a lifetime license of the software. We think if you like and use software, you should pay for it, but _we're never going to force anyone to pay or try to limit Immich for those who don't._
There are two types of license that you can choose to purchase: **Server License** and **Individual License**.
### Server License
This is a lifetime license costing **$99.99**. The license is applied to the whole server. You and all users on your server are licensed.
### Individual License
This is a lifetime license costing **$24.99**. The license is applied to a single user, and can be used on any server they choose to connect to.
<img
width="837"
alt="license-social-gh"
src="https://github.com/user-attachments/assets/241932ed-ef3b-44ec-a9e2-ee80754e0cca"
/>
You can purchase the license on [our page - https://buy.immich.app](https://buy.immich.app).
Starting with release `v1.109.0`, you can purchase a license and enter the key directly in the app.
<img
width="1414"
alt="license-page-gh"
src="https://github.com/user-attachments/assets/364fc32a-f6ef-4594-9fea-28d5a26ad77c"
/>
## Thank you
Thank you again for your support; this will help create a strong foundation and stability for the Immich team to continue developing and maintaining the project that you love to use.
<p align="center">
<img
src="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExbjY2eWc5Y2F0ZW56MmR4aWE0dDhzZXlidXRmYWZyajl1bWZidXZpcyZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/87CKDqErVfMqY/giphy.gif"
width="550"
title="SUPPORT THE PROJECT!"
/>
</p>
<br />
<br />
Cheers! 🎉
Immich team
# FAQ
### 1. Where can I purchase a license?
There are several places where you can purchase a license:
- [https://buy.immich.app](https://buy.immich.app)
- [https://pay.futo.org](https://pay.futo.org/)
- or directly from the app.
### 2. Do I need both _Individual License_ and _Server License_?
No.
If you are the admin and the sole user, or your instance has fewer than 4 users in total, you can buy the **Individual License** for each user.
If your instance has more than 4 users, it is more cost-effective to buy the **Server License**, which will license all the users on your instance.
### 3. What do I do if I don't pay?
You can continue using Immich without any restriction.
### 4. Will there be any paywalled features?
No, there will never be any paywalled features.
### 5. Where can I get support regarding payment issues?
You can email us at `billing@futo.org` with your `orderId` and your email address, or reach out on our Discord server.

View File

@@ -1,78 +0,0 @@
---
title: Immich Update - July 2024
authors: [alextran]
date: 2024-07-01T00:00
tags: [update, v1.106.0]
---
Hello everybody! Alex from Immich here, and I am back with another development progress update for the project.
Summer has returned once again, and the night sky is filled with stars: thank you for the **38,000 shining stars** you have sent to our [GitHub repo](https://github.com/immich-app/immich)! Since the last announcement, several core contributors have started full-time. Everything is going great with development: PRs get merged at a _brrrrrrr_ rate, conversation between team members is at a new high, and we met and are working with the great engineers at FUTO. The spirit is high, and we have a lot of things brewing that we think you will like.
Let's go over some of the updates we had since the last post.
### Container consolidation
We reduced the total number of containers from 5 to 4 by spawning the microservices worker thread directly inside the server container. Woohoo, remember when Immich had 7 containers?
### Email notifications
![smtp](https://github.com/immich-app/immich/assets/27055614/949cba85-d3f1-4cd3-b246-a6f5fb5d3ae8)
We added email notifications to the app with configurable SMTP settings, covering the following events:
- A new account is created for you.
- You are added to a shared album.
- New media is added to an album.
### Versioned docs
You can now jump back into the past or take a peek at the unreleased version of the documentation by selecting the version on the website.
![version-doc](https://github.com/immich-app/immich/assets/27055614/6d22898a-5093-41ad-b416-4573d7ce6e03)
### Similarity deduplication
With more machine learning and CLIP magic, we now have similarity deduplication built into the application: it searches for closely similar images and lets you decide what to do with them, i.e., keep or trash.
![similarity-deduplication](https://github.com/immich-app/immich/assets/27055614/3cac8478-fbf7-47ea-acb6-0146901dc67e)
### Permanent URL for asset on the web
The detail view for an asset now has a permanent URL so you can easily share it with your loved ones.
### Web app translations
We now have a public Weblate project which the community can use to translate the webapp into their native languages. We are planning to port the mobile app translation to this platform as well. If you would like to contribute, you can take a look [here](https://hosted.weblate.org/projects/immich/immich/). We're already close to 50% translated -- we really appreciate everyone contributing to that!
![web-translation](https://github.com/immich-app/immich/assets/27055614/363df2ed-656c-4584-bd82-0708a693c5bc)
### Read-only/Editor mode on shared album
As the owner of the album, you can choose whether a shared user can edit the album or only view its content without making any modifications.
![read-only-album](https://github.com/immich-app/immich/assets/27055614/c6f66375-b869-495a-9a86-3e87b316d109)
### Better video thumbnails
Immich now tries to find a descriptive video thumbnail instead of simply using the first frame. No more black images for thumbnails!
### Public Roadmap
We now have a [public roadmap](https://immich.app/roadmap), giving you a high-level overview of things the team is working on. The first goal of this roadmap is to bring Immich to a stable release, which is expected sometime later this year. Some of the highlights include
- Auto stacking - Auto stacking of burst photos
- Basic editor - Basic photo editing capabilities
- Workflows - Automate tasks with workflows
- Fine grained access controls - Granular access controls for users and api keys
- Better background backups - Rework background backups to be more reliable
- Private/locked photos - Private assets with extra protections
Beyond the items in the roadmap, we have _many many_ more ideas for Immich. The team and I hope that you are enjoying the application, find it helpful in your life and we have nothing but the intention of building out great software for you all!
Have an amazing Summer or Winter for those in the southern hemisphere! :D
Until next time,
Cheers!
Alex

View File

@@ -52,25 +52,14 @@ On iOS (iPhone and iPad), the operating system determines if a particular app ca
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often.
### Why are features not working with a self-signed cert or mTLS?
Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
---
## Assets
### Does Immich change the file?
No, Immich does not modify the original files.
All edited metadata is saved in companion `.xmp` sidecar files and the database.
However, Immich will delete original files that have been trashed when the trash is emptied in the Immich UI.
### Why do my file names appear as a random string in the file manager?
When the Storage Template is off (the default), Immich stores files under randomly generated names (UUIDs) to prevent duplicate file names. To restore the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
No, Immich does not touch the original file under any circumstances;
all edited metadata is saved in the companion sidecar file and the database.
### Can I add my existing photo library?
@@ -144,6 +133,40 @@ For example, say you have existing transcodes with the policy "Videos higher tha
No. Our design principle is that the original assets should always be untouched.
### How can I move all data (photos, persons, albums, libraries) from one user to another?
This is not officially supported but can be accomplished with some database updates. You can do this on the command line (in the PostgreSQL container using the `psql` command), or you can add, for example, an [Adminer](https://www.adminer.org/) container to the `docker-compose.yml` file so that you can use a web interface.
<details>
<summary>Steps</summary>
1. **MAKE A BACKUP** - See [backup and restore](/docs/administration/backup-and-restore.md).
2. Find the ID of both the 'source' and the 'destination' user (it's the id column in the `users` table)
3. Four tables need to be updated:
```sql
BEGIN;
-- reassign albums
UPDATE albums SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign people
UPDATE person SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign assets
UPDATE assets SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>'
AND CHECKSUM NOT IN (SELECT CHECKSUM FROM assets WHERE "ownerId" = '<destinationId>');
-- reassign external libraries
UPDATE libraries SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
COMMIT;
```
4. There might be left-over assets in the 'source' user's library if they were skipped by the last query because of duplicate checksums. These are probably duplicates anyway and can likely be removed.
</details>
---
## Albums
@@ -168,30 +191,17 @@ We haven't implemented an official mechanism for creating albums from external l
Duplicate checking only exists for upload libraries, using the file hash. Furthermore, duplicate checking is not global, but _per library_. Therefore, a situation where the same file appears twice in the timeline is possible, especially for external libraries.
### Why are my edits to files not being saved in read-only external libraries?
Images in read-write external libraries (the default) can be edited as normal.
In read-only libraries (`:ro` in the `docker-compose.yml`), Immich is unable to create the `.xmp` sidecar files to store edited file metadata.
For this reason, the metadata (timestamp, location, description, star rating, etc.) cannot be edited for files in read-only external libraries.
### How are deletions of files handled in external libraries?
Immich will attempt to delete original files that have been trashed when the trash is emptied.
In read-write external libraries (the default), Immich will delete the original file.
In read-only libraries (`:ro` in the `docker-compose.yml`), files can still be trashed in the UI.
However, when the trash is emptied, the files will re-appear in the main timeline since Immich is unable to delete the original file.
---
## Machine Learning
### How does smart search work?
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
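As a toy illustration only (this is not Immich's code; in practice the comparison runs inside the database via a vector extension), the search step boils down to comparing embedding vectors, for example with cosine similarity:

```typescript
// Toy sketch: both an image and a query text are mapped to embedding
// vectors; "similar" means a small angle between the two vectors.
const cosineSimilarity = (a: number[], b: number[]): number => {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};
```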
### How does facial recognition work?
See [How Facial Recognition Works](/docs/features/facial-recognition#How-Facial-Recognition-Works) for details.
For face detection and recognition, Immich uses [InsightFace models](https://github.com/deepinsight/insightface/tree/master/model_zoo).
### How can I disable machine learning?
@@ -205,15 +215,19 @@ However, disabling all jobs will not disable the machine learning service itself
### I'm getting errors about models being corrupt or failing to download. What do I do?
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Hugging Face][huggingface] and place them in the cache folder.
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Huggingface][huggingface] and place them in the cache folder.
### Can I use a custom CLIP model?
No, this is not supported. Only models listed in the [Hugging Face][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
No, this is not supported. Only models listed in the [Huggingface][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/smart-search#CLIP-model) for instructions.
You can change to a multilingual model listed [here](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) by going to Administration > Machine Learning Settings > Smart Search and replacing the name of the model. Be sure to re-run Smart Search on all assets after this change. You can then search in over 100 languages.
:::note
Feel free to make a feature request if there's a model you want to use that isn't in the [Immich Hugging Face list][huggingface].
:::
### Does Immich support Facial Recognition for videos?
@@ -254,7 +268,7 @@ ls clip/ facial-recognition/
### Why is Immich slow on low-memory systems like the Raspberry Pi?
Immich optionally uses transcoding and machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
### Can I lower CPU and RAM usage?
@@ -263,12 +277,10 @@ The initial backup is the most intensive due to the number of jobs running. The
- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
- For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
- It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage?
### Can I limit the amount of CPU and RAM usage?
By default, a container has no resource constraints and can use as much of a given resource as the host's kernel scheduler allows. To limit this, you can add the following to the `docker-compose.yml` block of any containers that you want to have limited resources.
@@ -288,8 +300,6 @@ deploy:
</details>
For more details, you can look at the [original docker docs](https://docs.docker.com/config/containers/resource_constraints/) or use this [guide](https://www.baeldung.com/ops/docker-memory-limit).
Note that memory constraints work by terminating the container, so this can introduce instability if set too low.
### How can I boost machine learning speed?
:::note
@@ -299,16 +309,21 @@ This advice improves throughput, not latency. This is to say that it will make S
You can increase throughput by increasing the job concurrency for machine learning jobs (Smart Search, Face Detection). With higher concurrency, the host will work on more assets in parallel. You can do this by navigating to Administration > Settings > Job Settings and increasing concurrency as needed.
:::danger
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Storage speed and latency can quickly become the limiting factor beyond this, particularly when using HDDs.
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Beyond this, note that storage speed and latency may quickly become the limiting factor; particularly when using HDDs.
The concurrency can be increased more comfortably with a GPU, but should still not be above 16 in most cases.
Do not exaggerate with the amount of jobs because you're probably thoroughly overloading the server.
Do not exaggerate with the job concurrency because you're probably thoroughly overloading the server.
More details can be found [here](https://discord.com/channels/979116623879368755/994044917355663450/1174711719994605708)
:::
### My server shows Server Status Offline | Version Unknown. What can I do?
### Why is Immich using so much of my CPU?
You need to enable WebSockets on your reverse proxy.
When a large number of assets are uploaded to Immich, it makes sense that the CPU and RAM will be heavily used for machine learning work and thumbnail generation.
Once this process completes, CPU usage will drop to around 3-5%.
### My server shows Server Status Offline | Version Unknown what can I do?
You need to enable Websocket on your reverse proxy.
---
@@ -318,12 +333,6 @@ You need to enable WebSockets on your reverse proxy.
Immich components are typically deployed using docker. To see logs for deployed docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
### How can I reduce the log verbosity of Redis?
To decrease Redis logs, you can add the following line to the `redis:` section of the `docker-compose.yml`:
` command: redis-server --loglevel warning`
### How can I run Immich as a non-root user?
You can change the user in the container by setting the `user` argument in `docker-compose.yml` for each service.
@@ -333,11 +342,7 @@ You may need to add mount points or docker volumes for the following internal co
- `immich-machine-learning:/.cache`
- `redis:/data`
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.
:::note Docker Compose Volumes
The Docker Compose top-level volume element does not support non-root access; all of the above volumes must be local volume mounts.
:::
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
For a further hardened system, you can add the following block to every container except for `immich_postgres`.
@@ -437,11 +442,4 @@ docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --
</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected.
The damaged database dump can be used to manually recover any changes made since the last backup, if needed.
The causes of possible corruption are many, but can include unexpected power-offs or unmounts, use of a network share for Postgres data, or a poor storage medium such as an SD card or a failing HDD/SSD.
[huggingface]: https://huggingface.co/immich-app

View File

@@ -21,19 +21,6 @@ The recommended way to backup and restore the Immich database is to use the `pg_
It is not recommended to directly backup the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups
Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Restoring
We hope to make restoring simpler in future versions; for now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then please follow the steps in the following section for restoring the database.
### Manual Backup and Restore
<Tabs>
<TabItem value="Linux system" label="Linux system" default>
@@ -42,38 +29,34 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
```
```bash title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
sleep 10 # Wait for Postgres server to start up
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | Set-Content -Encoding utf8 "C:\path\to\backup\dump.sql"
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres > "\path\to\backup\dump.sql"
```
```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
sleep 10 # Wait for Postgres server to start up
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
@@ -85,8 +68,6 @@ Note that for the database restore to proceed properly, it requires a completely
Some deployment methods make it difficult to start the database without also starting the server or microservices. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Note that both the server and microservices must have this variable set to prevent the migrations from running. Be sure to remove this variable and restart the services after the database is restored.
:::
### Automatic Database Backups
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
@@ -95,7 +76,6 @@ services:
backup:
container_name: immich_db_dumper
image: prodrigestivill/postgres-backup-local:14
restart: always
env_file:
- .env
environment:
@@ -116,7 +96,6 @@ services:
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
# Be sure to check the username if you changed it from default
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
@@ -169,21 +148,9 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
- Preview images (small thumbnails and large previews) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional change) or who started with this version.
- Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
<TabItem value="Storage Template On" label="Storage Template On">
@@ -211,7 +178,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:**
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
@@ -219,22 +186,11 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Files uploaded through mobile apps.
- Temporarily located in `UPLOAD_LOCATION/upload/<userID>`.
- Transferred to `UPLOAD_LOCATION/library/<userID>` upon successful upload.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional change) or who started with this version.
- Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
</Tabs>
:::danger
Do not touch the files inside these folders under any circumstances except taking a backup. Changing or removing an asset can cause untracked and missing files.
You can think of it as the App-Which-Must-Not-Be-Named: the only way to view, change, and delete assets is through the mobile or browser interface.
:::
@@ -1,21 +0,0 @@
# Email Notifications
Immich supports the option to send notifications via Email for the following events:
- Creating a new user
- Notifying a user when they get added to a shared album
- Informing other users about the addition of new assets to a shared album
## SMTP settings
You can access the settings panel from the web at `Administration -> Settings -> Notification settings`.
Under Email, enter the required details to connect with an SMTP server.
You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
## User's notifications settings
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />
@@ -22,12 +22,12 @@ Copy the entire `immich-server` block as a new service and make the following ch
- container_name: immich_server
...
- ports:
- - 2283:2283
- - 2283:3001
+ immich-microservices:
+ container_name: immich_microservices
```
Once you have two copies of the immich-server service, make the following changes to each one. This will allow one container to only serve the web UI and API, and the other one to handle all other tasks.
```diff
services:
@@ -52,4 +52,4 @@ Additionally, some jobs run on a schedule, which is every night at midnight. Thi
Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::
<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />
@@ -3,7 +3,7 @@
This page contains details about using OAuth in Immich.
:::tip
Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobile Redirect URI](#mobile-redirect-uri) for an alternative solution.
:::
## Overview
@@ -11,7 +11,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)
@@ -30,7 +30,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured
The **Sign-in redirect URIs** should include:
- `app.immich:///oauth-callback` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `http://DOMAIN:PORT/auth/login` - for logging in with OAuth from the Web Client
- `http://DOMAIN:PORT/user-settings` - for manually linking OAuth in the Web Client
@@ -38,7 +38,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured
Mobile
- `app.immich:///oauth-callback` (You **MUST** include this for iOS and Android mobile apps to work properly)
Localhost
@@ -96,16 +96,16 @@ When Auto Launch is enabled, the login page will automatically redirect the user
## Mobile Redirect URI
The redirect URI for the mobile app is `app.immich:///oauth-callback`, which is a [Custom Scheme](https://developer.apple.com/documentation/xcode/defining-a-custom-url-scheme-for-your-app). If this custom scheme is an invalid redirect URI for your OAuth Provider, you can work around this by doing the following:
1. Configure an http(s) endpoint that forwards requests to `app.immich:///oauth-callback`
2. Whitelist the new endpoint as a valid redirect URI with your provider.
3. Specify the new endpoint as the `Mobile Redirect URI Override`, in the OAuth settings.
With these steps in place, you should be able to use OAuth from the [Mobile App](/docs/features/mobile-app.mdx) without a custom scheme redirect URI.
:::info
Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to forward requests to `app.immich:///oauth-callback`, and can be used for step 1.
:::
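As a quick sanity check (the domain is an assumption; replace it with your own instance), you can request the route directly and inspect where it redirects:
```bash
# Should print a redirect URL beginning with app.immich:///oauth-callback
curl -s -o /dev/null -w '%{redirect_url}\n' "https://immich.example.com/api/oauth/mobile-redirect?code=test"
```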
## Example Configuration
@@ -154,21 +154,21 @@ Configuration of Authorised redirect URIs (Google Console)
Configuration of OAuth in Immich System Settings
| Setting | Value |
| ---------------------------- | ---------------------------------------------------------------------------- |
| Issuer URL | `https://accounts.google.com` |
| Client ID | 7\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***vuls.apps.googleusercontent.com |
| Client Secret | G\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***OO |
| Scope | openid email profile |
| Signing Algorithm | RS256 |
| Storage Label Claim | preferred_username |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (0 for unlimited quota) |
| Button Text | Sign in with Google (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled |
| Mobile Redirect URI Override | Enabled (required) |
| Mobile Redirect URI | `https://example.immich.app/api/oauth/mobile-redirect` |
</details>
@@ -13,9 +13,9 @@ Running with a pre-existing Postgres server can unlock powerful administrative f
You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
:::note
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
:::
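To verify that the extension is preloaded and that a compatible version is installed, something like the following should work (shown against a containerized Postgres named `immich_postgres`, which is an assumption; run `psql` directly on the host if your server is not in Docker):
```bash
# Confirm vectors.so is preloaded and inspect the installed extension version
docker exec immich_postgres psql --username=postgres \
  -c "SHOW shared_preload_libraries;" \
  -c "SELECT extname, extversion FROM pg_extension WHERE extname = 'vectors';"
```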
## Specifying the connection URL
@@ -1,9 +1,5 @@
# Repair Page
:::warning
This feature is currently disabled and will be reworked in the near future.
:::
The repair page is designed to give information to the system administrator about files that are not tracked, or offline paths.
## Natural State
@@ -40,26 +40,6 @@ server {
}
```
#### Compatibility with Let's Encrypt
In the event that your nginx configuration includes a section for Let's Encrypt, it's likely that you have a segment similar to the following:
```nginx
location ~ /.well-known {
...
}
```
This particular `location` directive can inadvertently prevent mobile clients from reaching the `/.well-known/immich` path, which is crucial for discovery. The usual error message in this case is: "Your app major version is not compatible with the server". To remedy this, you should introduce an additional location block specifically for this path, ensuring that requests are correctly proxied to the Immich server:
```nginx
location = /.well-known/immich {
proxy_pass http://<backend_url>:2283;
}
```
By doing so, you'll maintain the functionality of Let's Encrypt while allowing mobile clients to access the necessary Immich path without obstruction.
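After adding the block, a quick way to confirm that the discovery path is reachable through the proxy (the domain is an assumption) is:
```bash
# Should return Immich's discovery response rather than the Let's Encrypt handler's
curl -s https://immich.example.com/.well-known/immich
```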
### Caddy example config
As an alternative to nginx, you can also use [Caddy](https://caddyserver.com/) as a reverse proxy (with automatic HTTPS configuration). Below is an example config.
@@ -84,43 +64,3 @@ Below is an example config for Apache2 site configuration.
ProxyPreserveHost On
</VirtualHost>
```
### Traefik Proxy example config
The example below is for Traefik version 3.
The most important change is to increase the `respondingTimeouts` of the entrypoint used by Immich, in this example the entrypoint `websecure` for port `443`. By default it is set to 60s, which leads to video uploads failing after 1 minute (Error Code 499). With this config the timeout is 10 minutes, which is enough in most cases; increase it if needed.
`traefik.yaml`
```yaml
[...]
entryPoints:
websecure:
address: :443
# this section needs to be added
transport:
respondingTimeouts:
readTimeout: 600s
idleTimeout: 600s
writeTimeout: 600s
```
The second part is in the `docker-compose.yml` file that contains Immich. Add the Traefik-specific labels as in the example.
`docker-compose.yml`
```yaml
services:
immich-server:
[...]
labels:
traefik.enable: true
# increase readingTimeouts for the entrypoint used here
traefik.http.routers.immich.entrypoints: websecure
traefik.http.routers.immich.rule: Host(`immich.your-domain.com`)
traefik.http.services.immich.loadbalancer.server.port: 2283
```
Keep in mind that Traefik needs access to the Docker network that Immich is in, which is usually done by adding the Traefik network to the `immich-server` service (a sketch follows below).
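As a minimal sketch, a running container can be attached to an existing Traefik network like this (the network name `traefik` is an assumption; for a permanent setup, declare the network in `docker-compose.yml` instead):
```bash
docker network connect traefik immich_server
```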
@@ -7,7 +7,7 @@ If a storage quota has been defined for the user, the usage number will be displ
:::
:::info External library
External libraries are not included in the storage quota.
:::
<img src={require('./img/server-stats.png').default} title="server statistic" />
@@ -1,47 +0,0 @@
# System Integrity
## Folder checks
:::info
The folders considered for these checks include: `upload/`, `library/`, `thumbs/`, `encoded-video/`, `profile/`
:::
When Immich starts, it performs a series of checks in order to validate that it can read and write files to the volume mounts used by the storage system. If it cannot perform all the required operations, it will fail to start. The checks include:
- Creating an initial hidden file (`.immich`) in each folder
- Reading a hidden file (`.immich`) in each folder
- Overwriting a hidden file (`.immich`) in each folder
The checks are designed to catch the following situations:
- Incorrect permissions (cannot read/write files)
- Missing volume mount (`.immich` files should exist, but are missing)
### Common issues
:::note
`.immich` files serve as markers and help keep track of volume mounts being used by Immich. Except for the situations listed below, they should never be manually created or deleted.
:::
#### Missing `.immich` files
```
Verifying system mount folder checks (enabled=true)
...
ENOENT: no such file or directory, open 'upload/encoded-video/.immich'
```
The above error messages show that the server has previously (successfully) written `.immich` files to each folder, but now does not detect them. This could be because of any of the following:
- Permission error - unable to read the file, but it exists
- File does not exist - volume mount has changed and should be corrected
- File does not exist - user manually deleted it and should be manually re-created (`touch .immich`; see the sketch after this list)
- File does not exist - user restored from a backup, but did not restore each folder (user should restore all folders or manually create `.immich` in any missing folders)
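A minimal sketch for re-creating the marker files after a partial restore, assuming the default folder layout (replace the path with your actual upload location):
```bash
# Re-create the .immich marker in each folder checked at startup
for dir in upload library thumbs encoded-video profile; do
  touch "/path/to/UPLOAD_LOCATION/${dir}/.immich"
done
```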
### Ignoring the checks
The checks are designed to catch common problems that we have seen users have in the past, but if you want to disable them you can set the following environment variable:
```
IMMICH_IGNORE_MOUNT_CHECK_ERRORS=true
```
@@ -10,59 +10,6 @@ Viewing and modifying the system settings is restricted to the Administrator.
You can always return to the default settings by clicking the `Reset to default` button.
:::
## Authentication Settings
Manage password, OAuth, and other authentication settings
### OAuth Authentication
Immich supports OAuth Authentication. Read more about this feature and its configuration [here](/docs/administration/oauth).
### Password Authentication
The administrator can choose to disable login with username and password for the entire instance. This means that **no one**, including the system administrator, will be able to log in using this method. If [OAuth Authentication](/docs/administration/oauth) is also disabled, no users will be able to log in using **any** method. Changing this setting does not affect existing sessions, just new login attempts.
:::tip
You can always use the [Server CLI](/docs/administration/server-commands) to re-enable password login.
:::
## Image Settings (thumbnails and previews)
- Thumbnails - Used in the main timeline.
- Previews - Used in the asset viewer.
By default, Immich creates three images for each asset: Blurred (thumbhash), Small thumbnails (webp), and Large previews (jpeg/webp). Using these settings, you can change the quality of the thumbnail and preview files that are created.
**Thumbnail format**
Allows you to choose the format of the Thumbnail images. Webp produces smaller files than jpeg, but is slower to encode.
:::tip
You can read in detail about the advantages and disadvantages of using webp over jpeg on [Adobe's website](https://www.adobe.com/creativecloud/file-types/image/raster/webp-file.html)
:::
**Thumbnail resolution**
Used when viewing groups of photos (main timeline, album view, etc.). Higher resolutions can preserve more detail but take longer to encode, have larger file sizes, and can reduce app responsiveness.
**Preview format**
Allows you to choose the format of the Preview images. Webp produces smaller files than jpeg, but is slower to encode.
**Preview resolution**
Used when viewing a single photo and for machine learning. Higher resolutions can preserve more detail but take longer to encode, have larger file sizes, and can reduce app responsiveness.
**Quality**
Image quality from 1-100. Higher is better for quality but produces larger files. This option affects both the Preview and Thumbnail images.
**Prefer wide gamut**
Use Display P3 for thumbnails. This better preserves the vibrance of images with wide colorspaces, but images may appear differently on old devices with an old browser version. sRGB images are kept as sRGB to avoid color shifts.
**Prefer embedded preview**
Use embedded previews in RAW photos as the input to image processing when available. This can produce more accurate colors for some images, but the quality of the preview is camera-dependent and the image may have more compression artifacts.
:::tip
The default resolution for Large thumbnails can be lowered from 1440p (default) to 1080p or 720p to save storage space.
:::
## Job Settings
Using these settings, you can determine the amount of work that will run concurrently for each task in microservices. Some tasks can be set to higher values on computers with powerful hardware and storage with good I/O capabilities.
@@ -72,11 +19,6 @@ this advice improves throughput, not latency, for example, it will make Smart Se
It is important to remember that jobs like Smart Search, Face Detection, Facial Recognition, and Transcode Videos require a **lot** of processing power, so do not exaggerate the concurrency values or you will likely overload the server.
:::danger IMPORTANT
If you increase the concurrency from the defaults we set, especially for thumbnail generation, make sure you do not increase them past the amount of CPU cores you have available.
Doing so can impact API responsiveness with no gain in thumbnail generation speed.
:::
:::info Facial Recognition Concurrency
The Facial Recognition Concurrency value cannot be changed because
[DBSCAN](https://www.youtube.com/watch?v=RDZUdRSDOok) is traditionally sequential, but there are parallel implementations of it out there. Our implementation isn't parallel.
@@ -104,7 +46,7 @@ You can choose to disable a certain type of machine learning, for example smart
### Smart Search
The [smart search](/docs/features/smart-search) settings allow the search tool to use different [CLIP](https://openai.com/research/clip) models, which [can be changed](/docs/FAQ#can-i-use-a-custom-clip-model). Different models may give better results but can consume more processing power. When changing a model, it is mandatory to re-run the
Smart Search job on all images to fully apply the change.
:::info Internet connection
@@ -113,23 +55,15 @@ After downloading, there is no need for Immich to connect to the network
Unless version checking has been enabled in the settings.
:::
### Duplicate Detection
Use CLIP embeddings to find likely duplicates. The maximum detection distance can be configured to tune how aggressively duplicates are detected.
- **Maximum detection distance -** Maximum distance between two images to consider them duplicates, ranging from 0.001-0.1. Higher values will detect more duplicates, but may result in false positives.
### Facial Recognition
Under these settings, you can configure facial recognition.
Editable settings:
- **Facial Recognition Model**
- **Min Detection Score**
- **Max Recognition Distance**
- **Min Recognized Faces**
You can learn more about these options on the [Facial Recognition page](/docs/features/facial-recognition#how-face-detection-works)
:::info
When changing the values in Min Detection Score, Max Recognition Distance, and Min Recognized Faces.
@@ -153,15 +87,23 @@ The map can be adjusted via [OpenMapTiles](https://openmaptiles.org/styles/) for
Immich supports [Reverse Geocoding](/docs/features/reverse-geocoding) using data from the [GeoNames](https://www.geonames.org/) geographical database.
## Notification Settings
SMTP server setup, for user creation notifications, new albums, etc. More information can be found [here](/docs/administration/email-notification)
## Server Settings
### External Domain
Overrides the domain name in shared links and email notifications. The URL should not include a trailing slash.
### Welcome Message
@@ -183,6 +125,27 @@ p {
}
```
## Trash Settings
The system administrator can enable a trash for deleted files; these files will remain in the trash for 30 days (default) or for the period defined by the system administrator.
@@ -13,20 +13,6 @@ Immich supports multiple users, each with their own library.
<UserCreate />
## Send new user email notification
:::note
This option is only available if an SMTP server has been configured in the administrator settings.
:::
The admin can send a welcome email if the Email option is set; see [Email Notifications](/docs/administration/email-notification) to learn how to set up an SMTP server in Immich.
<img
src={require('./img/send-user-email-notification.webp').default}
width="40%"
title="Send user email notification"
/>
## Set Storage Quota For User
The instance admin can specify a storage quota for each user; once the limit is reached, the user won't be able to upload to the instance anymore.
@@ -3,7 +3,6 @@ sidebar_position: 1
---
import AppArchitecture from './img/app-architecture.png';
import MobileArchitecture from './img/immich_mobile_architecture.svg';
# Architecture
@@ -29,14 +28,7 @@ All three clients use [OpenAPI](./open-api.md) to auto-generate rest clients for
### Mobile App
The mobile app is written in [Dart](https://dart.dev/) using [Flutter](https://flutter.dev/). Below is an architecture overview:
<MobileArchitecture className="p-4 dark:bg-immich-dark-primary my-4" />
The diagram shows the target architecture; the current state of the code-base does not always follow it yet. New code and contributions should follow this architecture.
Currently, it uses [Isar Database](https://isar.dev/) for a local database and [Riverpod](https://riverpod.dev/) for state management (providers).
Entities and Models are the two types of data classes used. While entities are stored in the on-device database, models are ephemeral and only kept in memory.
The Repositories should be the only place where other data classes are used internally (such as OpenAPI DTOs). However, their interfaces must not use foreign data classes!
### Web Client
@@ -15,7 +15,7 @@ Our [GitHub Repository](https://github.com/immich-app/immich) is a [monorepo](ht
| `design/` | Screenshots and logos for the README |
| `docs/` | Source code for the [https://immich.app](https://immich.app) website |
| `machine-learning/` | Source code for the `immich-machine-learning` docker image |
| `misc/release/` | Scripts for version bumps and draft releases |
| `mobile/` | Source code for the mobile app, both Android and iOS |
| `server/` | Source code for the `immich-server` docker image |
| `web/` | Source code for the `web` |
@@ -1,104 +0,0 @@
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" version="24.7.16">
<diagram name="Page-1" id="Bp2gX--FtC4sSMWxsLrs">
<mxGraphModel dx="1728" dy="954" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" background="none" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="zHhczcy2-Jv_nqmJUiNH-1" value="" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.polygon;polyCoords=[[0.25,0],[0.75,0],[1,0.25],[1,0.75],[0.75,1],[0.25,1],[0,0.75],[0,0.25]];polyline=0;strokeWidth=4;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="280" y="217.5" width="465" height="465" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-2" value="&lt;b&gt;&lt;font style=&quot;font-size: 22px;&quot;&gt;Mobile App&lt;/font&gt;&lt;/b&gt;" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;rounded=1;fontColor=#ffffff;" vertex="1" parent="1">
<mxGeometry x="442.5" y="225" width="140" height="40" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-25" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-4" target="zHhczcy2-Jv_nqmJUiNH-5">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-4" value="Services" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="530" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-26" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-12">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-5" value="Repositories" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#1E83F7;" vertex="1" parent="1">
<mxGeometry x="650" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-24" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-4">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-6" value="Providers" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="410" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-29" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-8">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-30" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-7" value="Pages" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="480" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-31" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.25;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-8" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-8" value="Widgets" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="360" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-11" value="User" style="shape=umlActor;verticalLabelPosition=bottom;verticalAlign=top;html=1;outlineConnect=0;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="180" y="368.5" width="81.5" height="163" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-12" value="platform&lt;div&gt;system&lt;/div&gt;" style="rhombus;whiteSpace=wrap;html=1;rounded=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="800" y="410" width="80" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-13" value="on-device&lt;div&gt;database&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;rounded=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="810" y="310" width="60" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-14" value="server" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;rounded=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="780" y="500" width="120" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-16" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0.07;entryY=0.4;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-14">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-39" value="OpenAPI" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];rounded=1;labelBackgroundColor=#1E83F7;" vertex="1" connectable="0" parent="zHhczcy2-Jv_nqmJUiNH-16">
<mxGeometry x="0.0697" y="1" relative="1" as="geometry">
<mxPoint x="8" y="10" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-23" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-27" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=-15;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-13">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-34" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;dashed=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-3">
<mxGeometry relative="1" as="geometry">
<mxPoint x="810" y="360" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-36" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-37" value="UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="387.5" y="640" width="70" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-38" value="non-UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="550" y="640" width="90" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-41" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" target="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-9" value="Models" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="470" y="510" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-3" value="Entities" style="rounded=1;whiteSpace=wrap;html=1;gradientColor=none;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="472.5" y="330" width="80" height="60" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>
@@ -24,7 +24,7 @@ This environment includes the services below. Additional details are available i
- Web app - [`/web`](https://github.com/immich-app/immich/tree/main/web)
- Machine learning - [`/machine-learning`](https://github.com/immich-app/immich/tree/main/machine-learning)
- Redis
- PostgreSQL development database with exposed port `5432` so you can use any database client to access it (a connection sketch follows below)
All the services are packaged to run with a single Docker Compose command.
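For example, connecting with `psql` from the host (the username, password, and database name are the development defaults and may differ in your setup):
```bash
PGPASSWORD=postgres psql --host=localhost --port=5432 --username=postgres immich
```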
@@ -106,7 +96,7 @@ in User `settings.json` (`cmd + shift + p` and search for `Open User Settings JS
"editor.suggest.snippetsPreventQuickSuggestions": false,
"editor.suggestSelection": "first",
"editor.tabCompletion": "onlySnippets",
"editor.wordBasedSuggestions": "off",
"editor.defaultFormatter": "Dart-Code.dart-code"
}
}
@@ -4,8 +4,7 @@
### Unit tests
Unit tests are run by calling `npm run test` from the `server/` directory.
You need to run `npm install` (in `server/`) once beforehand.
### End to end tests
@@ -15,11 +14,6 @@ The e2e tests can be run by first starting up a test production environment via:
make e2e
```
Before you can run the tests, you need to run the following commands _once_ (combined in the sketch below):
- `npm install` (in `e2e/`)
- `make open-api` (in the project root `/`)
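Put together, the one-time setup looks roughly like this when run from the project root:
```bash
(cd e2e && npm install)
make open-api
```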
Once the test environment is running, the e2e tests can be run via:
```bash
@@ -1,21 +0,0 @@
# Translations
:::tip
You can request a new language [here](https://hosted.weblate.org/new-lang/immich/immich/).
:::
## Weblate
[Weblate](https://weblate.org/) is a "libre software web-based continuous localization system". Immich localization efforts are managed on their [hosted platform](https://hosted.weblate.org/projects/immich/immich/).
## International message format
Plurals, numbers, dates and other locale specific message formats can be handled by using the [ICU message format](https://unicode-org.github.io/icu/userguide/format_parse/messages/). Internally, this is handled by the [intl-messageformat](https://www.npmjs.com/package/intl-messageformat) library. Their [documentation](https://formatjs.io/docs/intl-messageformat/) includes common, editable examples via a "live editor" feature, which can be useful to test and debug message formats.
## Progress
Immich currently supports the following languages:
<a href="https://hosted.weblate.org/engage/immich/">
<img src="https://hosted.weblate.org/widget/immich/immich/multi-auto.svg" alt="Translation status" />
</a>
@@ -1,7 +1,7 @@
# Troubleshooting
:::tip
A great option to get assistance with troubleshooting is to join our [Discord](https://discord.immich.app) server, where we have a dedicated channel for `#contributing`.
:::
## Known Issues
@@ -2,7 +2,7 @@
## Overview
Immich recognizes faces in your photos and videos and groups them together into people. You can then assign names to these people and search for them.
The list of people is shown in the Explore page.
@@ -18,75 +18,13 @@ The asset detail view will also show the faces that are recognized in the asset.
## Actions
Additional actions you can do include:
- Changing the feature photo of the person
- Setting a person's date of birth
- Merging two or more detected faces into one person
- Hiding the faces of a person from the Explore page and detail view
- Assigning an unrecognized face to a person
These actions can be found in the app bar when you access the detail view of a person.
<img src={require('./img/facial-recognition-4.png').default} title='Facial Recognition 4' width="70%"/>
## How Face Detection Works
Face detection sends the generated preview image to the machine learning service for processing. The service checks if it has the relevant model downloaded and downloads it if not. The image is decoded, pre-processed and passed to the face detection model (with hardware acceleration if configured). The bounding boxes and scores outputted from this model are used to crop and preprocess the image once again to be passed to a facial recognition model (also accelerated if configured). The embeddings from the recognition model, together with the bounding boxes and scores from the face detection model, are then sent back to the server to be added to the database. The embeddings in particular are indexed so they can be searched quickly during facial recognition clustering.
## How Facial Recognition Works
The facial recognition algorithm we use is derived from [DBSCAN](https://www.youtube.com/watch?v=RDZUdRSDOok), a popular clustering algorithm. It essentially treats each detected face as a point in a graph and aims to group points that are close to each other.
:::note
An important concept is whether something is a _core point_. A core point has a minimum number of points around it within a certain distance. A non-core point can only be assigned to a cluster if it can reach a core point; a non-core point can't be used to extend a cluster even if it's part of one. In Immich, the _Minimum Recognized Faces_ setting controls the threshold to be considered a core point.
:::
For each face, it looks around it to find other faces within a certain distance. Faces within this distance are considered similar, so it then checks if any of these faces are associated with a person.
If there is an existing person, it assigns the person of the most similar face to the face being processed.
If there is none, then it has to determine something from the DBSCAN algorithm: whether the face is a _core point_. If there are a certain number of similar faces (by default 3, including the face being considered), then this face is a core point. A new person is created for this face and the face is assigned to it. When other faces are processed, if they're similar to this face, they'll see that it has an associated person and can be assigned to that person.
However, if there aren't enough similar faces, no new person will be created. Instead, the face will wait for all the other faces to be processed to see if any matches that previously didn't have an associated person now do. If they do, then the face will be assigned to that person. If not, this face will be considered an outlier, such as a stranger in the background of an image.
The algorithm has some subtle differences compared to DBSCAN:
- DBSCAN doesn't have a concept of incremental clustering: it clusters all points at once. In contrast, facial recognition has to evolve as more assets are added without re-clustering everything each time.
- The algorithm described above works within a set of queued assets. Once these faces are processed and a new round of faces are detected, the behavior will not be the same as traditional DBSCAN since it preserves the clusters (people) generated from the previous round.
- Facial recognition tries to wait for face detection and thumbnail generation to complete before starting for this reason: the larger the set of faces in the queue, the better the results will be.
- Re-running facial recognition on all assets afterwards does behave like DBSCAN, however.
- DBSCAN is designed for range-based searches (i.e. points within a distance), but high-dimensional vector indices are generally optimized for getting the closest K results. The recognition algorithm doesn't try to get _all_ similar faces within a distance for performance reasons. Instead, it searches for a small number of matches for each face. The end result should be very similar if not identical, but with possibly different performance characteristics.
- Because of this, part of the recognition process is handled during a nightly job to ensure that unassigned faces with potential matches can be recognized.
:::tip
If you didn't import your assets at once or if the server was able to process jobs faster than you could upload them, it's possible that the clustering was suboptimal. If you haven't put effort into the current results, it may be worth re-running facial recognition on all assets for the best starting point. If it's too late for that, you can also manually assign a selection of unassigned faces and queue _Missing_ for Facial Recognition to help it learn and assign more faces automatically.
:::
## Configuration
Navigating to Administration > Settings > Machine Learning Settings > Facial Recognition will show the options available.
:::tip
It's better to only tweak the parameters here than to set them to something very different unless you're ready to test a variety of options. If you do need to set a parameter to a strict setting, relaxing other settings can be a good option to compensate, and vice versa.
:::
### Facial recognition model
There are a few different models available; the default is typically considered the best. On more constrained systems where the default is too intensive, you can choose a smaller model instead.
### Minimum detection score
This setting affects whether a result from the face detection model is filtered out as a false positive. It may seem tempting to set this low to detect more faces, but it can lead to false positives that are difficult to deal with and can harm facial recognition. It is strongly recommended not to go below 0.5 for this setting. Setting it to a very high number like 0.9 is also not recommended: the default is already biased toward precision, so a threshold that high leads to many undetected faces.
After changing this setting, it will only apply to new face detection jobs. To apply the new setting to all assets, you need to re-run face detection for all assets.
### Maximum recognition distance
The distance threshold described in How Facial Recognition Works. The default works well for most people, but it may be worth lowering it if the library has twins or otherwise very similar looking people. A threshold that's too low just means needing to merge duplicate people after facial recognition, whereas a threshold too high can produce unsalvageable results. It is strongly recommended not to go below 0.3 or above 0.7.
### Minimum recognized faces
The core point threshold described in How Facial Recognition Works. This setting has a few implications. First, it takes effect immediately in that people with fewer faces than this are hidden from view. Secondly, it makes clustering more robust as it prevents loosely-related faces from being linked to each other by requiring a certain level of density.
Increasing this setting is a good idea if you increase the recognition distance or reduce the minimum detection score. Setting it to 1 effectively disables the concept of core points, but can be an option if you prefer a more hands-on approach.
@@ -23,7 +23,7 @@ You do not need to redo any transcoding jobs after enabling hardware acceleratio
- Raspberry Pi is currently not supported.
- Two-pass mode is only supported for NVENC. Other APIs will ignore this setting.
- By default, only encoding is currently hardware accelerated. This means the CPU is still used for software decoding and tone-mapping.
- You can benefit from end-to-end acceleration by enabling hardware decoding in the video transcoding settings.
- Hardware dependent
- Codec support varies, but H.264 and HEVC are usually supported.
- Notably, NVIDIA and AMD GPUs do not support VP9 encoding.
@@ -49,7 +49,7 @@ For RKMPP to work:
- You must have a supported Rockchip ARM SoC.
- Only RK3588 supports hardware tonemapping, other SoCs use slower software tonemapping while still using hardware encoding.
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install the [`libmali`][libmali-rockchip] release that corresponds to your Mali GPU (`libmali-valhall-g610-g13p0-gbm` on RK3588) and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- under `rkmpp` uncomment the 3 lines required for OpenCL tonemapping by removing the `#` symbol at the beginning of each line
- `- /dev/mali0:/dev/mali0`
- `- /etc/OpenCL:/etc/OpenCL:ro`
@@ -60,20 +60,17 @@ For RKMPP to work:
#### Basic Setup
1. If you do not already have it, download the latest [`hwaccel.transcoding.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-server`, uncomment the `extends` section and change `cpu` to the appropriate backend.
Note: For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
3. Redeploy the `immich-server` container with these updated settings.
4. In the Admin page under `Video transcoding settings`, change the hardware acceleration setting to the appropriate option and save.
Note: For Jasper Lake and Elkhart Lake CPUs, you will need to set the `Hardware Acceleration` -> `Constant quality mode` to `CQP`
5. (Optional) Enable hardware decoding for optimal performance.
#### Single Compose File
Some platforms, including Unraid and Portainer, do not support multiple Compose files as of writing. As an alternative, you can "inline" the relevant contents of the [`hwaccel.transcoding.yml`][hw-file] file into the `immich-server` service directly.
For example, the `qsv` section in this file is:
@@ -82,17 +79,25 @@ devices:
- /dev/dri:/dev/dri
```
You can add this to the `immich-server` service instead of extending from `hwaccel.transcoding.yml`:
```yaml
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# Note the lack of an `extends` section
devices:
- /dev/dri:/dev/dri
volumes:
...
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
depends_on:
- redis
- database
restart: always
```
Once this is done, you can continue to step 3 of "Basic Setup".
@@ -117,7 +122,6 @@ Once this is done, you can continue to step 3 of "Basic Setup".
- You may want to choose a slower preset than for software transcoding to maintain quality and efficiency
- While you can use VAAPI with NVIDIA and Intel devices, prefer the more specific APIs since they're more optimized for their respective devices
- You can confirm the device is being recognized and used by checking its utilization (via `nvtop` for NVIDIA, `intel_gpu_top` for Intel, etc.) when transcoding. A lack of error logs when transcoding also indicates that it's being used. A quick passthrough check is sketched below.
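For example, a simple way to confirm device passthrough (the container name and a VAAPI-style `/dev/dri` mapping are assumptions):
```bash
# The render device(s) should be listed if passthrough is configured
docker exec immich_server ls -l /dev/dri
```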
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.transcoding.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
@@ -1,14 +1,18 @@
# External Libraries
External libraries track assets stored in the filesystem outside of Immich. When the external library is scanned, Immich will load videos and photos from disk and create the corresponding assets. These assets will then be shown in the main timeline, and they will look and behave like any other asset, including viewing on the map, adding to albums, etc. Later, if a file is modified outside of Immich, you need to scan the library for the changes to show up.
If an external asset is deleted from disk, Immich will move it to trash on rescan. To restore the asset, you need to restore the original file. After 30 days the file will be removed from trash, and any changes to metadata within Immich will be lost.
:::caution
If you add metadata to an external asset in any way (e.g. add it to an album or edit the description), that metadata is only stored inside Immich and will not be persisted to the external asset file. If you move an asset to another location within the library, all such metadata will be lost upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
:::
If a file is modified outside of Immich, the changes will not be reflected in Immich until the library is scanned again. There are different ways to scan a library depending on the use case:
- Scan Library Files: This is the default scan method and also the quickest. It will scan all files in the library and add new files to the library. It will notice if any files are missing (see below) but not check existing assets
- Scan All Library Files: Same as above, but will check each existing asset to see if the modification time has changed. If it has, the asset will be updated. Since it has to check each asset, this is slower than Scan Library Files.
- Force Scan All Library Files: Same as above, but will read each asset from disk no matter the modification time. This is useful in some cases where an asset has been modified externally but the modification time has not changed. This is the slowest way to scan because it reads each asset from disk.
:::caution
@@ -16,6 +20,22 @@ Due to aggressive caching it can take some time for a refreshed asset to appear
:::
In external libraries, the file path is used for duplicate detection. This means that if a file is moved to a different location, it will be added as a new asset. If the file is moved back to its original location, the original asset will be restored on rescan. In contrast to upload libraries, two identical files can be uploaded if they are in different locations. This is a deliberate design choice to make Immich reflect the file system as closely as possible. Remember that duplicate detection is only done within the same library, so if you have multiple external libraries, the same file can be added to multiple libraries.
:::caution
If you add assets from an external library to an album and then move the asset to another location within the library, the asset will be removed from the album upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
:::
### Deleted External Assets
Note: Either a manual or scheduled library scan must have been performed to identify offline assets before this process will work.
In all above scan methods, Immich will check if any files are missing. This can happen if files are deleted, or if they are on a storage location that is currently unavailable, like a network drive that is not mounted, or a USB drive that has been unplugged. In order to prevent accidental deletion of assets, Immich will not immediately delete an asset from the library if the file is missing. Instead, the asset will be internally marked as offline and will still be visible in the main timeline. If the file is moved back to its original location and the library is scanned again, the asset will be restored.
Finally, files can be deleted from Immich via the `Remove Offline Files` job. This job can be found in the three-dots menu for the associated external storage that was configured under Administration > Libraries (the same location described at [create external libraries](#create-external-libraries)). When this job is run, any assets marked as offline will be removed from Immich. Run this job whenever files have been deleted from the file system and you want to remove them from Immich.
### Import Paths
External libraries use import paths to determine which files to scan. Each library can have multiple import paths so that files from different locations can be added to the same library. Import paths are scanned recursively, and if a file is in multiple import paths, it will only be added once. Each import path must be a readable directory that exists on the filesystem; the import path dialog will alert you of any paths that are not accessible.
@@ -46,13 +66,9 @@ Some basic examples:
- `**/Raw/**` will exclude all files in any directory named `Raw`
- `**/*.{tif,jpg}` will exclude all files with the extension `.tif` or `.jpg`
Special characters such as @ should be escaped, for instance:
- `**/\@eadir/**` will exclude all files in any directory named `@eadir`
### Automatic watching (EXPERIMENTAL)
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan.
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan. Deleted assets are, as always, marked as offline and can be removed with the "Remove offline files" button.
If your photos are on a network drive, automatic file watching likely won't work. In that case, you will have to rely on a periodic library refresh to pull in your changes.
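For reference, a minimal sketch of what enabling this could look like in the config file, assuming the `library.watch.enabled` key (check your release's config schema before relying on this exact shape):

```
{
  "library": {
    "watch": {
      "enabled": true
    }
  }
}
```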
@@ -68,7 +84,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
There is an automatic job that's run once a day and refreshes all modified files in all libraries as well as cleans up any libraries stuck in deletion.
## Usage
@@ -91,20 +107,18 @@ The `immich-server` container will need access to the gallery. Modify your docke
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+      - /mnt/media/videos2:/mnt/media/videos2 # Immich can delete files in this folder, since the mount does not end with :ro
+      - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path on a Windows system
```
:::tip
The `ro` flag at the end only gives read-only access to the volumes.
This prevents images from being deleted through the web UI and stops Immich from writing metadata to the library ([XMP sidecars](/docs/features/xmp-sidecars)).
The `ro` flag at the end only gives read-only access to the volumes. While Immich does not modify files, it's a good practice to mount read-only.
:::
:::info
_Remember to run `docker compose up -d` to register the changes. Make sure you can see the mounted path in the container._
_Remember to bring the container down and back up (`docker compose down && docker compose up -d`) to register the changes. Make sure you can see the mounted path in the container._
:::
### Create A New Library
### Create External Libraries
These actions must be performed by the Immich administrator.
@@ -128,7 +142,7 @@ Next, we'll add an exclusion pattern to filter out raw files.
- Enter `**/Raw/**` and click save.
- Click save
- Click the drop-down menu on the newly created library
- Click on Scan
- Click on Scan Library Files
The christmas trip library will now be scanned in the background. In the meantime, let's add the videos and old photos to another library.
@@ -145,26 +159,10 @@ If you get an error here, please rename the other external library to something
- Click on Add Path
- Enter `/mnt/media/videos` then click Add
- Click Save
- Click on Scan
- Click on Scan Library Files
Within seconds, the assets from the old-pics and videos folders should show up in the main timeline.
### Folder view
:::info
This feature is also available for assets uploaded through means other than external libraries.
:::

:::tip
You can use the storage template migration feature for the best experience with uploaded assets in this view.
:::
You can browse your photos and videos by folder like in a file explorer.
Enable this feature from the Users Settings > Features > Folders.
The UI is currently only available for the web; mobile will come in a subsequent release.
<img src={require('./img/folder-view.png').default} width="75%" title='Folder-view' />
### Set Custom Scan Interval
:::note

View File

@@ -32,7 +32,6 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- Where and how you can get this file depends on device and vendor, but typically, the device vendor also supplies these
- The `hwaccel.ml.yml` file assumes the path to it is `/usr/lib/libmali.so`, so update accordingly if it is elsewhere
- The `hwaccel.ml.yml` file assumes an additional file `/lib/firmware/mali_csffw.bin`, so update accordingly if your device's driver does not require this file
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for ARM NN specific settings
#### CUDA
@@ -53,12 +52,6 @@ You do not need to redo any machine learning jobs after enabling hardware accele
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, openvino] to the `image` section's tag at the end of the line.
4. Redeploy the `immich-machine-learning` container with these updated settings.
### Confirming Device Usage
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel and `intel_gpu_top` for Intel.
You can also check the logs of the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, or when you search with text in Immich, you should either see a log for `Available ORT providers` containing the relevant provider (e.g. `CUDAExecutionProvider` in the case of CUDA), or a `Loaded ANN model` log entry without errors in the case of ARM NN.
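For example, assuming the default container name from the compose file, something like this could surface the relevant log lines:

```
docker logs immich_machine_learning 2>&1 | grep -E "Available ORT providers|Loaded ANN model"
```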
#### Single Compose File
Some platforms, including Unraid and Portainer, do not support multiple Compose files as of writing. As an alternative, you can "inline" the relevant contents of the [`hwaccel.ml.yml`][hw-file] file into the `immich-machine-learning` service directly.
@@ -101,22 +94,9 @@ immich-machine-learning:
Once this is done, you can redeploy the `immich-machine-learning` container.
#### Multi-GPU
If you want to utilize multiple NVIDIA or Intel GPUs, you can set the `MACHINE_LEARNING_DEVICE_IDS` environmental variable to a comma-separated list of device IDs and set `MACHINE_LEARNING_WORKERS` to the number of listed devices. You can run a command such as `nvidia-smi -L` or `glxinfo -B` to see the currently available devices and their corresponding IDs.
For example, if you have devices 0 and 1, set the values as follows:
```
MACHINE_LEARNING_DEVICE_IDS=0,1
MACHINE_LEARNING_WORKERS=2
```
In this example, the machine learning service will spawn two workers, one of which will allocate models to device 0 and the other to device 1. Different requests will be processed by one worker or the other.
This approach can be used to simply specify a particular device as well. For example, setting `MACHINE_LEARNING_DEVICE_IDS=1` will ensure device 1 is always used instead of device 0.
Note that you should increase job concurrencies to increase overall utilization and more effectively distribute work across multiple GPUs. Additionally, each GPU must be able to load all models. It is not possible to distribute a single model to multiple GPUs that individually have insufficient VRAM, or to delegate a specific model to one GPU.
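As an illustration, a compose-level sketch of the two-device example above (the image tag and values are assumptions; adapt them to your hardware and release channel):

```
immich-machine-learning:
  image: ghcr.io/immich-app/immich-machine-learning:release-cuda
  environment:
    - MACHINE_LEARNING_DEVICE_IDS=0,1
    - MACHINE_LEARNING_WORKERS=2
```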
:::info
You can confirm the device is being recognized and used by checking its utilization (via `nvtop` for CUDA, `intel_gpu_top` for OpenVINO, etc.). You can also enable debug logging by setting `IMMICH_LOG_LEVEL=debug` in the `.env` file and restarting the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, you should see a log for `Available ORT providers` containing the relevant provider. In the case of ARM NN, the absence of a `Could not load ANN shared libraries` log entry means it loaded successfully.
:::
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

View File

@@ -27,39 +27,3 @@ The beta release channel allows users to test upcoming changes before they are o
:::info
You can enable automatic backup on supported devices. For more information see [Automatic Backup](/docs/features/automatic-backup.md).
:::
## Album Sync
You can sync or mirror an album from your phone to the Immich server on your account. For example, if you select the Recents, Camera, and Videos albums for backup, corresponding albums with the same names will be created on the server. Once the assets from those albums are uploaded, they will be put into the target albums automatically.
### Album Synchronization Highlights
- **One-Way Sync:** Synchronization is one-way, from the device to the server.
- **Name Matching:** If an album on the server has the same name as the album on the device, images from the device will be merged with the existing images in the server album.
- **Shared Albums:** If the matching album on the server is shared, the new photos merged into the album will also be shared.
- **Album Structure:** When an album is created for the first time, its structure is based on the initial state. Future updates made on the phone (such as deleting or repositioning photos) will not be reflected in Immich.
- **User-Specific Sync:** Album synchronization is unique to each server user and does not sync between different users or partners.
- **Mobile-Only Feature:** Album synchronization is currently only available on mobile. For similar options on a computer, refer to [Libraries](/docs/features/libraries) for further details.
### Synchronizing albums from the past
Albums can be synchronized to the server even if they did not exist on the server before. In order to apply this setting you have to:
Tap the cloud icon at the top right -> tap the cog wheel at the top right -> select the sync option under Sync albums.
:::info Sync albums delete/move photos
If you delete or move photos in the local album on your device, the change will not be reflected in the album on the server **even if** you click Sync albums.
Sync only reflects files you add.
:::
If the same asset is in more than one album, it will only sync to the first album it's in. After that, it won't sync again even if the user clicks Sync albums manually.
To overcome this limitation, the files must be removed from the blacklist via
App settings -> Advanced -> Duplicate Assets -> Clear
:::info
Clearing duplicate assets from the list will cause all previously uploaded duplicate files to be re-sent. The files will not actually be uploaded, since the server rejects them as duplicates, but they will be synchronized to the album and added back to the blacklist at the end of the synchronization.
:::

View File

@@ -25,10 +25,10 @@ The metrics in immich are grouped into API (endpoint calls and response times),
### Configuration
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_TELEMETRY_INCLUDE=all` environmental variable to your `.env` file. Note that only the server container currently uses this variable.
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_METRICS=true` environmental variable to your `.env` file. Note that only the server and microservices containers currently use this variable.
:::tip
`IMMICH_TELEMETRY_INCLUDE=all` enables all metrics. For a more granular configuration you can enumerate the telemetry metrics that should be included as a comma separated list (e.g. `IMMICH_TELEMETRY_INCLUDE=repo,api`). Alternatively, you can also exclude specific metrics with `IMMICH_TELEMETRY_EXCLUDE`. For more information refer to the [environment section](/docs/install/environment-variables.md#prometheus).
`IMMICH_METRICS` enables all metrics, but there are also [environmental variables](/docs/install/environment-variables.md#prometheus) to toggle specific metric groups. If you'd like to only expose certain kinds of metrics, you can set only those environmental variables to `true`. Explicitly setting the environmental variable for a metric group overrides `IMMICH_METRICS` for that group. For example, setting `IMMICH_METRICS=true` and `IMMICH_API_METRICS=false` will enable all metrics except API metrics.
:::
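For instance, a `.env` sketch using the include/exclude variables described above (values are illustrative):

```
# expose everything except API metrics
IMMICH_TELEMETRY_INCLUDE=all
IMMICH_TELEMETRY_EXCLUDE=api
```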
The next step is to configure a new or existing Prometheus instance to scrape this endpoint. The following steps assume that you do not have an existing Prometheus instance, but the steps will be similar either way.
@@ -66,7 +66,7 @@ The provided file is just a starting point. There are a ton of ways to configure
After bringing down the containers with `docker compose down` and back up with `docker compose up -d`, a Prometheus instance will now collect metrics from the immich server and microservices containers. Note that we didn't need to expose any new ports for these containers - the communication is handled in the internal Docker network.
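A minimal `prometheus.yml` scrape sketch, assuming the default compose service names and the metrics ports mentioned in the note below:

```
scrape_configs:
  - job_name: immich-server
    static_configs:
      - targets: ['immich-server:8081']
  - job_name: immich-microservices
    static_configs:
      - targets: ['immich-microservices:8082']
```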
:::note
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8082` to the microservices container's ports. Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8081` to the microservices container's ports. Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
:::
### Usage

View File

@@ -16,7 +16,7 @@ When sharing shared albums, what's shared is:
- Download all assets as zip file (Web only).
:::info Archive size limited.
If the size of the album exceeds 4GB, the archive will by default be split into parts of 4GB each. This can be changed on the user settings page.
If the size of the album exceeds 4GB, the archive will be split into parts of 4GB each.
:::
- Add a description to the album (Web only).
- Slideshow view (Web only).
@@ -73,14 +73,14 @@ You can edit the link properties, options include;
- **Allow public user to download -** whether to allow whoever has the link to download all the images or a certain image (optional).
- **Allow public user to upload -** whether to allow whoever has the link to upload assets to the album (optional).
:::info
Whoever has the link and has uploaded files cannot delete them.
whoever has the link and have uploaded files cannot delete them.
:::
- **Expire after -** adding an expiration date to the link (optional).
## Share Specific Assets
A user can share specific assets without linking them to a specific album.
In order to do this:
in order to do so;
1. Go to the timeline
2. Select the assets (Shift can be used for multiple selection)
@@ -152,7 +152,7 @@ Some of the features are not available on mobile, to understand what the full fe
## Sharing Between Users
#### Add or remove users from the album
#### Add or remove users from the album.
:::info remove user(s)
When a user is removed from the album, the photos they uploaded will still appear in the album.

View File

@@ -7,30 +7,29 @@ Immich uses Postgres as its search database for both metadata and smart search.
Smart search is powered by the [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) extension, utilizing machine learning models like [CLIP](https://openai.com/research/clip) to provide relevant search results. This allows for freeform searches without requiring specific keywords in the image or video metadata.
Archived photos are not included in search results by default. To include them, mark the checkbox in [advanced search filters](/docs/features/smart-search#advanced-search-filters).
:::tip Alternative CLIP Models
More powerful models can be used for more accurate search results. For more information, see the related [FAQ](/docs/FAQ#can-i-use-a-custom-clip-model).
:::
:::info
Smart Search is currently limited to 5,000 results for a single search on the web.
:::
## Advanced Search Filters
In addition, Immich offers advanced search functionality, allowing you to find specific content using customizable search filters. These filters include location, one or more faces, specific albums, and more. You can try out the search filters on the [Demo site](https://demo.immich.app).
The filters you can search by in smart search include:
Smart search features include:
- People
- Location
  - Country
  - State
  - City
- Camera
  - Make
  - Model
- Date range
- File name or extension
- Media type
  - Image (including live/motion photos)
  - Video
  - All
- Condition
  - Not in any album
  - Archived
  - Favorited
- Search for one or more faces (with or without context search).
- Search by Country or State or City or by all three.
- Search by camera make and model.
- Search by date range.
- Search by file name.
- Search by media types: image, video or all (**Note:** Image includes live images).
- Search by condition: not in any album or archive or Favorite or all conditions.
<Tabs>
<TabItem value="Computer" label="Computer" default>
@@ -48,27 +47,3 @@ Some search examples:
</TabItem>
</Tabs>
## Configuration
Navigating to `Administration > Settings > Machine Learning Settings > Smart Search` will show the options available.
### CLIP model
More powerful models can be used for more accurate search results, but are slower and can require more server resources. Check out the models [here][huggingface-clip] for more options!
[Multilingual models][huggingface-multilingual-clip] are also available so users can search in their native language. These models support over 100 languages; the `nllb` models in particular support 200.
:::note
Multilingual models are much slower and larger and perform slightly worse for English than English-only models. For this reason, only use them if you actually intend to search in a language besides English.
As a special case, the `ViT-H-14-quickgelu__dfn5b` and `ViT-H-14-378-quickgelu__dfn5b` models are excellent at many European languages despite not specifically being multilingual. They're very intensive regardless, however - especially the latter.
:::
Once you've chosen a model, change this setting to the name of the model you chose. Be sure to re-run Smart Search on all assets after this change.
:::note
Feel free to make a feature request if there's a model you want to use that we don't currently support.
:::
[huggingface-clip]: https://huggingface.co/collections/immich-app/clip-654eaefb077425890874cd07
[huggingface-multilingual-clip]: https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7

View File

@@ -13,14 +13,14 @@ In our `.env` file, we will define variables that will help us in the future whe
# Custom location where your uploaded, thumbnail, and transcoded video files are stored
- UPLOAD_LOCATION=./library
+ UPLOAD_LOCATION=/custom/path/immich/immich_files
+ THUMB_LOCATION=/custom/path/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/path/immich/encoded-video
+ PROFILE_LOCATION=/custom/path/immich/profile
+ UPLOAD_LOCATION=/custom/location/on/your/system/immich/immich_files
+ THUMB_LOCATION=/custom/location/on/your/system/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/location/on/your/system/immich/encoded-video
+ PROFILE_LOCATION=/custom/location/on/your/system/immich/profile
...
```
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` and `immich-microservices` containers.
```diff title="docker-compose.yml"
services:
@@ -29,6 +29,16 @@ services:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
- /etc/localtime:/etc/localtime:ro
...
immich-microservices:
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
- /etc/localtime:/etc/localtime:ro
```
@@ -36,6 +46,7 @@ services:
Restart Immich to register the changes.
```
docker compose down
docker compose up -d
```

View File

@@ -1,22 +1,8 @@
# Custom Map Styles
# Create Custom Map Styles for Immich Using Maptiler
You may decide that you'd like to modify the style document which is used to
draw the maps in Immich. In addition to visual customization, this also allows
you to pick your own map tile provider instead of the default one. The default
`style.json` for [light theme](https://github.com/immich-app/immich/tree/main/server/resources/style-light.json)
and [dark theme](https://github.com/immich-app/immich/blob/main/server/resources/style-dark.json)
can be used as a basis for creating your own style.
You may decide that you'd like to modify the style document which is used to draw the maps in Immich. This can be done easily using Maptiler, if you do not want to write an entire JSON document by hand.
There are several sources for already-made `style.json` map themes, as well as
online generators you can use.
1. In **Immich**, navigate to **Administration --> Settings --> Map & GPS Settings** and expand the **Map Settings** subsection.
2. Paste the link to your JSON style in either the **Light Style** or **Dark Style**. (You can add different styles which will help make the map style more appropriate depending on whether you set **Immich** to Light or Dark mode.)
3. Save your selections. Reload the map, and enjoy your custom map style!
## Use Maptiler to build a custom style
Customizing the map style can be done easily using Maptiler, if you do not want to write an entire JSON document by hand.
## Steps
1. Create a free account at https://cloud.maptiler.com
2. Once logged in, you can either create a brand new map by clicking on **New Map**, selecting a starter map, and then clicking **Customize**, OR by selecting a **Standard Map** and customizing it from there.
@@ -25,3 +11,6 @@ Customizing the map style can be done easily using Maptiler, if you do not want
5. Next, **Publish** your style using the **Publish** button at the top right. This will deploy it to production, which means it is able to be exposed over the Internet. Maptiler will present an interactive side-by-side map with the original and your changes prior to publication.<br/>![Maptiler Publication Settings](img/immich_map_styles_publish.png)
6. Maptiler will warn you that changing the map will change it across all apps using the map. Since no apps are using the map yet, this is okay.
7. Clicking on the name of your new map at the top left will bring you to the item's **details** page. From here, copy the link to the JSON style under **Use vector style**. This link will automatically contain your personal API key to Maptiler.
8. In **Immich**, navigate to **Administration --> Settings --> Map & GPS Settings** and expand the **Map Settings** subsection.
9. Paste the link to your JSON style in either the **Light Style** or **Dark Style**. (You can add different styles which will help make the map style more appropriate depending on whether you set **Immich** to Light or Dark mode.)
10. Save your selections. Reload the map, and enjoy your custom map style!
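For reference, the copied vector-style link generally has this shape (illustrative; your map ID and API key will differ):

```
https://api.maptiler.com/maps/<your-map-id>/style.json?key=<your-api-key>
```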

View File

@@ -5,7 +5,7 @@ Keep in mind that mucking around in the database might set the moon on fire. Avo
:::
:::tip
Run `docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME>` to connect to the database via the container directly.
Run `docker exec -it immich_postgres psql immich <DB_USERNAME>` to connect to the database via the container directly.
(Replace `<DB_USERNAME>` with the value from your [`.env` file](/docs/install/environment-variables#database)).
:::
@@ -23,7 +23,7 @@ SELECT * FROM "assets" WHERE "originalFileName" LIKE '%_2023_%'; -- all files wi
```
```sql title="Find by path"
SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_2023.jpg';
SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_20230903_232542848.jpg';
SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
```
@@ -37,12 +37,6 @@ SELECT * FROM "assets" WHERE "checksum" = decode('69de19c87658c4c15d9cacb9967b8e
SELECT * FROM "assets" WHERE "checksum" = '\x69de19c87658c4c15d9cacb9967b8e033bf74dd1'; -- alternate notation
```
```sql title="Find duplicate assets with identical checksum (SHA-1) (excluding trashed files)"
SELECT T1."checksum", array_agg(T2."id") ids FROM "assets" T1
INNER JOIN "assets" T2 ON T1."checksum" = T2."checksum" AND T1."id" != T2."id" AND T2."deletedAt" IS NULL
WHERE T1."deletedAt" IS NULL GROUP BY T1."checksum";
```
```sql title="Live photos"
SELECT * FROM "assets" WHERE "livePhotoVideoId" IS NOT NULL;
```
@@ -85,7 +79,8 @@ SELECT "assets"."type", COUNT(*) FROM "assets" GROUP BY "assets"."type";
```sql title="Count by type (per user)"
SELECT "users"."email", "assets"."type", COUNT(*) FROM "assets"
JOIN "users" ON "assets"."ownerId" = "users"."id"
GROUP BY "assets"."type", "users"."email" ORDER BY "users"."email";
GROUP BY "assets"."type", "users"."email"
ORDER BY "users"."email";
```
```sql title="Failed file movements"
@@ -111,9 +106,3 @@ SELECT "key", "value" FROM "system_metadata" WHERE "key" = 'system-config';
```sql title="Delete person and unset it for the faces it was associated with"
DELETE FROM "person" WHERE "name" = 'PersonNameHere';
```
## Postgres internal
```sql title="Change DB_PASSWORD"
ALTER USER <DB_USERNAME> WITH ENCRYPTED PASSWORD 'newpasswordhere';
```
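If you rotate the password this way, remember to update the matching value in your `.env` file and restart the containers; a sketch (key name per the environment-variables docs):

```
DB_PASSWORD=newpasswordhere
```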

Some files were not shown because too many files have changed in this diff Show More