Compare commits


8 Commits

Author    SHA1        Message                                     Date
mertalev  72269ab58c  add cli                                     2024-07-12 16:50:48 -04:00
mertalev  3db69b94ed  support resnet models, test failed models   2024-07-12 16:50:48 -04:00
mertalev  b5acb71b05  prevent multidimensional bias               2024-07-12 16:50:48 -04:00
mertalev  b39cca1b43  fixes                                       2024-07-12 16:50:47 -04:00
mertalev  3d62011ae3  handle gather at the end                    2024-07-12 16:50:47 -04:00
mertalev  1ad348c407  gather -> slice                             2024-07-12 16:50:47 -04:00
mertalev  5dae920ac6  onnx2tf, 4d transpose                       2024-07-12 16:50:47 -04:00
mertalev  956480ab2c  enhance armnn conversion                    2024-07-12 16:50:46 -04:00
564 changed files with 23683 additions and 36272 deletions

View File

@@ -29,4 +29,3 @@ web/node_modules/
web/coverage/
web/.svelte-kit
web/build/
web/.env

View File

@@ -1,13 +1,11 @@
title: "[Feature] feature-name-goes-here"
title: "[Feature] <feature-name-goes-here>"
labels: ["feature"]
body:
- type: markdown
attributes:
value: |
Please use this form to request new feature for Immich.
Stick to only a single feature per request. If you list multiple different features at once,
your request will be closed.
Please use this form to request new feature for Immich
- type: checkboxes
attributes:

View File

@@ -56,10 +56,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
uses: docker/setup-qemu-action@v3.1.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.6.1
uses: docker/setup-buildx-action@v3.4.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.6.1
uses: docker/build-push-action@v6.3.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64

View File

@@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.8.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.8.0
uses: stumpylog/image-cleaner-action/untagged@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"

View File

@@ -63,10 +63,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
uses: docker/setup-qemu-action@v3.1.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.6.1
uses: docker/setup-buildx-action@v3.4.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -115,7 +115,7 @@ jobs:
fi
- name: Build and push image
uses: docker/build-push-action@v6.6.1
uses: docker/build-push-action@v6.3.0
with:
context: ${{ matrix.context }}
file: ${{ matrix.file }}
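
For orientation, the action pins touched in the two workflow diffs above fit together roughly like this; the job name, registry, push condition, and tag below are illustrative, not taken from the workflow files themselves:

```yaml
jobs:
  build_and_push:              # illustrative job name
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.1.0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.4.0
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push image
        uses: docker/build-push-action@v6.3.0
        with:
          file: cli/Dockerfile                          # as in the first workflow diff
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name == 'release' }}   # illustrative condition
          tags: ghcr.io/immich-app/immich-cli:latest    # illustrative tag
```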

1
cli/.eslintignore Normal file
View File

@@ -0,0 +1 @@
/dist

28
cli/.eslintrc.cjs Normal file
View File

@@ -0,0 +1,28 @@
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module',
tsconfigRootDir: __dirname,
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: ['plugin:@typescript-eslint/recommended', 'plugin:prettier/recommended', 'plugin:unicorn/recommended'],
root: true,
env: {
node: true,
},
ignorePatterns: ['.eslintrc.js'],
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
},
};

View File

@@ -1 +1 @@
20.16.0
20.15.1

View File

@@ -1,4 +1,4 @@
FROM node:20.16.0-alpine3.20@sha256:eb8101caae9ac02229bd64c024919fe3d4504ff7f329da79ca60a04db08cef52 AS core
FROM node:20.15.1-alpine3.20@sha256:34b7aa411056c85dbf71d240d26516949b3f72b318d796c26b57caaa1df5639a as core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

View File

@@ -1,60 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: ['eslint.config.mjs', 'dist'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
parserOptions: {
project: 'tsconfig.json',
tsconfigRootDir: __dirname,
},
},
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
},
},
];

1553
cli/package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.13",
"version": "2.2.8",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -13,33 +13,30 @@
"cli"
],
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^20.14.13",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"@types/node": "^20.14.10",
"@typescript-eslint/eslint-plugin": "^7.0.0",
"@typescript-eslint/parser": "^7.0.0",
"@vitest/coverage-v8": "^1.2.2",
"byte-size": "^8.1.1",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.0.0",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
"globals": "^15.9.0",
"eslint-plugin-unicorn": "^54.0.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.3.0",
"vitest": "^1.2.2",
"vitest-fetch-mock": "^0.2.2",
"yaml": "^2.3.1"
},
"scripts": {
@@ -67,6 +64,6 @@
"lodash-es": "^4.17.21"
},
"volta": {
"node": "20.16.0"
"node": "20.15.1"
}
}

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.38.0"
constraints = "4.38.0"
version = "4.36.0"
constraints = "4.36.0"
hashes = [
"h1:+27KAHKHBDvv3dqyJv5vhtdKQZJzoZXoMqIyronlHNw=",
"h1:/uV9RgOUhkxElkHhWs8fs5ZbX9vj6RCBfP0oJO0JF30=",
"h1:1DNAdMugJJOAWD/XYiZenYYZLy7fw2ctjT4YZmkRCVQ=",
"h1:1wn4PmCLdT7mvd74JkCGmJDJxTQDkcxc+1jNbmwnMHA=",
"h1:BIHB4fBxHg2bA9KbL92njhyctxKC8b6hNDp60y5QBss=",
"h1:HCQpvKPsMsR4HO5eDqt+Kao7T7CYeEH7KZIO7xMcC6M=",
"h1:HTomuzocukpNLwtWzeSF3yteCVsyVKbwKmN66u9iPac=",
"h1:YDxsUBhBAwHSXLzVwrSlSBOwv1NvLyry7s5SfCV7VqQ=",
"h1:dchVhxo+Acd1l2RuZ88tW9lWj4422QMfgtxKvKCjYrw=",
"h1:eypa+P4ZpsEGMPFuCE+6VkRefu0TZRFmVBOpK+PDOPY=",
"h1:f3yjse2OsRZj7ZhR7BLintJMlI4fpyt8HyDP/zcEavw=",
"h1:mSJ7xj8K+xcnEmGg7lH0jjzyQb157wH94ULTAlIV+HQ=",
"h1:tt+2J2Ze8VIdDq2Hr6uHlTJzAMBRpErBwTYx0uD5ilE=",
"h1:uQW8SKxmulqrAisO+365mIf2FueINAp5PY28bqCPCug=",
"zh:171ab67cccceead4514fafb2d39e4e708a90cce79000aaf3c29aab7ed4457071",
"zh:18aa7228447baaaefc49a43e8eff970817a7491a63d8937e796357a3829dd979",
"zh:2cbaab6092e81ba6f41fa60a50f14e980c8ec327ee11d0b21f16a478be4b7567",
"zh:53b8e49c06f5b31a8c681f8c0669cf43e78abe71657b8182a221d096bb514965",
"zh:6037cfc60b4b647aabae155fcb46d649ed7c650e0287f05db52b2068f1e27c8a",
"zh:62460982ce1a869eebfca675603fbbd50416cf6b69459fb855bfbe5ae2b97607",
"zh:65f6f3a8470917b6398baa5eb4f74b3932b213eac7c0202798bfad6fd1ee17df",
"h1:00/Y+l17VV4RquGSfwDnYsGYzyf2ZmdQwUgeIzXC7eg=",
"h1:489GpKItA/VRIUA5S4+F8MsnurGVciRvUFyIV81MJTU=",
"h1:7cnczyKGj3+gvaJ0r5JIVWLXPbQfkHYejac76MJx+I8=",
"h1:8rmr1PjJc14Xmor2eEvo5/WBojylt1eYdx6VbSU3Ulo=",
"h1:HjgphNjtgny5tkcUAQoGgBdcuQ+0IyhL8yLsiBqWAP0=",
"h1:LH3umxdBnJcAyeVoBLVn+PC0F0CzN6v9UN6lb6CqQPE=",
"h1:Xx6WUD/zB8fM9SjkFx06Fgx2K7aGJIVvsJS2pwqALEM=",
"h1:YizL5YN9zQ8YkSR6V/G201YrCVdnkF9EUIK4lpROWiA=",
"h1:aPcXVGjYcCJdqvWSzc/dEjwj05LnbWZje8IanygVjcI=",
"h1:eKCvfashdCqfDcFGXE2gq+XxAURD5SzuaQ9Brs3zLos=",
"h1:gpKcBYkBcfn/uF1A8W7MD/OysMZW7EU4QVYvPEEnxGc=",
"h1:kCkcxZZnkKAnMz9scUQHb19d9/l9FPOHovAyrvtA618=",
"h1:t8mXXnICTeKqoD29uvyLFHVWMfMzTUrJuHje8lpI0zU=",
"h1:zjzavjIdLDGRYsWd3v0HJz6ul12Cewj9RW/cqAQ4DxI=",
"zh:02665712b3893307596b3caab99cf1f2502d5caca18e22d4b37bb535e628e102",
"zh:1514b0d3ef62934484ac471113ee68cddec0c21e56b4f710922741fe9b6e6fdf",
"zh:1fab4dfcecbcea13267b42e5ff05ba0692aa2dcb247b8e633fea0daf49feb156",
"zh:24d8367295fe1f1b2be37802aecb96edf32f743364663ffe781d1bb92438395d",
"zh:34e84e7940c99dcf65663cfd25afac22bf5c8a5ff2cd21900c67180d3a072be9",
"zh:3d71d63204a329acf1d1de8638f2c725243cb94cf444d2d7acde54b3d1ac1696",
"zh:57831ba88e779a762bcfa224ba9eac8bc22ef9cd70cd541d848b351e0ba6a75c",
"zh:6407560f2e548afcb4852c91efc664627a9ee565c31a9c81fc9ea1806fca0567",
"zh:738ddbc664d75f4859aa09444a27809bc398795a8ea8f5be8531040690287712",
"zh:841ca2b2d78b6f8d33ec3435bc090c5e04a3a7d85c80df11227a7ea00d36f6b1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8b5cebe64bf04105a49178a165b6a8800a9a33bae6767143a47fe4977755f805",
"zh:a5596635db0993ee3c3060fbc2227d91b239466e96d2d82642625a5aa2486988",
"zh:b3a9c63038441f13c311fd4b2c7e69e571445e5a7365a20c7cc9046b7e6c8aba",
"zh:b585e7e4d7648a540b14b9182819214896ca9337729eeb1f2034833b17db754d",
"zh:d2c3c545318ac8542369e9fc8228e29ee585febdf203a450fad3e0eded71ce02",
"zh:e95dd2d6c3525073af47d47b763cb81b6a51b20cabf76f789c69328922da9ecf",
"zh:eee6e590b36d6c6168a7daae8afa74a8721fd7aa9f62a710f04a311975100722",
"zh:8b3d3d63354032ab9b2403c50728e9aa4e83c7367eaad2d18794221addeafc0f",
"zh:9e293443fe3127e488f540229983c1b9688268185f87567bb3d18e794697acd2",
"zh:b3a22439156e46461213db183e2e89569cd2e8d7cbcfc4b9f90469090e105807",
"zh:f430feb5d51891e84028459e57039045dea4f1f5fcf671161d8ac2d8f28763f3",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.38.0"
version = "4.36.0"
}
}
}

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.38.0"
constraints = "4.38.0"
version = "4.36.0"
constraints = "4.36.0"
hashes = [
"h1:+27KAHKHBDvv3dqyJv5vhtdKQZJzoZXoMqIyronlHNw=",
"h1:/uV9RgOUhkxElkHhWs8fs5ZbX9vj6RCBfP0oJO0JF30=",
"h1:1DNAdMugJJOAWD/XYiZenYYZLy7fw2ctjT4YZmkRCVQ=",
"h1:1wn4PmCLdT7mvd74JkCGmJDJxTQDkcxc+1jNbmwnMHA=",
"h1:BIHB4fBxHg2bA9KbL92njhyctxKC8b6hNDp60y5QBss=",
"h1:HCQpvKPsMsR4HO5eDqt+Kao7T7CYeEH7KZIO7xMcC6M=",
"h1:HTomuzocukpNLwtWzeSF3yteCVsyVKbwKmN66u9iPac=",
"h1:YDxsUBhBAwHSXLzVwrSlSBOwv1NvLyry7s5SfCV7VqQ=",
"h1:dchVhxo+Acd1l2RuZ88tW9lWj4422QMfgtxKvKCjYrw=",
"h1:eypa+P4ZpsEGMPFuCE+6VkRefu0TZRFmVBOpK+PDOPY=",
"h1:f3yjse2OsRZj7ZhR7BLintJMlI4fpyt8HyDP/zcEavw=",
"h1:mSJ7xj8K+xcnEmGg7lH0jjzyQb157wH94ULTAlIV+HQ=",
"h1:tt+2J2Ze8VIdDq2Hr6uHlTJzAMBRpErBwTYx0uD5ilE=",
"h1:uQW8SKxmulqrAisO+365mIf2FueINAp5PY28bqCPCug=",
"zh:171ab67cccceead4514fafb2d39e4e708a90cce79000aaf3c29aab7ed4457071",
"zh:18aa7228447baaaefc49a43e8eff970817a7491a63d8937e796357a3829dd979",
"zh:2cbaab6092e81ba6f41fa60a50f14e980c8ec327ee11d0b21f16a478be4b7567",
"zh:53b8e49c06f5b31a8c681f8c0669cf43e78abe71657b8182a221d096bb514965",
"zh:6037cfc60b4b647aabae155fcb46d649ed7c650e0287f05db52b2068f1e27c8a",
"zh:62460982ce1a869eebfca675603fbbd50416cf6b69459fb855bfbe5ae2b97607",
"zh:65f6f3a8470917b6398baa5eb4f74b3932b213eac7c0202798bfad6fd1ee17df",
"h1:00/Y+l17VV4RquGSfwDnYsGYzyf2ZmdQwUgeIzXC7eg=",
"h1:489GpKItA/VRIUA5S4+F8MsnurGVciRvUFyIV81MJTU=",
"h1:7cnczyKGj3+gvaJ0r5JIVWLXPbQfkHYejac76MJx+I8=",
"h1:8rmr1PjJc14Xmor2eEvo5/WBojylt1eYdx6VbSU3Ulo=",
"h1:HjgphNjtgny5tkcUAQoGgBdcuQ+0IyhL8yLsiBqWAP0=",
"h1:LH3umxdBnJcAyeVoBLVn+PC0F0CzN6v9UN6lb6CqQPE=",
"h1:Xx6WUD/zB8fM9SjkFx06Fgx2K7aGJIVvsJS2pwqALEM=",
"h1:YizL5YN9zQ8YkSR6V/G201YrCVdnkF9EUIK4lpROWiA=",
"h1:aPcXVGjYcCJdqvWSzc/dEjwj05LnbWZje8IanygVjcI=",
"h1:eKCvfashdCqfDcFGXE2gq+XxAURD5SzuaQ9Brs3zLos=",
"h1:gpKcBYkBcfn/uF1A8W7MD/OysMZW7EU4QVYvPEEnxGc=",
"h1:kCkcxZZnkKAnMz9scUQHb19d9/l9FPOHovAyrvtA618=",
"h1:t8mXXnICTeKqoD29uvyLFHVWMfMzTUrJuHje8lpI0zU=",
"h1:zjzavjIdLDGRYsWd3v0HJz6ul12Cewj9RW/cqAQ4DxI=",
"zh:02665712b3893307596b3caab99cf1f2502d5caca18e22d4b37bb535e628e102",
"zh:1514b0d3ef62934484ac471113ee68cddec0c21e56b4f710922741fe9b6e6fdf",
"zh:1fab4dfcecbcea13267b42e5ff05ba0692aa2dcb247b8e633fea0daf49feb156",
"zh:24d8367295fe1f1b2be37802aecb96edf32f743364663ffe781d1bb92438395d",
"zh:34e84e7940c99dcf65663cfd25afac22bf5c8a5ff2cd21900c67180d3a072be9",
"zh:3d71d63204a329acf1d1de8638f2c725243cb94cf444d2d7acde54b3d1ac1696",
"zh:57831ba88e779a762bcfa224ba9eac8bc22ef9cd70cd541d848b351e0ba6a75c",
"zh:6407560f2e548afcb4852c91efc664627a9ee565c31a9c81fc9ea1806fca0567",
"zh:738ddbc664d75f4859aa09444a27809bc398795a8ea8f5be8531040690287712",
"zh:841ca2b2d78b6f8d33ec3435bc090c5e04a3a7d85c80df11227a7ea00d36f6b1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8b5cebe64bf04105a49178a165b6a8800a9a33bae6767143a47fe4977755f805",
"zh:a5596635db0993ee3c3060fbc2227d91b239466e96d2d82642625a5aa2486988",
"zh:b3a9c63038441f13c311fd4b2c7e69e571445e5a7365a20c7cc9046b7e6c8aba",
"zh:b585e7e4d7648a540b14b9182819214896ca9337729eeb1f2034833b17db754d",
"zh:d2c3c545318ac8542369e9fc8228e29ee585febdf203a450fad3e0eded71ce02",
"zh:e95dd2d6c3525073af47d47b763cb81b6a51b20cabf76f789c69328922da9ecf",
"zh:eee6e590b36d6c6168a7daae8afa74a8721fd7aa9f62a710f04a311975100722",
"zh:8b3d3d63354032ab9b2403c50728e9aa4e83c7367eaad2d18794221addeafc0f",
"zh:9e293443fe3127e488f540229983c1b9688268185f87567bb3d18e794697acd2",
"zh:b3a22439156e46461213db183e2e89569cd2e8d7cbcfc4b9f90469090e105807",
"zh:f430feb5d51891e84028459e57039045dea4f1f5fcf671161d8ac2d8f28763f3",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.38.0"
version = "4.36.0"
}
}
}

View File

@@ -46,8 +46,6 @@ services:
depends_on:
- redis
- database
healthcheck:
disable: false
immich-web:
container_name: immich_web
@@ -93,12 +91,10 @@ services:
depends_on:
- database
restart: unless-stopped
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: redis:6.2-alpine@sha256:328fe6a5822256d065debb36617a8169dbfbd77b797c525288e465f56c1d392b
healthcheck:
test: redis-cli ping || exit 1

View File

@@ -21,8 +21,6 @@ services:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -35,19 +33,15 @@ services:
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports:
- 3003:3003
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: redis:6.2-alpine@sha256:328fe6a5822256d065debb36617a8169dbfbd77b797c525288e465f56c1d392b
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -71,7 +65,7 @@ services:
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
# set IMMICH_METRICS=true in .env to enable metrics
@@ -91,7 +85,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.1.3-ubuntu@sha256:e10453733015f31103cb530425f32c994816b50102886fa885dafea2c50a711c
image: grafana/grafana:11.1.0-ubuntu@sha256:c7fc29ec783d5e7fc1bdfaad6f92345a345cffbc5d21c388ca228175006fc107
volumes:
- grafana-data:/var/lib/grafana

View File

@@ -16,7 +16,6 @@ services:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
@@ -27,8 +26,6 @@ services:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -43,12 +40,10 @@ services:
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: docker.io/redis:6.2-alpine@sha256:328fe6a5822256d065debb36617a8169dbfbd77b797c525288e465f56c1d392b
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -62,14 +57,13 @@ services:
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
volumes:

View File

@@ -1 +1 @@
20.16.0
20.15.1

View File

@@ -1,7 +1,7 @@
---
title: The Immich core team goes full-time
authors: [alextran]
tags: [update, announcement, FUTO]
tags: [update, announcement, futo]
date: 2024-05-01T00:00
---

View File

@@ -1,91 +0,0 @@
---
title: Licensing announcement - Purchase a license to support Immich
authors: [alextran]
tags: [update, announcement, FUTO]
date: 2024-07-18T00:00
---
Hello everybody,
Firstly, on behalf of the Immich team, I'd like to thank everybody for your continuous support of Immich since the very first day! Your contributions, encouragement, and community engagement have helped bring Immich to its current state. The team and I are forever grateful for that.
Since our [last announcement of the core team joining FUTO to work on Immich full-time](https://immich.app/blog/2024/immich-core-team-goes-fulltime), one of the goals of our new position is to foster a healthy relationship between the developers and the users. We believe that this enables us to create great software, establish transparent policies and build trust.
We want to build a great software application that brings value to you and your loved ones' lives. We are not using you as a product, i.e., selling or tracking your data. We are not putting annoying ads into our software. We respect your privacy. We want to be compensated for the hard work we put in to build Immich for you.
With those notes, we have enabled a way for you to financially support the continued development of Immich, ensuring the software can move forward and will be maintained, by offering a lifetime license of the software. We think if you like and use software, you should pay for it, but _we're never going to force anyone to pay or try to limit Immich for those who don't._
There are two types of license that you can choose to purchase: **Server License** and **Individual License**.
### Server License
This is a lifetime license costing **$99.99**. The license is applied to the whole server. You and all users that use your server are licensed.
### Individual License
This is a lifetime license costing **$24.99**. The license is applied to a single user, and can be used on any server they choose to connect to.
<img
width="837"
alt="license-social-gh"
src="https://github.com/user-attachments/assets/241932ed-ef3b-44ec-a9e2-ee80754e0cca"
/>
You can purchase the license on [our page - https://buy.immich.app](https://buy.immich.app).
Starting with release `v1.109.0` you can purchase and enter your purchased license key directly in the app.
<img
width="1414"
alt="license-page-gh"
src="https://github.com/user-attachments/assets/364fc32a-f6ef-4594-9fea-28d5a26ad77c"
/>
## Thank you
Thank you again for your support, this will help create a strong foundation and stability for the Immich team to continue developing and maintaining the project that you love to use.
<p align="center">
<img
src="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExbjY2eWc5Y2F0ZW56MmR4aWE0dDhzZXlidXRmYWZyajl1bWZidXZpcyZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/87CKDqErVfMqY/giphy.gif"
width="550"
title="SUPPORT THE PROJECT!"
/>
</p>
<br />
<br />
Cheers! 🎉
Immich team
# FAQ
### 1. Where can I purchase a license?
There are several places where you can purchase the license from
- [https://buy.immich.app](https://buy.immich.app)
- [https://pay.futo.org](https://pay.futo.org/)
- or directly from the app.
### 2. Do I need both _Individual License_ and _Server License_?
No,
If you are the admin and the sole user, or your instance has less than a total of 4 users, you can buy the **Individual License** for each user.
If your instance has more than 4 users, it is more cost-effective to buy the **Server License**, which will license all the users on your instance.
### 3. What do I do if I don't pay?
You can continue using Immich without any restriction.
### 4. Will there be any paywalled features?
No, there will never be any paywalled features.
### 5. Where can I get support regarding payment issues?
You can email us with your `orderId` and your email address `billing@futo.org` or on our Discord server.

View File

@@ -1,7 +1,6 @@
---
title: Immich Update - July 2024
authors: [alextran]
date: 2024-07-01T00:00
tags: [update, v1.106.0]
---

View File

@@ -167,7 +167,7 @@ Immich uses CLIP models. For more information about CLIP and its capabilities, r
### How does facial recognition work?
See [How Facial Recognition Works](/docs/features/facial-recognition#How-Facial-Recognition-Works) for details.
For face detection and recognition, Immich uses [InsightFace models](https://github.com/deepinsight/insightface/tree/master/model_zoo).
### How can I disable machine learning?
@@ -181,15 +181,19 @@ However, disabling all jobs will not disable the machine learning service itself
### I'm getting errors about models being corrupt or failing to download. What do I do?
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Hugging Face][huggingface] and place them in the cache folder.
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Huggingface][huggingface] and place them in the cache folder.
### Can I use a custom CLIP model?
No, this is not supported. Only models listed in the [Hugging Face][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
No, this is not supported. Only models listed in the [Huggingface][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/smart-search#CLIP-model) for instructions.
You can change to a multilingual model listed [here](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) by going to Administration > Machine Learning Settings > Smart Search and replacing the name of the model. Be sure to re-run Smart Search on all assets after this change. You can then search in over 100 languages.
:::note
Feel free to make a feature request if there's a model you want to use that isn't in [Immich Huggingface list][huggingface].
:::
### Does Immich support Facial Recognition for videos?
@@ -230,7 +234,7 @@ ls clip/ facial-recognition/
### Why is Immich slow on low-memory systems like the Raspberry Pi?
Immich optionally uses transcoding and machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
### Can I lower CPU and RAM usage?
@@ -239,12 +243,10 @@ The initial backup is the most intensive due to the number of jobs running. The
- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
- For facial recognition on new images to work properly, You must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
- It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- For facial recognition on new images to work properly, You must re-run the Face Detection job for all images after this.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage?
### Can I limit the amount of CPU and RAM usage?
By default, a container has no resource constraints and can use as much of a given resource as the host's kernel scheduler allows. To limit this, you can add the following to the `docker-compose.yml` block of any containers that you want to have limited resources.
@@ -264,8 +266,6 @@ deploy:
</details>
For more details, you can look at the [original docker docs](https://docs.docker.com/config/containers/resource_constraints/) or use this [guide](https://www.baeldung.com/ops/docker-memory-limit).
Note that memory constraints work by terminating the container, so this can introduce instability if set too low.
### How can I boost machine learning speed?
:::note
@@ -275,16 +275,21 @@ This advice improves throughput, not latency. This is to say that it will make S
You can increase throughput by increasing the job concurrency for machine learning jobs (Smart Search, Face Detection). With higher concurrency, the host will work on more assets in parallel. You can do this by navigating to Administration > Settings > Job Settings and increasing concurrency as needed.
:::danger
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Storage speed and latency can quickly become the limiting factor beyond this, particularly when using HDDs.
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Beyond this, note that storage speed and latency may quickly become the limiting factor; particularly when using HDDs.
The concurrency can be increased more comfortably with a GPU, but should still not be above 16 in most cases.
Do not exaggerate with the amount of jobs because you're probably thoroughly overloading the server.
Do not exaggerate with the job concurrency because you're probably thoroughly overloading the server.
More details can be found [here](https://discord.com/channels/979116623879368755/994044917355663450/1174711719994605708)
:::
### My server shows Server Status Offline | Version Unknown. What can I do?
### Why is Immich using so much of my CPU?
You need to enable WebSockets on your reverse proxy.
When a large number of assets are uploaded to Immich, it makes sense that the CPU and RAM will be heavily used for machine learning work and creating image thumbnails.
Once this process is completed, the percentage of CPU usage will drop to around 3-5% usage
### My server shows Server Status Offline | Version Unknown what can I do?
You need to enable Websocket on your reverse proxy.
---
@@ -294,12 +299,6 @@ You need to enable WebSockets on your reverse proxy.
Immich components are typically deployed using docker. To see logs for deployed docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
### How can I reduce the log verbosity of Redis?
To decrease Redis logs, you can add the following line to the `redis:` section of the `docker-compose.yml`:
` command: redis-server --loglevel warning`
### How can I run Immich as a non-root user?
You can change the user in the container by setting the `user` argument in `docker-compose.yml` for each service.
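
To make the two FAQ entries above more concrete (limiting CPU/RAM and running as a non-root user), a compose-level sketch could look like the following; the service name and limit values are examples to adapt, not recommendations from the docs:

```yaml
services:
  immich-machine-learning:
    user: "1000:1000"          # run the container as a non-root uid:gid
    deploy:
      resources:
        limits:
          cpus: '2.0'          # cap the service to two CPU cores
          memory: 2G           # the container is terminated if it exceeds this
```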

View File

@@ -149,21 +149,9 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
- Preview images (small thumbnails and large previews) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `UPLOAD_LOCATION/postgres`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
<TabItem value="Storage Template On" label="Storage Template On">
@@ -199,19 +187,8 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Files uploaded through mobile apps.
- Temporarily located in `UPLOAD_LOCATION/upload/<userID>`.
- Transferred to `UPLOAD_LOCATION/library/<userID>` upon successful upload.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `UPLOAD_LOCATION/postgres`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
</Tabs>
:::danger

View File

@@ -24,7 +24,7 @@ This environment includes the services below. Additional details are available i
- Web app - [`/web`](https://github.com/immich-app/immich/tree/main/web)
- Machine learning - [`/machine-learning`](https://github.com/immich-app/immich/tree/main/machine-learning)
- Redis
- PostgreSQL development database with exposed port `5432` so you can use any database client to access it
- PostgreSQL development database with exposed port `5432` so you can use any database client to acess it
All the services are packaged to run as with single Docker Compose command.

View File

@@ -2,7 +2,7 @@
## Overview
Immich recognizes faces in your photos and videos and groups them together into people. You can then assign names to these people and search for them.
Immich recognizes faces in your photos and videos and groups them together. You can then assign names to the faces and search for them.
The list of people is shown in the Explore page.
@@ -18,75 +18,13 @@ The asset detail view will also show the faces that are recognized in the asset.
## Actions
Additional actions you can do include:
Additional actions you can do with a detected person are:
- Changing the feature photo of the person
- Setting a person's date of birth
- Merging two or more detected faces into one person
- Hiding the faces of a person from the Explore page and detail view
- Assigning an unrecognized face to a person
- Change the feature face photo of the person
- Set date of birth
- Merge two or more detected faces into one person
- Hide face
It can be found from the app bar when you access the detail view of a person.
<img src={require('./img/facial-recognition-4.png').default} title='Facial Recognition 4' width="70%"/>
## How Face Detection Works
Face detection sends the generated preview image to the machine learning service for processing. The service checks if it has the relevant model downloaded and downloads it if not. The image is decoded, pre-processed and passed to the face detection model (with hardware acceleration if configured). The bounding boxes and scores outputted from this model are used to crop and preprocess the image once again to be passed to a facial recognition model (also accelerated if configured). The embeddings from the recognition model, together with the bounding boxes and scores from the face detection model, are then sent back to the server to be added to the database. The embeddings in particular are indexed so they can be searched quickly during facial recognition clustering.
## How Facial Recognition Works
The facial recognition algorithm we use is derived from [DBSCAN](https://www.youtube.com/watch?v=RDZUdRSDOok), a popular clustering algorithm. It essentially treats each detected face as a point in a graph and aims to group points that are close to each other.
:::note
An important concept is whether something is a _core point_. A core point has a minimum number of points around it within a certain distance. A non-core point can only be assigned to a cluster if it can reach a core point; a non-core point can't be used to extend a cluster even if it's part of one. In Immich, the _Minimum Recognized Faces_ setting controls the threshold to be considered a core point.
:::
For each face, it looks around it to find other faces within a certain distance. Faces within this distance are considered similar, so it then checks if any of these faces are associated with a person.
If there is an existing person, it assigns the person of the most similar face to the face being processed.
If there is none, then it has to determine something from the DBSCAN algorithm: whether the face is a _core point_. If there are a certain number of similar faces (by default 3, including the face being considered), then this face is a core point. A new person is created for this face and the face is assigned to it. When other faces are processed, if they're similar to this face, they'll see that it has an associated person and can be assigned to that person.
However, if there aren't enough similar faces, no new person will be created. Instead, the face will wait for all the other faces to be processed to see if any matches that previously didn't have an associated person now do. If they do, then the face will be assigned to that person. If not, this face will be considered an outlier, such as a stranger in the background of an image.
The algorithm has some subtle differences compared to DBSCAN:
- DBSCAN doesn't have a concept of incremental clustering: it clusters all points at once. In contrast, facial recognition has to evolve as more assets are added without re-clustering everything each time.
- The algorithm described above works within a set of queued assets. Once these faces are processed and a new round of faces are detected, the behavior will not be the same as traditional DBSCAN since it preserves the clusters (people) generated from the previous round.
- Facial recognition tries to wait for face detection and thumbnail generation to complete before starting for this reason: the larger the set of faces in the queue, the better the results will be.
- Re-running facial recognition on all assets afterwards does behave like DBSCAN, however.
- DBSCAN is designed for range-based searches (i.e. points within a distance), but high-dimensional vector indices are generally optimized for getting the closest K results. The recognition algorithm doesn't try to get _all_ similar faces within a distance for performance reasons. Instead, it searches for a small number of matches for each face. The end result should be very similar if not identical, but with possibly different performance characteristics.
- Because of this, part of the recognition process is handled during a nightly job to ensure that unassigned faces with potential matches can be recognized.
:::tip
If you didn't import your assets at once or if the server was able to process jobs faster than you could upload them, it's possible that the clustering was suboptimal. If you haven't put effort into the current results, it may be worth re-running facial recognition on all assets for the best starting point. If it's too late for that, you can also manually assign a selection of unassigned faces and queue _Missing_ for Facial Recognition to help it learn and assign more faces automatically.
:::
## Configuration
Navigating to Administration > Settings > Machine Learning Settings > Facial Recognition will show the options available.
:::tip
It's better to only tweak the parameters here than to set them to something very different unless you're ready to test a variety of options. If you do need to set a parameter to a strict setting, relaxing other settings can be a good option to compensate, and vice versa.
:::
### Facial recognition model
There are a few different models available; the default is typically considered the best. On more constrained systems where the default is too intensive, you can choose a smaller model instead.
### Minimum detection score
This setting affects whether a result from the face detecton model is filtered out as a false positive. It may seem tempting to set this low to detect more faces, but it can lead to false positives that are difficult to deal with and can harm facial recognition. It is strongly recommended not to go below 0.5 for this setting. Setting it to a very high number like 0.9 is also not recommended: the default is already biased toward precision, so a threshold that high leads to many undetected faces.
After changing this setting, it will only apply to new face detection jobs. To apply the new setting to all assets, you need to re-run face detection for all assets.
### Maximum recognition distance
The distance threshold described in How Facial Recognition Works. The default works well for most people, but it may be worth lowering it if the library has twins or otherwise very similar looking people. A threshold that's too low just means needing to merge duplicate people after facial recognition, whereas a threshold too high can produce unsalvageable results. It is strongly recommended not to go below 0.3 or above 0.7.
### Minimum recognized faces
The core point threshold described in How Facial Recognition Works. This setting has a few implications. First, it takes effect immediately in that people with fewer faces than this are hidden from view. Secondly, it makes clustering more robust as it prevents loosely-related faces from being linked to each other by requiring a certain level of density.
Increasing this setting is a good idea if you increase the recognition distance or reduce the minimum detection score. Setting it to 1 effectively disables the concept of core points, but can be an option if you prefer a more hands-on approach.

View File

@@ -123,7 +123,6 @@ Once this is done, you can continue to step 3 of "Basic Setup".
- You may want to choose a slower preset than for software transcoding to maintain quality and efficiency
- While you can use VAAPI with NVIDIA and Intel devices, prefer the more specific APIs since they're more optimized for their respective devices
- You can confirm the device is being recognized and used by checking its utilization (via `nvtop` for NVIDIA, `intel_gpu_top` for Intel, etc.) when transcoding. A lack of error logs when transcoding also indicates that it's being used.
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.transcoding.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

View File

@@ -104,19 +104,18 @@ The `immich-server` container will need access to the gallery. Modify your docke
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /mnt/nas/christmas-trip:/mnt/nas/christmas-trip:ro
+ - /home/user/old-pics:/home/user/old-pics:ro
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+ - /mnt/media/videos2:/mnt/media/videos2 # the files in this folder can be deleted, as it does not end with :ro
+ - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path in Windows system.
```
:::tip
The `ro` flag at the end only gives read-only access to the volumes. This will disallow the images from being deleted in the web UI.
The `ro` flag at the end only gives read-only access to the volumes. While Immich does not modify files, it's a good practice to mount read-only.
:::
:::info
_Remember to run `docker compose up -d` to register the changes. Make sure you can see the mounted path in the container._
_Remember to bring the container `docker compose down/up` to register the changes. Make sure you can see the mounted path in the container._
:::
### Create External Libraries

View File

@@ -32,13 +32,12 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- Where and how you can get this file depends on device and vendor, but typically, the device vendor also supplies these
- The `hwaccel.ml.yml` file assumes the path to it is `/usr/lib/libmali.so`, so update accordingly if it is elsewhere
- The `hwaccel.ml.yml` file assumes an additional file `/lib/firmware/mali_csffw.bin`, so update accordingly if your device's driver does not require this file
- Optional: Configure your `.env` file, see [environment variables](/docs/install/environment-variables) for ARM NN specific settings
#### CUDA
- The GPU must have compute capability 5.2 or greater.
- The server must have the official NVIDIA driver installed.
- The installed driver must be >= 545 (it must support CUDA 12.3.2).
- The installed driver must be >= 535 (it must support CUDA 12.2).
- On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.
#### OpenVINO

View File

@@ -66,7 +66,7 @@ The provided file is just a starting point. There are a ton of ways to configure
After bringing down the containers with `docker compose down` and back up with `docker compose up -d`, a Prometheus instance will now collect metrics from the immich server and microservices containers. Note that we didn't need to expose any new ports for these containers - the communication is handled in the internal Docker network.
:::note
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8082` to the microservices container's ports. Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8081` to the microservices container's ports. Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
:::
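
Purely as an illustration of the note above, exposing the raw metrics endpoints would mean adding port mappings along these lines (service names follow the compose files earlier in this comparison; the `8082:8082` form matches the corrected wording):

```yaml
services:
  immich-server:
    ports:
      - 8081:8081   # serves the api worker's /metrics endpoint
  immich-microservices:
    ports:
      - 8082:8082   # serves the microservices worker's /metrics endpoint
```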
### Usage

View File

@@ -7,30 +7,29 @@ Immich uses Postgres as its search database for both metadata and smart search.
Smart search is powered by the [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) extension, utilizing machine learning models like [CLIP](https://openai.com/research/clip) to provide relevant search results. This allows for freeform searches without requiring specific keywords in the image or video metadata.
Archived photos are not included in search results by default. To include them, mark the checkbox in [advanced search filters](/docs/features/smart-search#advanced-search-filters).
:::tip Alternative CLIP Models
More powerful models can be used for more accurate search results. For more information, see the related [FAQ](/docs/FAQ#can-i-use-a-custom-clip-model).
:::
:::info
Smart Search is currently limited to 5,000 results for a single search on the web.
:::
## Advanced Search Filters
In addition, Immich offers advanced search functionality, allowing you to find specific content using customizable search filters. These filters include location, one or more faces, specific albums, and more. You can try out the search filters on the [Demo site](https://demo.immich.app).
The filters smart search allows you to search by include:
Smart search features include:
- People
- Location
- Country
- State
- City
- Camera
- Make
- Model
- Date range
- File name or extension
- Media type
- Image (including live/motion photos)
- Video
- All
- Condition
- Not in any album
- Archived
- Favorited
- Search for one or more faces (with or without context search).
- Search by Country or State or City or by all three.
- Search by camera make and model.
- Search by date range.
- Search by file name.
- Search by media types: image, video or all (**Note:** Image includes live images).
- Search by condition: not in any album or archive or Favorite or all conditions.
<Tabs>
<TabItem value="Computer" label="Computer" default>
@@ -48,27 +47,3 @@ Some search examples:
</TabItem>
</Tabs>
## Configuration
Navigating to `Administration > Settings > Machine Learning Settings > Smart Search` will show the options available.
### CLIP model
More powerful models can be used for more accurate search results, but are slower and can require more server resources. Check out the models [here][huggingface-clip] for more options!
[Multilingual models][huggingface-multilingual-clip] are also available so users can search in their native language. These models support over 100 languages; the `nllb` models in particular support 200.
:::note
Multilingual models are much slower and larger and perform slightly worse for English than English-only models. For this reason, only use them if you actually intend to search in a language besides English.
As a special case, the `ViT-H-14-quickgelu__dfn5b` and `ViT-H-14-378-quickgelu__dfn5b` models are excellent at many European languages despite not specifically being multilingual. They're very intensive regardless, however - especially the latter.
:::
Once you've chosen a model, change this setting to the name of the model you chose. Be sure to re-run Smart Search on all assets after this change.
:::note
Feel free to make a feature request if there's a model you want to use that we don't currently support.
:::
[huggingface-clip]: https://huggingface.co/collections/immich-app/clip-654eaefb077425890874cd07
[huggingface-multilingual-clip]: https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7

View File

@@ -13,14 +13,14 @@ In our `.env` file, we will define variables that will help us in the future whe
# Custom location where your uploaded, thumbnails, and transcoded video files are stored
- UPLOAD_LOCATION=./library
+ UPLOAD_LOCATION=/custom/path/immich/immich_files
+ THUMB_LOCATION=/custom/path/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/path/immich/encoded-video
+ PROFILE_LOCATION=/custom/path/immich/profile
+ UPLOAD_LOCATION=/custom/location/on/your/system/immich/immich_files
+ THUMB_LOCATION=/custom/location/on/your/system/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/location/on/your/system/immich/encoded-video
+ PROFILE_LOCATION=/custom/location/on/your/system/immich/profile
...
```
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` and `immich-microservices` containers.
```diff title="docker-compose.yml"
services:
@@ -29,6 +29,16 @@ services:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
- /etc/localtime:/etc/localtime:ro
...
immich-microservices:
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
- /etc/localtime:/etc/localtime:ro
```
@@ -36,6 +46,7 @@ services:
Restart Immich to register the changes.
```
docker compose down
docker compose up -d
```

View File

@@ -5,7 +5,7 @@ Keep in mind that mucking around in the database might set the moon on fire. Avo
:::
:::tip
Run `docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME>` to connect to the database via the container directly.
Run `docker exec -it immich_postgres psql immich <DB_USERNAME>` to connect to the database via the container directly.
(Replace `<DB_USERNAME>` with the value from your [`.env` file](/docs/install/environment-variables#database)).
:::
@@ -106,9 +106,3 @@ SELECT "key", "value" FROM "system_metadata" WHERE "key" = 'system-config';
```sql title="Delete person and unset it for the faces it was associated with"
DELETE FROM "person" WHERE "name" = 'PersonNameHere';
```
## Postgres internal
```sql title="Change DB_PASSWORD"
ALTER USER <DB_USERNAME> WITH ENCRYPTED PASSWORD 'newpasswordhere';
```

View File

@@ -6,18 +6,36 @@ in a directory on the same machine.
# Mount the directory into the containers.
Edit `docker-compose.yml` to add one or more new mount points in the section `immich-server:` under `volumes:`.
If you want Immich to be able to delete the images in the external library, remove `:ro` from the end of the mount point.
Edit `docker-compose.yml` to add two new mount points in the section `immich-server:` under `volumes:`
```diff
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /home/user/photos1:/home/user/photos1:ro
+ - /mnt/photos2:/mnt/photos2:ro # you can delete this line if you only have one mount point, or you can add more lines if you have more than two
+ - ${EXTERNAL_PATH}:/usr/src/app/external
```
Restart Immich by running `docker compose up -d`.
Edit `.env` to define `EXTERNAL_PATH`, substituting in the correct path for your computer:
```
EXTERNAL_PATH=<your-path-here>
```
On my computer, for example, I use this path:
```
EXTERNAL_PATH=/home/tenino/photos
```
:::info EXTERNAL_PATH design
The design choice to put the EXTERNAL_PATH into .env rather than put two copies of the absolute path in the yml file in order to make everything easier, so if you have two copies of the same path that have to be kept in sync, then someday later when you move the data, update only one of the paths, without everything will break mysteriously.
:::
Restart Immich.
```
docker compose down
docker compose up -d
```
# Create the library

View File

@@ -4,11 +4,14 @@ To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-imm
- Set the URL in Machine Learning Settings on the Admin Settings page to point to the designated ML system, e.g. `http://workstation:3003`.
- Copy the following `docker-compose.yml` to your ML system.
- If using [hardware acceleration](/docs/features/ml-hardware-acceleration), the [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be added
- Start the container by running `docker compose up -d`.
:::info
Smart Search and Face Detection will use this feature, but Facial Recognition is handled in the server.
Starting with version v1.93.0 face detection work and face recognize were split. From now on face detection is done in the immich_machine_learning container, but facial recognition is done in the `microservices` worker.
:::
:::note
The [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be in the same folder if trying to use [hardware acceleration](/docs/features/ml-hardware-acceleration).
:::
```yaml
@@ -34,7 +37,3 @@ volumes:
```
Please note that version mismatches between both hosts may cause instabilities and bugs, so make sure to always perform updates together.
:::caution
As an internal service, the machine learning container has no security measures whatsoever. Please be mindful of where it's deployed and who can access it.
:::
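
For context, a standalone machine-learning compose file of the kind this guide describes might look roughly like this; the image name, port, and cache volume are taken from the compose files earlier in this comparison, but treat it as a sketch rather than the guide's exact file:

```yaml
name: immich_remote_ml

services:
  immich-machine-learning:
    container_name: immich_machine_learning
    image: ghcr.io/immich-app/immich-machine-learning:release
    ports:
      - 3003:3003
    volumes:
      - model-cache:/cache
    restart: always

volumes:
  model-cache:
```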

View File

@@ -38,19 +38,17 @@ Regardless of filesystem, it is not recommended to use a network share for your
## General
| Variable | Description | Default | Containers | Workers |
| :---------------------------------- | :-------------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location | `./upload`<sup>\*1</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
| `IMMICH_API_METRICS_PORT` | Port for the OTEL metrics | `8081` | server | api |
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
| `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
| Variable | Description | Default | Containers | Workers |
| :---------------------------------- | :---------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location | `./upload`<sup>\*1</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
| `IMMICH_API_METRICS_PORT` | Port for the OTEL metrics | `8081` | server | api |
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
\*1: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method changes this default.
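For reference, a sketch of overriding a few of the variables above on the server container (the values are placeholders, not recommendations):

```yaml
# docker-compose.yml excerpt — example values only
services:
  immich-server:
    environment:
      TZ: Europe/Berlin
      IMMICH_LOG_LEVEL: debug
      IMMICH_MEDIA_LOCATION: /usr/src/app/upload   # the default shown above
```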
@@ -157,21 +155,18 @@ Redis (Sentinel) URL example JSON before encoding:
## Machine Learning
| Variable | Description | Default | Containers |
| :----------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO image) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| Variable | Description | Default | Containers |
| :----------------------------------------------- | :------------------------------------------------------------------- | :-----------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
\*1: When changing the concurrency levels of the machine learning service, it is recommended to start with this parameter and tune the others afterwards.
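A sketch of what such tuning could look like on the machine-learning service (the numbers are illustrative starting points, not recommendations):

```yaml
services:
  immich-machine-learning:
    environment:
      MACHINE_LEARNING_REQUEST_THREADS: 4        # start here, per the note above
      MACHINE_LEARNING_MODEL_INTER_OP_THREADS: 1
      MACHINE_LEARNING_MODEL_INTRA_OP_THREADS: 2
      MACHINE_LEARNING_WORKERS: 2
```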

View File

@@ -8,10 +8,6 @@ sidebar_position: 20
This method is experimental and not currently recommended for production use. For production, please refer to installing with [Docker Compose](/docs/install/docker-compose.mdx).
:::
:::note
The install script only supports Linux operating systems and requires Docker to be already installed on the system.
:::
In the shell, from a directory of your choice, run the following command:
```bash

View File

@@ -145,44 +145,11 @@ const config = {
label: 'Installation',
to: '/docs/install/requirements',
},
{
label: 'Contributing',
to: '/docs/overview/support-the-project',
},
{
label: 'Privacy Policy',
to: '/privacy-policy',
},
],
},
{
title: 'Documentation',
title: 'Community',
items: [
{
label: 'Roadmap',
to: '/roadmap',
},
{
label: 'API',
to: '/docs/api',
},
{
label: 'Cursed Knowledge',
to: '/cursed-knowledge',
},
],
},
{
title: 'Links',
items: [
{
label: 'GitHub',
href: 'https://github.com/immich-app/immich',
},
{
label: 'YouTube',
href: 'https://www.youtube.com/@immich-app',
},
{
label: 'Discord',
href: 'https://discord.immich.app',
@@ -193,6 +160,23 @@ const config = {
},
],
},
{
title: 'Links',
items: [
// {
// label: "Blog",
// to: "/blog",
// },
{
label: 'GitHub',
href: 'https://github.com/immich-app/immich',
},
{
label: 'YouTube',
href: 'https://www.youtube.com/@immich-app',
},
],
},
],
copyright: `Immich is available as open source under the terms of the GNU AGPL v3 License.`,
},

46
docs/package-lock.json generated
View File

@@ -12754,9 +12754,9 @@
}
},
"node_modules/postcss": {
"version": "8.4.40",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.40.tgz",
"integrity": "sha512-YF2kKIUzAofPMpfH6hOi2cGnv/HrUlfucspc7pDyvv7kGdqXrfj8SCl/t8owkEgKEuu8ZcRjSOxFxVLqwChZ2Q==",
"version": "8.4.39",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.39.tgz",
"integrity": "sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==",
"funding": [
{
"type": "opencollective",
@@ -13600,9 +13600,9 @@
}
},
"node_modules/prettier": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz",
"integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==",
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.2.tgz",
"integrity": "sha512-rAVeHYMcv8ATV5d508CFdn+8/pHPpXeIid1DdrPwXnaAdH7cqjVbpJaT5eq4yRAFU/lsbwYwSF/n5iNrdJHPQA==",
"dev": true,
"license": "MIT",
"bin": {
@@ -13747,10 +13747,9 @@
}
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"license": "BSD-3-Clause",
"version": "6.12.1",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz",
"integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==",
"dependencies": {
"side-channel": "^1.0.6"
},
@@ -16015,9 +16014,9 @@
}
},
"node_modules/tailwindcss": {
"version": "3.4.7",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.7.tgz",
"integrity": "sha512-rxWZbe87YJb4OcSopb7up2Ba4U82BoiSGUdoDr3Ydrg9ckxFS/YWsvhN323GMcddgU65QRy7JndC7ahhInhvlQ==",
"version": "3.4.4",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.4.tgz",
"integrity": "sha512-ZoyXOdJjISB7/BcLTR6SEsLgKtDStYyYZVLsUtWChO4Ps20CBad7lfJKVDiejocV4ME1hLmyY0WJE3hSDcmQ2A==",
"license": "MIT",
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
@@ -16377,9 +16376,9 @@
}
},
"node_modules/typescript": {
"version": "5.5.4",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz",
"integrity": "sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==",
"version": "5.5.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz",
"integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==",
"license": "Apache-2.0",
"bin": {
"tsc": "bin/tsc",
@@ -16715,16 +16714,12 @@
}
},
"node_modules/url": {
"version": "0.11.4",
"resolved": "https://registry.npmjs.org/url/-/url-0.11.4.tgz",
"integrity": "sha512-oCwdVC7mTuWiPyjLUz/COz5TLk6wgp0RCsN+wHZ2Ekneac9w8uuV0njcbbie2ME+Vs+d6duwmYuR3HgQXs1fOg==",
"license": "MIT",
"version": "0.11.3",
"resolved": "https://registry.npmjs.org/url/-/url-0.11.3.tgz",
"integrity": "sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw==",
"dependencies": {
"punycode": "^1.4.1",
"qs": "^6.12.3"
},
"engines": {
"node": ">= 0.4"
"qs": "^6.11.2"
}
},
"node_modules/url-loader": {
@@ -16788,8 +16783,7 @@
"node_modules/url/node_modules/punycode": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
"integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==",
"license": "MIT"
"integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ=="
},
"node_modules/util": {
"version": "0.10.4",

View File

@@ -56,6 +56,6 @@
"node": ">=20"
},
"volta": {
"node": "20.16.0"
"node": "20.15.1"
}
}

View File

@@ -63,11 +63,6 @@ const projects: CommunityProjectProps[] = [
description: 'Powershell Module for the Immich API',
url: 'https://github.com/hanpq/PSImmich',
},
{
title: 'Immich Distribution',
description: 'Snap package for easy install and zero-care auto updates of Immich. Self-hosted photo management.',
url: 'https://immich-distribution.nsg.cc',
},
];
function CommunityProject({ title, description, url }: CommunityProjectProps): JSX.Element {

View File

@@ -1,13 +1,4 @@
import {
mdiCalendarToday,
mdiCrosshairsOff,
mdiLeadPencil,
mdiLockOff,
mdiLockOutline,
mdiSpeedometerSlow,
mdiWeb,
mdiWrap,
} from '@mdi/js';
import { mdiCalendarToday, mdiLeadPencil, mdiLockOutline, mdiSpeedometerSlow, mdiWeb } from '@mdi/js';
import Layout from '@theme/Layout';
import React from 'react';
import { Item as TimelineItem, Timeline } from '../components/timeline';
@@ -17,41 +8,6 @@ const withLanguage = (date: Date) => (language: string) => date.toLocaleDateStri
type Item = Omit<TimelineItem, 'done' | 'getDateLabel'> & { date: Date };
const items: Item[] = [
{
icon: mdiWrap,
iconColor: 'gray',
title: 'Carriage returns in bash scripts are cursed',
description: 'Git can be configured to automatically convert LF to CRLF on checkout and CRLF breaks bash scripts.',
link: {
url: 'https://github.com/immich-app/immich/pull/11613',
text: '#11613',
},
date: new Date(2024, 7, 7),
},
{
icon: mdiLockOff,
iconColor: 'red',
title: 'Fetch inside Cloudflare Workers is cursed',
description:
'Fetch requests in Cloudflare Workers use http by default, even if you explicitly specify https, which can often cause redirect loops.',
link: {
url: 'https://community.cloudflare.com/t/does-cloudflare-worker-allow-secure-https-connection-to-fetch-even-on-flexible-ssl/68051/5',
text: 'Cloudflare',
},
date: new Date(2024, 7, 7),
},
{
icon: mdiCrosshairsOff,
iconColor: 'gray',
title: 'GPS sharing on mobile is cursed',
description:
'Some phones will silently strip GPS data from images when apps without location permission try to access them.',
link: {
url: 'https://github.com/immich-app/immich/discussions/11268',
text: '#11268',
},
date: new Date(2024, 6, 21),
},
{
icon: mdiLeadPencil,
iconColor: 'gold',

View File

@@ -41,7 +41,7 @@ function HomepageHeader() {
Discord
</Link>
</div>
<img src="/img/immich-screenshots.webp" alt="screenshots" width={'70%'} />
<img src="/img/immich-screenshots.png" alt="screenshots" width={'70%'} />
<div className="flex flex-col sm:flex-row place-items-center place-content-center mt-4 gap-1">
<div className="h-24">
<a href="https://play.google.com/store/apps/details?id=app.alextran.immich">

View File

@@ -1,114 +0,0 @@
import React from 'react';
import Link from '@docusaurus/Link';
import Layout from '@theme/Layout';
import { useColorMode } from '@docusaurus/theme-common';
function HomepageHeader() {
const { isDarkTheme } = useColorMode();
return (
<header>
<section className="max-w-[900px] m-4 p-4 md:p-6 md:m-auto md:my-12 border border-red-400 rounded-2xl bg-slate-200 dark:bg-immich-dark-gray">
<section>
<h1>Privacy Policy</h1>
<p>Last updated: July 31st 2024</p>
<p>
Welcome to Immich. We are committed to respecting your privacy. This Privacy Policy sets out how we collect,
use, and share information when you use our Immich app.
</p>
</section>
{/* 1. Scope of This Policy */}
<section>
<h2>1. Scope of This Policy</h2>
<p>
This Privacy Policy applies to the Immich app ("we", "our", or "us") and covers our collection, use, and
disclosure of your information. This Policy does not cover any third-party websites, services, or
applications that can be accessed through our app, or third-party services you may access through Immich.
</p>
</section>
{/* 2. Information We Collect */}
<section>
<h2>2. Information We Collect</h2>
<div>
<p>
<strong>Locally Stored Data</strong>: Immich stores all your photos, albums, and settings locally on your
device. We do not have access to this data, nor do we transmit or store it on any of our servers.
</p>
</div>
<div>
<p>
<strong>Purchase Information:</strong> When you make a purchase within the{' '}
<a href="https://buy.immich.app">https://buy.immich.app</a>, we collect the following information for tax
calculation purposes:
</p>
<ul>
<li>Country of origin</li>
<li>Postal code (if the user is from Canada or the United States)</li>
</ul>
</div>
</section>
{/* 3. Use of Your Information */}
<section>
<h2>3. Use of Your Information</h2>
<p>
<strong>Tax Calculation:</strong> The country of origin and postal code (for users from Canada or the United
States) are collected solely for determining the applicable tax rates on your purchase.
</p>
</section>
{/* 4. Sharing of Your Information */}
<section>
<h2>4. Sharing of Your Information</h2>
<ul>
<li>
<strong>Tax Authorities:</strong> The purchase information may be shared with tax authorities as required
by law.
</li>
<li>
<strong>Payment Providers:</strong> The purchase information may be shared with payment providers where
required.
</li>
</ul>
</section>
{/* 5. Changes to This Policy */}
<section>
<h2>5. Changes to This Policy</h2>
<p>
We may update our Privacy Policy from time to time. If we make any changes, we will notify you by revising
the "Last updated" date at the top of this policy. It's encouraged that users frequently check this page for
any changes to stay informed about how we are helping to protect the personal information we collect.
</p>
</section>
{/* 6. Contact Us */}
<section>
<h2>6. Contact Us</h2>
<p>
If you have any questions about this Privacy Policy, please contact us at{' '}
<a href="mailto:immich@futo.org">immich@futo.org</a>
</p>
</section>
</section>
</header>
);
}
export default function Home(): JSX.Element {
return (
<Layout
title="Home"
description="immich Self-hosted photo and video backup solution directly from your mobile phone "
noFooter={true}
>
<HomepageHeader />
<div className="flex flex-col place-items-center place-content-center">
<p>This project is available under GNU AGPL v3 license.</p>
<p className="text-xs">Privacy should not be a luxury</p>
</div>
</Layout>
);
}

View File

@@ -66,16 +66,12 @@ import {
mdiVectorCombine,
mdiVideo,
mdiWeb,
mdiLicense,
} from '@mdi/js';
import Layout from '@theme/Layout';
import React from 'react';
import { Item, Timeline } from '../components/timeline';
const releases = {
// TODO
'v1.110.0': new Date(2024, 5, 11),
'v1.109.0': new Date(2024, 6, 18),
'v1.106.1': new Date(2024, 5, 11),
'v1.104.0': new Date(2024, 4, 13),
'v1.103.0': new Date(2024, 3, 29),
@@ -224,20 +220,6 @@ const roadmap: Item[] = [
];
const milestones: Item[] = [
{
icon: mdiStar,
iconColor: 'gold',
title: '40,000 Stars',
description: 'Reached 40K Stars on GitHub!',
getDateLabel: withLanguage(new Date(2024, 6, 21)),
},
withRelease({
icon: mdiLicense,
iconColor: 'gold',
title: 'Supporter Badge',
description: 'The option to buy Immich to support its development!',
release: 'v1.109.0',
}),
withRelease({
icon: mdiHistory,
title: 'Versioned documentation',
@@ -254,7 +236,7 @@ const milestones: Item[] = [
withRelease({
icon: mdiContentDuplicate,
title: 'Similar image detection',
description: "Detect duplicate assets that aren't exactly identical",
description: 'Detect duplicate assets that arent exactly identical',
release: 'v1.106.1',
}),
withRelease({

View File

@@ -1,24 +1,4 @@
[
{
"label": "v1.111.0",
"url": "https://v1.111.0.archive.immich.app"
},
{
"label": "v1.110.0",
"url": "https://v1.110.0.archive.immich.app"
},
{
"label": "v1.109.2",
"url": "https://v1.109.2.archive.immich.app"
},
{
"label": "v1.109.1",
"url": "https://v1.109.1.archive.immich.app"
},
{
"label": "v1.109.0",
"url": "https://v1.109.0.archive.immich.app"
},
{
"label": "v1.108.0",
"url": "https://v1.108.0.archive.immich.app"

Binary file not shown.

Before: 196 KiB

32
e2e/.eslintrc.cjs Normal file
View File

@@ -0,0 +1,32 @@
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module',
tsconfigRootDir: __dirname,
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: ['plugin:@typescript-eslint/recommended', 'plugin:prettier/recommended', 'plugin:unicorn/recommended'],
root: true,
env: {
node: true,
},
ignorePatterns: ['.eslintrc.js'],
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'unicorn/prevent-abbreviations': 'off',
'unicorn/filename-case': 'off',
'unicorn/no-null': 'off',
'unicorn/prefer-top-level-await': 'off',
'unicorn/prefer-event-target': 'off',
'unicorn/no-thenable': 'off',
},
};

View File

@@ -1 +1 @@
20.16.0
20.15.1

View File

@@ -35,7 +35,7 @@ services:
- 2283:3001
redis:
image: redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: redis:6.2-alpine@sha256:328fe6a5822256d065debb36617a8169dbfbd77b797c525288e465f56c1d392b
database:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0

View File

@@ -1,64 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: ['eslint.config.mjs'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
parserOptions: {
project: 'tsconfig.json',
tsconfigRootDir: __dirname,
},
},
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'unicorn/prevent-abbreviations': 'off',
'unicorn/filename-case': 'off',
'unicorn/no-null': 'off',
'unicorn/prefer-top-level-await': 'off',
'unicorn/prefer-event-target': 'off',
'unicorn/no-thenable': 'off',
},
},
];

1745
e2e/package-lock.json generated

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
{
"name": "immich-e2e",
"version": "1.111.0",
"version": "1.108.0",
"description": "",
"main": "index.js",
"type": "module",
@@ -19,26 +19,23 @@
"author": "",
"license": "GNU Affero General Public License version 3",
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/cli": "file:../cli",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@playwright/test": "^1.44.1",
"@types/luxon": "^3.4.2",
"@types/node": "^20.14.13",
"@types/node": "^20.14.10",
"@types/oidc-provider": "^8.5.1",
"@types/pg": "^8.11.0",
"@types/pngjs": "^6.0.4",
"@types/supertest": "^6.0.2",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"eslint": "^9.0.0",
"@typescript-eslint/eslint-plugin": "^7.1.0",
"@typescript-eslint/parser": "^7.1.0",
"@vitest/coverage-v8": "^1.3.0",
"eslint": "^8.57.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
"exiftool-vendored": "^28.0.0",
"globals": "^15.9.0",
"eslint-plugin-unicorn": "^54.0.0",
"exiftool-vendored": "^27.0.0",
"jose": "^5.6.3",
"luxon": "^3.4.4",
"oidc-provider": "^8.5.1",
@@ -50,9 +47,9 @@
"supertest": "^7.0.0",
"typescript": "^5.3.3",
"utimes": "^5.2.1",
"vitest": "^2.0.5"
"vitest": "^1.3.0"
},
"volta": {
"node": "20.16.0"
"node": "20.15.1"
}
}

View File

@@ -472,44 +472,6 @@ describe('/asset', () => {
expect(status).toEqual(200);
});
it('should update date time original when sidecar file contains DateTimeOriginal', async () => {
const sidecarData = `<?xpacket begin='?' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta xmlns:x='adobe:ns:meta/' x:xmptk='Image::ExifTool 12.40'>
<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'>
<rdf:Description rdf:about=''
xmlns:exif='http://ns.adobe.com/exif/1.0/'>
<exif:ExifVersion>0220</exif:ExifVersion> <exif:DateTimeOriginal>2024-07-11T10:32:52Z</exif:DateTimeOriginal>
<exif:GPSVersionID>2.3.0.0</exif:GPSVersionID>
</rdf:Description>
</rdf:RDF>
</x:xmpmeta>
<?xpacket end='w'?>`;
const { id } = await utils.createAsset(user1.accessToken, {
sidecarData: {
bytes: Buffer.from(sidecarData),
filename: 'example.xmp',
},
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const assetInfo = await utils.getAssetInfo(user1.accessToken, id);
expect(assetInfo.exifInfo?.dateTimeOriginal).toBe('2024-07-11T10:32:52.000Z');
const { status, body } = await request(app)
.put(`/assets/${id}`)
.set('Authorization', `Bearer ${user1.accessToken}`)
.send({ dateTimeOriginal: '2023-11-19T18:11:00.000-07:00' });
expect(body).toMatchObject({
id,
exifInfo: expect.objectContaining({
dateTimeOriginal: '2023-11-20T01:11:00.000Z',
}),
});
expect(status).toEqual(200);
});
it('should reject invalid gps coordinates', async () => {
for (const test of [
{ latitude: 12 },
@@ -545,22 +507,6 @@ describe('/asset', () => {
expect(status).toEqual(200);
});
it.skip('should geocode country from gps data in the middle of nowhere', async () => {
const { status } = await request(app)
.put(`/assets/${user1Assets[0].id}`)
.set('Authorization', `Bearer ${user1.accessToken}`)
.send({ latitude: 42, longitude: 69 });
expect(status).toEqual(200);
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const asset = await getAssetInfo({ id: user1Assets[0].id }, { headers: asBearerAuth(user1.accessToken) });
expect(asset).toMatchObject({
id: user1Assets[0].id,
exifInfo: expect.objectContaining({ city: null, country: 'Kazakhstan' }),
});
});
it('should set the description', async () => {
const { status, body } = await request(app)
.put(`/assets/${user1Assets[0].id}`)
@@ -1137,7 +1083,7 @@ describe('/asset', () => {
type: AssetTypeEnum.Image,
originalFileName: '14bit-uncompressed-(3_2).arw',
resized: true,
fileCreatedAt: '2016-01-08T14:08:01.000Z',
fileCreatedAt: '2016-01-08T15:08:01.000Z',
exifInfo: {
make: 'SONY',
model: 'ILCE-7M2',
@@ -1149,7 +1095,7 @@ describe('/asset', () => {
iso: 100,
lensModel: 'E 25mm F2',
fileSizeInByte: 49_512_448,
dateTimeOriginal: '2016-01-08T14:08:01.000Z',
dateTimeOriginal: '2016-01-08T15:08:01.000Z',
latitude: null,
longitude: null,
orientation: '1',
@@ -1224,25 +1170,17 @@ describe('/asset', () => {
// into the test here.
it.each([
{
filepath: 'formats/motionphoto/samsung-one-ui-5.jpg',
filepath: 'formats/motionphoto/Samsung One UI 5.jpg',
checksum: 'fr14niqCq6N20HB8rJYEvpsUVtI=',
},
{
filepath: 'formats/motionphoto/samsung-one-ui-6.jpg',
filepath: 'formats/motionphoto/Samsung One UI 6.jpg',
checksum: 'lT9Uviw/FFJYCjfIxAGPTjzAmmw=',
},
{
filepath: 'formats/motionphoto/samsung-one-ui-6.heic',
filepath: 'formats/motionphoto/Samsung One UI 6.heic',
checksum: '/ejgzywvgvzvVhUYVfvkLzFBAF0=',
},
{
filepath: 'formats/motionphoto/pixel-6-pro.jpg',
checksum: 'bFhLGbdK058PSk4FTfrSnoKWykc=',
},
{
filepath: 'formats/motionphoto/pixel-8a.jpg',
checksum: '7YdY+WF0h+CXHbiXpi0HiCMTTjs=',
},
])(`should extract motionphoto video from $filepath`, async ({ filepath, checksum }) => {
const response = await utils.createAsset(admin.accessToken, {
assetData: {

View File

@@ -159,75 +159,4 @@ describe('/map', () => {
expect(body).toEqual(expect.objectContaining({ id: 'immich-map-dark' }));
});
});
describe('GET /map/reverse-geocode', () => {
it('should require authentication', async () => {
const { status, body } = await request(app).get('/map/reverse-geocode');
expect(status).toBe(401);
expect(body).toEqual(errorDto.unauthorized);
});
it('should throw an error if a lat is not provided', async () => {
const { status, body } = await request(app)
.get('/map/reverse-geocode?lon=123')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(400);
expect(body).toEqual(errorDto.badRequest(['lat must be a number between -90 and 90']));
});
it('should throw an error if a lat is not a number', async () => {
const { status, body } = await request(app)
.get('/map/reverse-geocode?lat=abc&lon=123.456')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(400);
expect(body).toEqual(errorDto.badRequest(['lat must be a number between -90 and 90']));
});
it('should throw an error if a lat is out of range', async () => {
const { status, body } = await request(app)
.get('/map/reverse-geocode?lat=91&lon=123.456')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(400);
expect(body).toEqual(errorDto.badRequest(['lat must be a number between -90 and 90']));
});
it('should throw an error if a lon is not provided', async () => {
const { status, body } = await request(app)
.get('/map/reverse-geocode?lat=75')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(400);
expect(body).toEqual(errorDto.badRequest(['lon must be a number between -180 and 180']));
});
const reverseGeocodeTestCases = [
{
name: 'Vaucluse',
lat: -33.858_977_058_663_13,
lon: 151.278_490_730_270_48,
results: [{ city: 'Vaucluse', state: 'New South Wales', country: 'Australia' }],
},
{
name: 'Ravenhall',
lat: -37.765_732_399_174_75,
lon: 144.752_453_164_883_3,
results: [{ city: 'Ravenhall', state: 'Victoria', country: 'Australia' }],
},
{
name: 'Scarborough',
lat: -31.894_346_156_789_997,
lon: 115.757_617_103_904_64,
results: [{ city: 'Scarborough', state: 'Western Australia', country: 'Australia' }],
},
];
it.each(reverseGeocodeTestCases)(`should resolve to $name`, async ({ lat, lon, results }) => {
const { status, body } = await request(app)
.get(`/map/reverse-geocode?lat=${lat}&lon=${lon}`)
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(200);
expect(Array.isArray(body)).toBe(true);
expect(body.length).toBe(results.length);
expect(body).toEqual(results);
});
});
});

View File

@@ -6,19 +6,10 @@ import request from 'supertest';
import { beforeAll, beforeEach, describe, expect, it } from 'vitest';
const invalidBirthday = [
{
birthDate: 'false',
response: ['birthDate must be a string in the format yyyy-MM-dd', 'Birth date cannot be in the future'],
},
{
birthDate: '123567',
response: ['birthDate must be a string in the format yyyy-MM-dd', 'Birth date cannot be in the future'],
},
{
birthDate: 123_567,
response: ['birthDate must be a string in the format yyyy-MM-dd', 'Birth date cannot be in the future'],
},
{ birthDate: '9999-01-01', response: ['Birth date cannot be in the future'] },
{ birthDate: 'false', response: 'birthDate must be a date string' },
{ birthDate: '123567', response: 'birthDate must be a date string' },
{ birthDate: 123_567, response: 'birthDate must be a date string' },
{ birthDate: new Date(9999, 0, 0).toISOString(), response: ['Birth date cannot be in the future'] },
];
describe('/people', () => {
@@ -74,7 +65,6 @@ describe('/people', () => {
expect(status).toBe(200);
expect(body).toEqual({
hasNextPage: false,
total: 3,
hidden: 1,
people: [
@@ -90,7 +80,6 @@ describe('/people', () => {
expect(status).toBe(200);
expect(body).toEqual({
hasNextPage: false,
total: 3,
hidden: 1,
people: [
@@ -99,21 +88,6 @@ describe('/people', () => {
],
});
});
it('should support pagination', async () => {
const { status, body } = await request(app)
.get('/people')
.set('Authorization', `Bearer ${admin.accessToken}`)
.query({ withHidden: true, page: 2, size: 1 });
expect(status).toBe(200);
expect(body).toEqual({
hasNextPage: true,
total: 3,
hidden: 1,
people: [expect.objectContaining({ name: 'visible_person' })],
});
});
});
describe('GET /people/:id', () => {
@@ -194,13 +168,13 @@ describe('/people', () => {
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({
name: 'New Person',
birthDate: '1990-01-01',
birthDate: '1990-01-01T05:00:00.000Z',
});
expect(status).toBe(201);
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Person',
birthDate: '1990-01-01',
birthDate: '1990-01-01T05:00:00.000Z',
});
});
});
@@ -242,7 +216,7 @@ describe('/people', () => {
const { status, body } = await request(app)
.put(`/people/${visiblePerson.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ birthDate: '1990-01-01' });
.send({ birthDate: '1990-01-01T05:00:00.000Z' });
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: '1990-01-01' });
});

View File

@@ -1,4 +1,4 @@
import { AssetMediaResponseDto, LoginResponseDto, deleteAssets, updateAsset } from '@immich/sdk';
import { AssetMediaResponseDto, LoginResponseDto, deleteAssets, getMapMarkers, updateAsset } from '@immich/sdk';
import { DateTime } from 'luxon';
import { readFile } from 'node:fs/promises';
import { join } from 'node:path';
@@ -32,6 +32,9 @@ describe('/search', () => {
let assetOneJpg5: AssetMediaResponseDto;
let assetSprings: AssetMediaResponseDto;
let assetLast: AssetMediaResponseDto;
let cities: string[];
let states: string[];
let countries: string[];
beforeAll(async () => {
await utils.resetDatabase();
@@ -46,9 +49,9 @@ describe('/search', () => {
{ filename: '/albums/nature/silver_fir.jpg' },
{ filename: '/formats/heic/IMG_2682.heic' },
{ filename: '/formats/jpg/el_torcal_rocks.jpg' },
{ filename: '/formats/motionphoto/samsung-one-ui-6.jpg' },
{ filename: '/formats/motionphoto/samsung-one-ui-6.heic' },
{ filename: '/formats/motionphoto/samsung-one-ui-5.jpg' },
{ filename: '/formats/motionphoto/Samsung One UI 6.jpg' },
{ filename: '/formats/motionphoto/Samsung One UI 6.heic' },
{ filename: '/formats/motionphoto/Samsung One UI 5.jpg' },
{ filename: '/metadata/gps-position/thompson-springs.jpg', dto: { isArchived: true } },
@@ -82,7 +85,7 @@ describe('/search', () => {
// note: the coordinates here are not the actual coordinates of the images and are random for most of them
const coordinates = [
{ latitude: 48.853_41, longitude: 2.3488 }, // paris
{ latitude: 35.6895, longitude: 139.691_71 }, // tokyo
{ latitude: 63.0695, longitude: -151.0074 }, // denali
{ latitude: 52.524_37, longitude: 13.410_53 }, // berlin
{ latitude: 1.314_663_1, longitude: 103.845_409_3 }, // singapore
{ latitude: 41.013_84, longitude: 28.949_66 }, // istanbul
@@ -98,15 +101,16 @@ describe('/search', () => {
{ latitude: 31.634_16, longitude: -7.999_94 }, // marrakesh
{ latitude: 38.523_735_4, longitude: -78.488_619_4 }, // tanners ridge
{ latitude: 59.938_63, longitude: 30.314_13 }, // st. petersburg
{ latitude: 35.6895, longitude: 139.691_71 }, // tokyo
];
const updates = coordinates.map((dto, i) =>
updateAsset({ id: assets[i].id, updateAssetDto: dto }, { headers: asBearerAuth(admin.accessToken) }),
const updates = assets.map((asset, i) =>
updateAsset({ id: asset.id, updateAssetDto: coordinates[i] }, { headers: asBearerAuth(admin.accessToken) }),
);
await Promise.all(updates);
for (const [i] of coordinates.entries()) {
await utils.waitForWebsocketEvent({ event: 'assetUpdate', id: assets[i].id });
for (const asset of assets) {
await utils.waitForWebsocketEvent({ event: 'assetUpdate', id: asset.id });
}
[
@@ -133,6 +137,12 @@ describe('/search', () => {
assetLast = assets.at(-1) as AssetMediaResponseDto;
await deleteAssets({ assetBulkDeleteDto: { ids: [assetSilver.id] } }, { headers: asBearerAuth(admin.accessToken) });
const mapMarkers = await getMapMarkers({}, { headers: asBearerAuth(admin.accessToken) });
const nonTrashed = mapMarkers.filter((mark) => mark.id !== assetSilver.id);
cities = [...new Set(nonTrashed.map((mark) => mark.city).filter((entry): entry is string => !!entry))].sort();
states = [...new Set(nonTrashed.map((mark) => mark.state).filter((entry): entry is string => !!entry))].sort();
countries = [...new Set(nonTrashed.map((mark) => mark.country).filter((entry): entry is string => !!entry))].sort();
}, 30_000);
afterAll(async () => {
@@ -305,126 +315,29 @@ describe('/search', () => {
{
should: 'should search by originalFilename with spaces',
deferred: () => ({
dto: { originalFileName: 'samsung-one', type: 'IMAGE' },
dto: { originalFileName: 'Samsung One', type: 'IMAGE' },
assets: [assetOneJpg5, assetOneJpg6, assetOneHeic6],
}),
},
{
should: 'should search by city',
deferred: () => ({
dto: {
city: 'Accra',
includeNull: true,
},
assets: [assetHeic],
}),
},
{
should: "should search city ('')",
deferred: () => ({
dto: {
city: '',
isVisible: true,
includeNull: true,
},
assets: [assetLast],
}),
},
{
should: 'should search city (null)',
deferred: () => ({
dto: {
city: null,
isVisible: true,
includeNull: true,
},
assets: [assetLast],
}),
deferred: () => ({ dto: { city: 'Accra' }, assets: [assetHeic] }),
},
{
should: 'should search by state',
deferred: () => ({
dto: {
state: 'New York',
includeNull: true,
},
assets: [assetDensity],
}),
},
{
should: "should search state ('')",
deferred: () => ({
dto: {
state: '',
isVisible: true,
withExif: true,
includeNull: true,
},
assets: [assetLast, assetNotocactus],
}),
},
{
should: 'should search state (null)',
deferred: () => ({
dto: {
state: null,
isVisible: true,
includeNull: true,
},
assets: [assetLast, assetNotocactus],
}),
deferred: () => ({ dto: { state: 'New York' }, assets: [assetDensity] }),
},
{
should: 'should search by country',
deferred: () => ({
dto: {
country: 'France',
includeNull: true,
},
assets: [assetFalcon],
}),
},
{
should: "should search country ('')",
deferred: () => ({
dto: {
country: '',
isVisible: true,
includeNull: true,
},
assets: [assetLast],
}),
},
{
should: 'should search country (null)',
deferred: () => ({
dto: {
country: null,
isVisible: true,
includeNull: true,
},
assets: [assetLast],
}),
deferred: () => ({ dto: { country: 'France' }, assets: [assetFalcon] }),
},
{
should: 'should search by make',
deferred: () => ({
dto: {
make: 'Canon',
includeNull: true,
},
assets: [assetFalcon, assetDenali],
}),
deferred: () => ({ dto: { make: 'Canon' }, assets: [assetFalcon, assetDenali] }),
},
{
should: 'should search by model',
deferred: () => ({
dto: {
model: 'Canon EOS 7D',
includeNull: true,
},
assets: [assetDenali],
}),
deferred: () => ({ dto: { model: 'Canon EOS 7D' }, assets: [assetDenali] }),
},
{
should: 'should allow searching the upload library (libraryId: null)',
@@ -537,79 +450,32 @@ describe('/search', () => {
it('should get suggestions for country', async () => {
const { status, body } = await request(app)
.get('/search/suggestions?type=country&includeNull=true')
.get('/search/suggestions?type=country')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Cuba',
'France',
'Georgia',
'Germany',
'Ghana',
'Japan',
'Morocco',
"People's Republic of China",
'Russian Federation',
'Singapore',
'Spain',
'Switzerland',
'United States of America',
null,
]);
expect(body).toEqual(countries);
expect(status).toBe(200);
});
it('should get suggestions for state', async () => {
const { status, body } = await request(app)
.get('/search/suggestions?type=state&includeNull=true')
.get('/search/suggestions?type=state')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Andalusia',
'Berlin',
'Glarus',
'Greater Accra',
'Havana',
'Île-de-France',
'Marrakesh-Safi',
'Mississippi',
'New York',
'Shanghai',
'St.-Petersburg',
'Tbilisi',
'Tokyo',
'Virginia',
null,
]);
expect(body).toHaveLength(states.length);
expect(body).toEqual(expect.arrayContaining(states));
expect(status).toBe(200);
});
it('should get suggestions for city', async () => {
const { status, body } = await request(app)
.get('/search/suggestions?type=city&includeNull=true')
.get('/search/suggestions?type=city')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Accra',
'Berlin',
'Glarus',
'Havana',
'Marrakesh',
'Montalbán de Córdoba',
'New York City',
'Novena',
'Paris',
'Philadelphia',
'Saint Petersburg',
'Shanghai',
'Stanley',
'Tbilisi',
'Tokyo',
null,
]);
expect(body).toEqual(cities);
expect(status).toBe(200);
});
it('should get suggestions for camera make', async () => {
const { status, body } = await request(app)
.get('/search/suggestions?type=camera-make&includeNull=true')
.get('/search/suggestions?type=camera-make')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Apple',
@@ -619,14 +485,13 @@ describe('/search', () => {
'PENTAX Corporation',
'samsung',
'SONY',
null,
]);
expect(status).toBe(200);
});
it('should get suggestions for camera model', async () => {
const { status, body } = await request(app)
.get('/search/suggestions?type=camera-model&includeNull=true')
.get('/search/suggestions?type=camera-model')
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(body).toEqual([
'Canon EOS 7D',
@@ -641,7 +506,6 @@ describe('/search', () => {
'SM-F711N',
'SM-S906U',
'SM-T970',
null,
]);
expect(status).toBe(200);
});

View File

@@ -254,7 +254,7 @@ describe('/server', () => {
.set('Authorization', `Bearer ${admin.accessToken}`)
.send(serverLicense);
const { status } = await request(app).get('/server/license').set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(404);
expect(status).toBe(200);
});
});

View File

@@ -112,13 +112,6 @@ describe('/shared-links', () => {
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="1 shared photos & videos" />`);
});
it('should have fqdn og:image meta tag for shared asset', async () => {
const resp = await request(shareUrl).get(`/${linkWithAssets.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta property="og:image" content="http://`);
});
});
describe('GET /shared-links', () => {

View File

@@ -50,7 +50,7 @@ type CommandResponse = { stdout: string; stderr: string; exitCode: number | null
type EventType = 'assetUpload' | 'assetUpdate' | 'assetDelete' | 'userDelete' | 'assetHidden';
type WaitOptions = { event: EventType; id?: string; total?: number; timeout?: number };
type AdminSetupOptions = { onboarding?: boolean };
type FileData = { bytes?: Buffer; filename: string };
type AssetData = { bytes?: Buffer; filename: string };
const dbUrl = 'postgres://postgres:postgres@127.0.0.1:5433/immich';
export const baseUrl = 'http://127.0.0.1:2283';
@@ -291,10 +291,7 @@ export const utils = {
createAsset: async (
accessToken: string,
dto?: Partial<Omit<AssetMediaCreateDto, 'assetData' | 'sidecarData'>> & {
assetData?: FileData;
sidecarData?: FileData;
},
dto?: Partial<Omit<AssetMediaCreateDto, 'assetData'>> & { assetData?: AssetData },
) => {
const _dto = {
deviceAssetId: 'test-1',
@@ -316,10 +313,6 @@ export const utils = {
.attach('assetData', assetData, filename)
.set('Authorization', `Bearer ${accessToken}`);
if (dto?.sidecarData?.bytes) {
void builder.attach('sidecarData', dto.sidecarData.bytes, dto.sidecarData.filename);
}
for (const [key, value] of Object.entries(_dto)) {
void builder.field(key, String(value));
}
@@ -332,7 +325,7 @@ export const utils = {
replaceAsset: async (
accessToken: string,
assetId: string,
dto?: Partial<Omit<AssetMediaCreateDto, 'assetData'>> & { assetData?: FileData },
dto?: Partial<Omit<AssetMediaCreateDto, 'assetData'>> & { assetData?: AssetData },
) => {
const _dto = {
deviceAssetId: 'test-1',
@@ -424,12 +417,12 @@ export const utils = {
createPartner: (accessToken: string, id: string) => createPartner({ id }, { headers: asBearerAuth(accessToken) }),
setAuthCookies: async (context: BrowserContext, accessToken: string, domain = '127.0.0.1') =>
setAuthCookies: async (context: BrowserContext, accessToken: string) =>
await context.addCookies([
{
name: 'immich_access_token',
value: accessToken,
domain,
domain: '127.0.0.1',
path: '/',
expires: 1_742_402_728,
httpOnly: true,
@@ -439,7 +432,7 @@ export const utils = {
{
name: 'immich_auth_type',
value: 'password',
domain,
domain: '127.0.0.1',
path: '/',
expires: 1_742_402_728,
httpOnly: true,
@@ -449,7 +442,7 @@ export const utils = {
{
name: 'immich_is_authenticated',
value: 'true',
domain,
domain: '127.0.0.1',
path: '/',
expires: 1_742_402_728,
httpOnly: false,

View File

@@ -10,9 +10,6 @@ test.describe('Asset Viewer Navbar', () => {
utils.initSdk();
await utils.resetDatabase();
admin = await utils.adminSetup();
});
test.beforeEach(async () => {
asset = await utils.createAsset(admin.accessToken);
});
@@ -52,14 +49,4 @@ test.describe('Asset Viewer Navbar', () => {
}
});
});
test.describe('actions', () => {
test('favorite asset with shortcut', async ({ context, page }) => {
await utils.setAuthCookies(context, admin.accessToken);
await page.goto(`/photos/${asset.id}`);
await page.waitForSelector('#immich-asset-viewer');
await page.keyboard.press('f');
await expect(page.locator('#notification-list').getByTestId('message')).toHaveText('Added to favorites');
});
});
});

View File

@@ -1,56 +0,0 @@
import { AssetMediaResponseDto, LoginResponseDto } from '@immich/sdk';
import { expect, type Page, test } from '@playwright/test';
import { utils } from 'src/utils';
test.describe('Slideshow', () => {
let admin: LoginResponseDto;
let asset: AssetMediaResponseDto;
test.beforeAll(async () => {
utils.initSdk();
await utils.resetDatabase();
admin = await utils.adminSetup();
asset = await utils.createAsset(admin.accessToken);
});
const openSlideshow = async (page: Page) => {
await page.goto(`/photos/${asset.id}`);
await page.waitForSelector('#immich-asset-viewer');
await page.getByRole('button', { name: 'More' }).click();
await page.getByRole('menuitem', { name: 'Slideshow' }).click();
};
test('open slideshow', async ({ context, page }) => {
await utils.setAuthCookies(context, admin.accessToken);
await openSlideshow(page);
await expect(page.getByRole('button', { name: 'Exit Slideshow' })).toBeVisible();
});
test('exit slideshow with button', async ({ context, page }) => {
await utils.setAuthCookies(context, admin.accessToken);
await openSlideshow(page);
const exitButton = page.getByRole('button', { name: 'Exit Slideshow' });
await exitButton.click();
await expect(exitButton).not.toBeVisible();
});
test('exit slideshow with shortcut', async ({ context, page }) => {
await utils.setAuthCookies(context, admin.accessToken);
await openSlideshow(page);
const exitButton = page.getByRole('button', { name: 'Exit Slideshow' });
await expect(exitButton).toBeVisible();
await page.keyboard.press('Escape');
await expect(exitButton).not.toBeVisible();
});
test('favorite shortcut is disabled', async ({ context, page }) => {
await utils.setAuthCookies(context, admin.accessToken);
await openSlideshow(page);
await expect(page.getByRole('button', { name: 'Exit Slideshow' })).toBeVisible();
await page.keyboard.press('f');
await expect(page.locator('#notification-list')).not.toBeVisible();
});
});

View File

@@ -13,7 +13,7 @@ test.describe('Registration', () => {
test('admin registration', async ({ page }) => {
// welcome
await page.goto('/');
await page.getByRole('link', { name: 'Getting Started' }).click();
await page.getByRole('button', { name: 'Getting Started' }).click();
// register
await expect(page).toHaveTitle(/Admin Registration/);

View File

@@ -25,7 +25,7 @@ test.describe('Photo Viewer', () => {
test('initially shows a loading spinner', async ({ page }) => {
await page.route(`/api/assets/${asset.id}/thumbnail**`, async (route) => {
// slow down the request for thumbnail, so spinner has chance to show up
// slow down the request for thumbnail, so spiner has chance to show up
await new Promise((f) => setTimeout(f, 2000));
await route.continue();
});
@@ -40,7 +40,7 @@ test.describe('Photo Viewer', () => {
await page.goto(`/photos/${asset.id}`);
await expect.poll(async () => await imageLocator(page).getAttribute('src')).toContain('thumbnail');
const box = await imageLocator(page).boundingBox();
expect(box).toBeTruthy();
expect(box).toBeTruthy;
const { x, y, width, height } = box!;
await page.mouse.move(x + width / 2, y + height / 2);
await page.mouse.wheel(0, -1);

View File

@@ -1,25 +0,0 @@
import { LoginResponseDto } from '@immich/sdk';
import { expect, test } from '@playwright/test';
import { utils } from 'src/utils';
test.describe('Websocket', () => {
let admin: LoginResponseDto;
test.beforeAll(async () => {
utils.initSdk();
await utils.resetDatabase();
admin = await utils.adminSetup();
});
test('connects using ipv4', async ({ page, context }) => {
await utils.setAuthCookies(context, admin.accessToken);
await page.goto('http://127.0.0.1:2283/');
await expect(page.locator('#sidebar')).toContainText('Server Online');
});
test('connects using ipv6', async ({ page, context }) => {
await utils.setAuthCookies(context, admin.accessToken, '[::1]');
await page.goto('http://[::1]:2283/');
await expect(page.locator('#sidebar')).toContainText('Server Online');
});
});

View File

@@ -13,7 +13,6 @@ export default defineConfig({
include: ['src/{api,cli,immich-admin}/specs/*.e2e-spec.ts'],
globalSetup,
testTimeout: 15_000,
pool: 'threads',
poolOptions: {
threads: {
singleThread: true,

View File

@@ -1,21 +1,23 @@
ARG DEVICE=cpu
FROM python:3.11-bookworm@sha256:d0131ce0ff4bdb5e9eae6bc86ebde891c207d5cac1f3f582b5de0f903cc68384 AS builder-cpu
FROM python:3.11-bookworm@sha256:7bec1574675e7fd9e3a540a03cd7d6811c59ca261bd300cd665369d8f435298a as builder-cpu
FROM builder-cpu AS builder-openvino
FROM openvino/ubuntu22_runtime:2023.3.0@sha256:176646df619032ea6c10faf842867119c393e7497b7f88b5e307e932a0fd5aa8 as builder-openvino
USER root
RUN apt-get update && apt-get install -y --no-install-recommends python3-dev
FROM builder-cpu AS builder-cuda
FROM builder-cpu as builder-cuda
FROM builder-cpu AS builder-armnn
FROM builder-cpu as builder-armnn
ENV ARMNN_PATH=/opt/armnn
COPY ann /opt/ann
RUN mkdir /opt/armnn && \
curl -SL "https://github.com/ARM-software/armnn/releases/download/v24.05/ArmNN-linux-aarch64.tar.gz" | tar -zx -C /opt/armnn && \
curl -SL "https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-linux-aarch64.tar.gz" | tar -zx -C /opt/armnn && \
cd /opt/ann && \
sh build.sh
FROM builder-${DEVICE} AS builder
FROM builder-${DEVICE} as builder
ARG DEVICE
ENV PYTHONDONTWRITEBYTECODE=1 \
@@ -34,27 +36,25 @@ RUN python3 -m venv /opt/venv
COPY poetry.lock pyproject.toml ./
RUN poetry install --sync --no-interaction --no-ansi --no-root --with ${DEVICE} --without dev
FROM python:3.11-slim-bookworm@sha256:a90e299af8a9cd6b59c4aaed2b024c78561476978244a1ab89742a4a5ac8c974 AS prod-cpu
FROM python:3.11-slim-bookworm@sha256:17ec9dc2367aa748559d0212f34665ec4df801129de32db705ea34654b5bc77a as prod-cpu
FROM prod-cpu AS prod-openvino
FROM openvino/ubuntu22_runtime:2023.3.0@sha256:176646df619032ea6c10faf842867119c393e7497b7f88b5e307e932a0fd5aa8 as prod-openvino
USER root
# TODO: remove this once the image has the fix for https://github.com/intel/compute-runtime/issues/710
ENV NEOReadDebugKeys=1 \
OverrideGpuAddressSpace=48
COPY scripts/configure-apt.sh ./
RUN ./configure-apt.sh && \
apt-get update && \
apt-get install -t unstable --no-install-recommends -yqq intel-opencl-icd && \
rm configure-apt.sh
FROM nvidia/cuda:12.3.2-cudnn9-runtime-ubuntu22.04@sha256:fa44193567d1908f7ca1f3abf8623ce9c63bc8cba7bcfdb32702eb04d326f7a8 AS prod-cuda
FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04@sha256:2d913b09e6be8387e1a10976933642c73c840c0b735f0bf3c28d97fc9bc422e0 as prod-cuda
COPY --from=builder-cuda /usr/local/bin/python3 /usr/local/bin/python3
COPY --from=builder-cuda /usr/local/lib/python3.11 /usr/local/lib/python3.11
COPY --from=builder-cuda /usr/local/lib/libpython3.11.so /usr/local/lib/libpython3.11.so
FROM prod-cpu AS prod-armnn
FROM prod-cpu as prod-armnn
ENV LD_LIBRARY_PATH=/opt/armnn
RUN apt-get update && apt-get install -y --no-install-recommends ocl-icd-libopencl1 mesa-opencl-icd libgomp1 && \
RUN apt-get update && apt-get install -y --no-install-recommends ocl-icd-libopencl1 mesa-opencl-icd && \
rm -rf /var/lib/apt/lists/* && \
mkdir --parents /etc/OpenCL/vendors && \
echo "/usr/lib/libmali.so" > /etc/OpenCL/vendors/mali.icd && \
@@ -70,13 +70,10 @@ COPY --from=builder-armnn \
/opt/ann/build.sh \
/opt/armnn/
FROM prod-${DEVICE} AS prod
ARG DEVICE
FROM prod-${DEVICE} as prod
RUN apt-get update && \
apt-get install -y --no-install-recommends tini $(if ! [ "$DEVICE" = "openvino" ]; then echo "libmimalloc2.0"; fi) && \
apt-get autoremove -yqq && \
apt-get clean && \
apt-get install -y --no-install-recommends tini libmimalloc2.0 && \
rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app

View File

@@ -48,22 +48,21 @@ public:
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
NetworkId netId = -2;
while (netId == -2)
INetworkPtr network = loadModel(modelPath);
IOptimizedNetworkPtr optNet = OptimizeNetwork(network.get(), fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
const IOInfos infos = getIOInfos(optNet.get());
NetworkId netId;
mutex.lock();
Status status = runtime->LoadNetwork(netId, std::move(optNet));
mutex.unlock();
if (status != Status::Success)
{
try
{
netId = loadInternal(modelPath, fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
}
catch (InvalidArgumentException e)
{
// fp16 models do not support the forced fp16-turbo (runtime fp32->fp16 conversion)
if (fp16)
fp16 = false;
else
netId = -1;
}
return -1;
}
spinLock.lock();
ioInfos[netId] = infos;
mutexes.emplace(netId, std::make_unique<std::mutex>());
spinLock.unlock();
return netId;
}
@@ -118,8 +117,6 @@ public:
Ann(int tuningLevel, const char *tuningFile)
{
IRuntime::CreationOptions runtimeOptions;
runtimeOptions.m_ProfilingOptions.m_EnableProfiling = false;
runtimeOptions.m_ProfilingOptions.m_TimelineEnabled = false;
BackendOptions backendOptions{"GpuAcc",
{
{"TuningLevel", tuningLevel},
@@ -136,30 +133,6 @@ public:
};
private:
int loadInternal(const char *modelPath,
bool fastMath,
bool fp16,
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
NetworkId netId = -1;
INetworkPtr network = loadModel(modelPath);
IOptimizedNetworkPtr optNet = OptimizeNetwork(network.get(), fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
const IOInfos infos = getIOInfos(optNet.get());
mutex.lock();
Status status = runtime->LoadNetwork(netId, std::move(optNet));
mutex.unlock();
if (status != Status::Success)
{
return -1;
}
spinLock.lock();
ioInfos[netId] = infos;
mutexes.emplace(netId, std::make_unique<std::mutex>());
spinLock.unlock();
return netId;
}
INetworkPtr loadModel(const char *modelPath)
{
const auto path = std::string(modelPath);
@@ -199,8 +172,6 @@ private:
options.SetReduceFp32ToFp16(fp16);
options.SetShapeInferenceMethod(shapeInferenceMethod);
options.SetAllowExpandedDims(allowExpandedDims);
options.SetDebugToFileEnabled(false);
options.SetProfilingEnabled(false);
BackendOptions gpuAcc("GpuAcc", {{"FastMathEnabled", fastMath}});
if (cachedNetworkPath)
@@ -261,8 +232,8 @@ private:
IRuntime *runtime;
std::map<NetworkId, IOInfos> ioInfos;
std::map<NetworkId, std::unique_ptr<std::mutex>> mutexes; // mutex per network to not execute the same network concurrently
std::mutex mutex; // global mutex for load/unload calls to the runtime
SpinLock spinLock; // fast spin lock to guard access to the ioInfos and mutexes maps
std::mutex mutex; // global mutex for load/unload calls to the runtime
SpinLock spinLock; // fast spin lock to guard access to the ioInfos and mutexes maps
};
extern "C" void *init(int logLevel, int tuningLevel, const char *tuningFile)

View File

@@ -65,7 +65,7 @@ class Ann(metaclass=_Singleton):
self.input_shapes: dict[int, tuple[tuple[int], ...]] = {}
self.ann: int | None = None
self.new()
if self.tuning_file is not None:
# make sure tuning file exists (without clearing contents)
# once filled, the tuning file reduces the cost/time of the first
@@ -105,7 +105,7 @@ class Ann(metaclass=_Singleton):
raise ValueError("model_path must be a file with extension .armnn, .tflite or .onnx")
if not exists(model_path):
raise ValueError("model_path must point to an existing file!")
save_cached_network = False
if cached_network_path is not None and not exists(cached_network_path):
save_cached_network = True
@@ -120,8 +120,6 @@ class Ann(metaclass=_Singleton):
save_cached_network,
cached_network_path.encode() if cached_network_path is not None else None,
)
if net_id < 0:
raise ValueError("Cannot load model!")
self.input_shapes[net_id] = tuple(
self.shape(net_id, input=True, index=i) for i in range(self.tensors(net_id, input=True))

View File

@@ -1,3 +0,0 @@
#!/usr/bin/env sh
g++ -shared -O3 -o libann.so -fuse-ld=gold -std=c++17 -I"$ARMNN_PATH"/include -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -L"$ARMNN_PATH" ann.cpp

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env sh
cd armnn-23.11/ || exit
g++ -o ../armnnconverter -O1 -DARMNN_ONNX_PARSER -DARMNN_SERIALIZER -DARMNN_TF_LITE_PARSER -fuse-ld=gold -std=c++17 -Iinclude -Isrc/armnnUtils -Ithird-party -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -larmnnSerializer -L../armnn src/armnnConverter/ArmnnConverter.cpp

View File

@@ -1,201 +0,0 @@
name: annexport
channels:
- pytorch
- nvidia
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- aiohttp=3.9.1=py310h2372a71_0
- aiosignal=1.3.1=pyhd8ed1ab_0
- arpack=3.8.0=nompi_h0baa96a_101
- async-timeout=4.0.3=pyhd8ed1ab_0
- attrs=23.1.0=pyh71513ae_1
- aws-c-auth=0.7.3=h28f7589_1
- aws-c-cal=0.6.1=hc309b26_1
- aws-c-common=0.9.0=hd590300_0
- aws-c-compression=0.2.17=h4d4d85c_2
- aws-c-event-stream=0.3.1=h2e3709c_4
- aws-c-http=0.7.11=h00aa349_4
- aws-c-io=0.13.32=he9a53bd_1
- aws-c-mqtt=0.9.3=hb447be9_1
- aws-c-s3=0.3.14=hf3aad02_1
- aws-c-sdkutils=0.1.12=h4d4d85c_1
- aws-checksums=0.1.17=h4d4d85c_1
- aws-crt-cpp=0.21.0=hb942446_5
- aws-sdk-cpp=1.10.57=h85b1a90_19
- blas=2.120=openblas
- blas-devel=3.9.0=20_linux64_openblas
- brotli-python=1.0.9=py310hd8f1fbe_9
- bzip2=1.0.8=hd590300_5
- c-ares=1.23.0=hd590300_0
- ca-certificates=2023.11.17=hbcca054_0
- certifi=2023.11.17=pyhd8ed1ab_0
- charset-normalizer=3.3.2=pyhd8ed1ab_0
- click=8.1.7=unix_pyh707e725_0
- colorama=0.4.6=pyhd8ed1ab_0
- coloredlogs=15.0.1=pyhd8ed1ab_3
- cuda-cudart=11.7.99=0
- cuda-cupti=11.7.101=0
- cuda-libraries=11.7.1=0
- cuda-nvrtc=11.7.99=0
- cuda-nvtx=11.7.91=0
- cuda-runtime=11.7.1=0
- dataclasses=0.8=pyhc8e2a94_3
- datasets=2.14.7=pyhd8ed1ab_0
- dill=0.3.7=pyhd8ed1ab_0
- filelock=3.13.1=pyhd8ed1ab_0
- flatbuffers=23.5.26=h59595ed_1
- freetype=2.12.1=h267a509_2
- frozenlist=1.4.0=py310h2372a71_1
- fsspec=2023.10.0=pyhca7485f_0
- ftfy=6.1.3=pyhd8ed1ab_0
- gflags=2.2.2=he1b5a44_1004
- glog=0.6.0=h6f12383_0
- glpk=5.0=h445213a_0
- gmp=6.3.0=h59595ed_0
- gmpy2=2.1.2=py310h3ec546c_1
- huggingface_hub=0.17.3=pyhd8ed1ab_0
- humanfriendly=10.0=pyhd8ed1ab_6
- icu=73.2=h59595ed_0
- idna=3.6=pyhd8ed1ab_0
- importlib-metadata=7.0.0=pyha770c72_0
- importlib_metadata=7.0.0=hd8ed1ab_0
- joblib=1.3.2=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- krb5=1.21.2=h659d440_0
- lcms2=2.15=h7f713cb_2
- ld_impl_linux-64=2.40=h41732ed_0
- lerc=4.0.0=h27087fc_0
- libabseil=20230125.3=cxx17_h59595ed_0
- libarrow=12.0.1=hb87d912_8_cpu
- libblas=3.9.0=20_linux64_openblas
- libbrotlicommon=1.0.9=h166bdaf_9
- libbrotlidec=1.0.9=h166bdaf_9
- libbrotlienc=1.0.9=h166bdaf_9
- libcblas=3.9.0=20_linux64_openblas
- libcrc32c=1.1.2=h9c3ff4c_0
- libcublas=11.10.3.66=0
- libcufft=10.7.2.124=h4fbf590_0
- libcufile=1.8.1.2=0
- libcurand=10.3.4.101=0
- libcurl=8.5.0=hca28451_0
- libcusolver=11.4.0.1=0
- libcusparse=11.7.4.91=0
- libdeflate=1.19=hd590300_0
- libedit=3.1.20191231=he28a2e2_2
- libev=4.33=hd590300_2
- libevent=2.1.12=hf998b51_1
- libffi=3.4.2=h7f98852_5
- libgcc-ng=13.2.0=h807b86a_3
- libgfortran-ng=13.2.0=h69a702a_3
- libgfortran5=13.2.0=ha4646dd_3
- libgoogle-cloud=2.12.0=hac9eb74_1
- libgrpc=1.54.3=hb20ce57_0
- libhwloc=2.9.3=default_h554bfaf_1009
- libiconv=1.17=hd590300_1
- libjpeg-turbo=2.1.5.1=hd590300_1
- liblapack=3.9.0=20_linux64_openblas
- liblapacke=3.9.0=20_linux64_openblas
- libnghttp2=1.58.0=h47da74e_1
- libnpp=11.7.4.75=0
- libnsl=2.0.1=hd590300_0
- libnuma=2.0.16=h0b41bf4_1
- libnvjpeg=11.8.0.2=0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libpng=1.6.39=h753d276_0
- libprotobuf=3.21.12=hfc55251_2
- libsentencepiece=0.1.99=h180e1df_0
- libsqlite=3.44.2=h2797004_0
- libssh2=1.11.0=h0841786_0
- libstdcxx-ng=13.2.0=h7e041cc_3
- libthrift=0.18.1=h8fd135c_2
- libtiff=4.6.0=h29866fb_1
- libutf8proc=2.8.0=h166bdaf_0
- libuuid=2.38.1=h0b41bf4_0
- libwebp-base=1.3.2=hd590300_0
- libxcb=1.15=h0b41bf4_0
- libxml2=2.11.6=h232c23b_0
- libzlib=1.2.13=hd590300_5
- llvm-openmp=17.0.6=h4dfa4b3_0
- lz4-c=1.9.4=hcb278e6_0
- mkl=2022.2.1=h84fe81f_16997
- mkl-devel=2022.2.1=ha770c72_16998
- mkl-include=2022.2.1=h84fe81f_16997
- mpc=1.3.1=hfe3b2da_0
- mpfr=4.2.1=h9458935_0
- mpmath=1.3.0=pyhd8ed1ab_0
- multidict=6.0.4=py310h2372a71_1
- multiprocess=0.70.15=py310h2372a71_1
- ncurses=6.4=h59595ed_2
- numpy=1.26.2=py310hb13e2d6_0
- onnx=1.14.0=py310ha3deec4_1
- onnx2torch=1.5.13=pyhd8ed1ab_0
- onnxruntime=1.16.3=py310hd4b7fbc_1_cpu
- open-clip-torch=2.23.0=pyhd8ed1ab_1
- openblas=0.3.25=pthreads_h7a3da1a_0
- openjpeg=2.5.0=h488ebb8_3
- openssl=3.2.0=hd590300_1
- orc=1.9.0=h2f23424_1
- packaging=23.2=pyhd8ed1ab_0
- pandas=2.1.4=py310hcc13569_0
- pillow=10.0.1=py310h29da1c1_1
- pip=23.3.1=pyhd8ed1ab_0
- protobuf=4.21.12=py310heca2aa9_0
- pthread-stubs=0.4=h36c2ea0_1001
- pyarrow=12.0.1=py310h0576679_8_cpu
- pyarrow-hotfix=0.6=pyhd8ed1ab_0
- pysocks=1.7.1=pyha2e5f31_6
- python=3.10.13=hd12c33a_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python-flatbuffers=23.5.26=pyhd8ed1ab_0
- python-tzdata=2023.3=pyhd8ed1ab_0
- python-xxhash=3.4.1=py310h2372a71_0
- python_abi=3.10=4_cp310
- pytorch=1.13.1=cpu_py310hd11e9c7_1
- pytorch-cuda=11.7=h778d358_5
- pytorch-mutex=1.0=cuda
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py310h2372a71_1
- rdma-core=28.9=h59595ed_1
- re2=2023.03.02=h8c504da_0
- readline=8.2=h8228510_1
- regex=2023.10.3=py310h2372a71_0
- requests=2.31.0=pyhd8ed1ab_0
- s2n=1.3.49=h06160fa_0
- sacremoses=0.0.53=pyhd8ed1ab_0
- safetensors=0.3.3=py310hcb5633a_1
- sentencepiece=0.1.99=hff52083_0
- sentencepiece-python=0.1.99=py310hebdb9f0_0
- sentencepiece-spm=0.1.99=h180e1df_0
- setuptools=68.2.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- sleef=3.5.1=h9b69904_2
- snappy=1.1.10=h9fff704_0
- sympy=1.12=pypyh9d50eac_103
- tbb=2021.11.0=h00ab1b0_0
- texttable=1.7.0=pyhd8ed1ab_0
- timm=0.9.12=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- tokenizers=0.14.1=py310h320607d_2
- torchvision=0.14.1=cpu_py310hd3d2ac3_1
- tqdm=4.66.1=pyhd8ed1ab_0
- transformers=4.35.2=pyhd8ed1ab_0
- typing-extensions=4.9.0=hd8ed1ab_0
- typing_extensions=4.9.0=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- ucx=1.14.1=h64cca9d_5
- urllib3=2.1.0=pyhd8ed1ab_0
- wcwidth=0.2.12=pyhd8ed1ab_0
- wheel=0.42.0=pyhd8ed1ab_0
- xorg-libxau=1.0.11=hd590300_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xxhash=0.8.2=hd590300_0
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- yarl=1.9.3=py310h2372a71_0
- zipp=3.17.0=pyhd8ed1ab_0
- zlib=1.2.13=hd590300_5
- zstd=1.5.5=hfc55251_0
- pip:
- git+https://github.com/fyfrey/TinyNeuralNetwork.git

View File

@@ -1,157 +0,0 @@
import logging
import os
import platform
import subprocess
from abc import abstractmethod
import onnx
import open_clip
import torch
from onnx2torch import convert
from onnxruntime.tools.onnx_model_utils import fix_output_shapes, make_input_shape_fixed
from tinynn.converter import TFLiteConverter
class ExportBase(torch.nn.Module):
input_shape: tuple[int, ...]
def __init__(self, device: torch.device, name: str):
super().__init__()
self.device = device
self.name = name
self.optimize = 5
self.nchw_transpose = False
@abstractmethod
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor]:
pass
def dummy_input(self) -> torch.FloatTensor:
return torch.rand((1, 3, 224, 224), device=self.device)
class ArcFace(ExportBase):
input_shape = (1, 3, 112, 112)
def __init__(self, onnx_model_path: str, device: torch.device):
name, _ = os.path.splitext(os.path.basename(onnx_model_path))
super().__init__(device, name)
onnx_model = onnx.load_model(onnx_model_path)
make_input_shape_fixed(onnx_model.graph, onnx_model.graph.input[0].name, self.input_shape)
fix_output_shapes(onnx_model)
self.model = convert(onnx_model).to(device)
if self.device.type == "cuda":
self.model = self.model.half()
def forward(self, input_tensor: torch.Tensor) -> torch.FloatTensor:
embedding: torch.FloatTensor = self.model(
input_tensor.half() if self.device.type == "cuda" else input_tensor
).float()
assert isinstance(embedding, torch.FloatTensor)
return embedding
def dummy_input(self) -> torch.FloatTensor:
return torch.rand(self.input_shape, device=self.device)
class RetinaFace(ExportBase):
input_shape = (1, 3, 640, 640)
def __init__(self, onnx_model_path: str, device: torch.device):
name, _ = os.path.splitext(os.path.basename(onnx_model_path))
super().__init__(device, name)
self.optimize = 3
self.model = convert(onnx_model_path).eval().to(device)
if self.device.type == "cuda":
self.model = self.model.half()
def forward(self, input_tensor: torch.Tensor) -> tuple[torch.FloatTensor]:
out: torch.Tensor = self.model(input_tensor.half() if self.device.type == "cuda" else input_tensor)
return tuple(o.float() for o in out)
def dummy_input(self) -> torch.FloatTensor:
return torch.rand(self.input_shape, device=self.device)
class ClipVision(ExportBase):
input_shape = (1, 3, 224, 224)
def __init__(self, model_name: str, weights: str, device: torch.device):
super().__init__(device, model_name + "__" + weights)
self.model = open_clip.create_model(
model_name,
weights,
precision="fp16" if device.type == "cuda" else "fp32",
jit=False,
require_pretrained=True,
device=device,
)
def forward(self, input_tensor: torch.Tensor) -> torch.FloatTensor:
embedding: torch.Tensor = self.model.encode_image(
input_tensor.half() if self.device.type == "cuda" else input_tensor,
normalize=True,
).float()
return embedding
def export(model: ExportBase) -> None:
model.eval()
for param in model.parameters():
param.requires_grad = False
dummy_input = model.dummy_input()
model(dummy_input)
jit = torch.jit.trace(model, dummy_input) # type: ignore[no-untyped-call,attr-defined]
tflite_model_path = f"output/{model.name}.tflite"
os.makedirs("output", exist_ok=True)
converter = TFLiteConverter(
jit,
dummy_input,
tflite_model_path,
optimize=model.optimize,
nchw_transpose=model.nchw_transpose,
)
# segfaults on ARM, must run on x86_64 / AMD64
converter.convert()
armnn_model_path = f"output/{model.name}.armnn"
os.environ["LD_LIBRARY_PATH"] = "armnn"
subprocess.run(
[
"./armnnconverter",
"-f",
"tflite-binary",
"-m",
tflite_model_path,
"-i",
"input_tensor",
"-o",
"output_tensor",
"-p",
armnn_model_path,
]
)
def main() -> None:
if platform.machine() not in ("x86_64", "AMD64"):
raise RuntimeError(f"Can only run on x86_64 / AMD64, not {platform.machine()}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
logging.warning(
"No CUDA available, cannot create fp16 model! proceeding to create a fp32 model (use only for testing)"
)
models = [
ClipVision("ViT-B-32", "openai", device),
ArcFace("buffalo_l_rec.onnx", device),
RetinaFace("buffalo_l_det.onnx", device),
]
for model in models:
export(model)
if __name__ == "__main__":
with torch.no_grad():
main()

View File

@@ -30,8 +30,6 @@ class Settings(BaseSettings):
model_inter_op_threads: int = 0
model_intra_op_threads: int = 0
ann: bool = True
ann_fp16_turbo: bool = False
ann_tuning_level: int = 2
preload: PreloadModelData | None = None
class Config:

View File

@@ -2,64 +2,53 @@ from app.config import clean_name
from app.schemas import ModelSource
_OPENCLIP_MODELS = {
"RN101__openai",
"RN101__yfcc15m",
"RN50__cc12m",
"RN50__openai",
"RN50__yfcc15m",
"RN50x16__openai",
"RN50__cc12m",
"RN101__openai",
"RN101__yfcc15m",
"RN50x4__openai",
"RN50x16__openai",
"RN50x64__openai",
"ViT-B-16-SigLIP-256__webli",
"ViT-B-16-SigLIP-384__webli",
"ViT-B-16-SigLIP-512__webli",
"ViT-B-16-SigLIP-i18n-256__webli",
"ViT-B-16-SigLIP__webli",
"ViT-B-16-plus-240__laion400m_e31",
"ViT-B-16-plus-240__laion400m_e32",
"ViT-B-16__laion400m_e31",
"ViT-B-16__laion400m_e32",
"ViT-B-16__openai",
"ViT-B-32__laion2b-s34b-b79k",
"ViT-B-32__openai",
"ViT-B-32__laion2b_e16",
"ViT-B-32__laion400m_e31",
"ViT-B-32__laion400m_e32",
"ViT-B-32__openai",
"ViT-H-14-378-quickgelu__dfn5b",
"ViT-H-14-quickgelu__dfn5b",
"ViT-H-14__laion2b-s32b-b79k",
"ViT-L-14-336__openai",
"ViT-L-14-quickgelu__dfn2b",
"ViT-L-14__laion2b-s32b-b82k",
"ViT-B-32__laion2b-s34b-b79k",
"ViT-B-16__openai",
"ViT-B-16__laion400m_e31",
"ViT-B-16__laion400m_e32",
"ViT-B-16-plus-240__laion400m_e31",
"ViT-B-16-plus-240__laion400m_e32",
"ViT-L-14__openai",
"ViT-L-14__laion400m_e31",
"ViT-L-14__laion400m_e32",
"ViT-L-14__openai",
"ViT-L-16-SigLIP-256__webli",
"ViT-L-16-SigLIP-384__webli",
"ViT-SO400M-14-SigLIP-384__webli",
"ViT-L-14__laion2b-s32b-b82k",
"ViT-L-14-336__openai",
"ViT-H-14__laion2b-s32b-b79k",
"ViT-g-14__laion2b-s12b-b42k",
"XLM-Roberta-Base-ViT-B-32__laion5b_s13b_b90k",
"ViT-L-14-quickgelu__dfn2b",
"ViT-H-14-quickgelu__dfn5b",
"ViT-H-14-378-quickgelu__dfn5b",
"XLM-Roberta-Large-ViT-H-14__frozen_laion5b_s13b_b90k",
"nllb-clip-base-siglip__mrl",
"nllb-clip-base-siglip__v1",
"nllb-clip-large-siglip__mrl",
"nllb-clip-large-siglip__v1",
}
_MCLIP_MODELS = {
"LABSE-Vit-L-14",
"XLM-Roberta-Large-Vit-B-16Plus",
"XLM-Roberta-Large-Vit-B-32",
"XLM-Roberta-Large-Vit-B-16Plus",
"XLM-Roberta-Large-Vit-L-14",
}
_INSIGHTFACE_MODELS = {
"antelopev2",
"buffalo_s",
"buffalo_m",
"buffalo_l",
"buffalo_m",
"buffalo_s",
}

View File

@@ -47,8 +47,8 @@ def pil_to_cv2(image: Image.Image) -> NDArray[np.uint8]:
def decode_pil(image_bytes: bytes | IO[bytes] | Image.Image) -> Image.Image:
if isinstance(image_bytes, Image.Image):
return image_bytes
image: Image.Image = Image.open(BytesIO(image_bytes) if isinstance(image_bytes, bytes) else image_bytes)
image.load()
image = Image.open(BytesIO(image_bytes) if isinstance(image_bytes, bytes) else image_bytes)
image.load() # type: ignore
if not image.mode == "RGB":
image = image.convert("RGB")
return image

View File

@@ -20,13 +20,12 @@ class AnnSession:
def __init__(self, model_path: Path, cache_dir: Path = settings.cache_folder) -> None:
self.model_path = model_path
self.cache_dir = cache_dir
self.ann = Ann(tuning_level=settings.ann_tuning_level, tuning_file=(cache_dir / "gpu-tuning.ann").as_posix())
self.ann = Ann(tuning_level=3, tuning_file=(cache_dir / "gpu-tuning.ann").as_posix())
log.info("Loading ANN model %s ...", model_path)
self.model = self.ann.load(
model_path.as_posix(),
cached_network_path=model_path.with_suffix(".anncache").as_posix(),
fp16=settings.ann_fp16_turbo,
)
log.info("Loaded ANN model with ID %d", self.model)

View File

@@ -83,21 +83,17 @@ class OrtSession:
@property
def _provider_options_default(self) -> list[dict[str, Any]]:
provider_options = []
options = []
for provider in self.providers:
match provider:
case "CPUExecutionProvider" | "CUDAExecutionProvider":
options = {"arena_extend_strategy": "kSameAsRequested"}
option = {"arena_extend_strategy": "kSameAsRequested"}
case "OpenVINOExecutionProvider":
options = {
"device_type": "GPU",
"precision": "FP32",
"cache_dir": (self.model_path.parent / "openvino").as_posix(),
}
option = {"device_type": "GPU_FP32", "cache_dir": (self.model_path.parent / "openvino").as_posix()}
case _:
options = {}
provider_options.append(options)
return provider_options
option = {}
options.append(option)
return options
@property
def sess_options(self) -> ort.SessionOptions:

View File

@@ -212,7 +212,7 @@ class TestOrtSession:
session = OrtSession(model_path, providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"])
assert session.provider_options == [
{"device_type": "GPU", "precision": "FP32", "cache_dir": "/cache/ViT-B-32__openai/openvino"},
{"device_type": "GPU_FP32", "cache_dir": "/cache/ViT-B-32__openai/openvino"},
{"arena_extend_strategy": "kSameAsRequested"},
]
@@ -268,9 +268,9 @@ class TestAnnSession:
AnnSession(model_path, cache_dir)
ann_session.assert_called_once_with(tuning_level=2, tuning_file=(cache_dir / "gpu-tuning.ann").as_posix())
ann_session.assert_called_once_with(tuning_level=3, tuning_file=(cache_dir / "gpu-tuning.ann").as_posix())
ann_session.return_value.load.assert_called_once_with(
model_path.as_posix(), cached_network_path=model_path.with_suffix(".anncache").as_posix(), fp16=False
model_path.as_posix(), cached_network_path=model_path.with_suffix(".anncache").as_posix()
)
info.assert_has_calls(
[

View File

@@ -0,0 +1,35 @@
FROM mambaorg/micromamba:bookworm-slim@sha256:333f7598ff2c2400fb10bfe057709c68b7daab5d847143af85abcf224a07271a as builder
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
curl \
git
USER $MAMBA_USER
WORKDIR /home/mambauser
ENV ARMNN_PATH=armnn
COPY --chown=$MAMBA_USER:$MAMBA_USER scripts/* .
RUN ./download-armnn.sh && \
./build-converter.sh && \
./build.sh
COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock.yml .
RUN micromamba create -y -p /home/mambauser/venv -f conda-lock.yml && \
micromamba clean --all --yes
ENV PATH="/home/mambauser/venv/bin:${PATH}"
FROM gcr.io/distroless/base-debian12
# FROM mambaorg/micromamba:bookworm-slim@sha256:333f7598ff2c2400fb10bfe057709c68b7daab5d847143af85abcf224a07271a
WORKDIR /export/ann
ENV PYTHONDONTWRITEBYTECODE=1 \
LD_LIBRARY_PATH=/export/ann/armnn \
PATH="/opt/venv/bin:${PATH}"
COPY --from=builder /home/mambauser/armnnconverter /home/mambauser/armnn ./
COPY --from=builder /home/mambauser/venv /opt/venv
COPY --chown=$MAMBA_USER:$MAMBA_USER onnx2ann onnx2ann
ENTRYPOINT ["python", "-m", "onnx2ann"]

File diff suppressed because it is too large

View File

@@ -0,0 +1,21 @@
name: onnx2ann
channels:
- conda-forge
dependencies:
- python>=3.11,<4.0
- onnx>=1.16.1
# - onnxruntime>=1.18.1 # conda only has gpu version
- psutil>=6.0.0
- flatbuffers>=24.3.25
- ml_dtypes>=0.3.1
- typer-slim>=0.12.3
- huggingface_hub>=0.23.4
- pip
- pip:
- onnxruntime>=1.18.1 # conda only has gpu version
- onnxsim>=0.4.36
- onnx2tf>=1.24.1
- onnx_graphsurgeon>=0.5.2
- simple_onnx_processing_tools>=1.1.32
- tf_keras>=2.16.0
- git+https://github.com/microsoft/onnxconverter-common.git

View File

@@ -0,0 +1,99 @@
import os
import platform
from typing import Annotated, Optional
import typer
from onnx2ann.export import Exporter, ModelType, Precision
app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
@app.command()
def export(
model_name: Annotated[
str, typer.Argument(..., help="The name of the model to be exported as it exists in Hugging Face.")
],
model_type: Annotated[ModelType, typer.Option(..., "--type", "-t", help="The type of model to be exported.")],
input_shapes: Annotated[
list[str],
typer.Option(
...,
"--input-shape",
"-s",
help="The shape of an input tensor to the model, each dimension separated by commas. "
"Multiple shapes can be provided for multiple inputs.",
),
],
precision: Annotated[
Precision,
typer.Option(
...,
"--precision",
"-p",
help="The precision of the exported model. `float16` requires a GPU.",
),
] = Precision.FLOAT32,
cache_dir: Annotated[
str,
typer.Option(
...,
"--cache-dir",
"-c",
help="Directory where pre-export models will be stored.",
envvar="CACHE_DIR",
show_envvar=True,
),
] = "~/.cache/huggingface",
output_dir: Annotated[
str,
typer.Option(
...,
"--output-dir",
"-o",
help="Directory where exported models will be stored.",
),
] = "output",
auth_token: Annotated[
Optional[str],
typer.Option(
...,
"--auth-token",
"-t",
help="If uploading models to Hugging Face, the auth token of the user or organisation.",
envvar="HF_AUTH_TOKEN",
show_envvar=True,
),
] = None,
force_export: Annotated[
bool,
typer.Option(
...,
"--force-export",
"-f",
help="Export the model even if an exported model already exists in the output directory.",
),
] = False,
) -> None:
if platform.machine() not in ("x86_64", "AMD64"):
msg = f"Can only run on x86_64 / AMD64, not {platform.machine()}"
raise RuntimeError(msg)
os.environ.setdefault("LD_LIBRARY_PATH", "armnn")
parsed_input_shapes = [tuple(map(int, shape.split(","))) for shape in input_shapes]
model = Exporter(
model_name, model_type, input_shapes=parsed_input_shapes, cache_dir=cache_dir, force_export=force_export
)
model_dir = os.path.join(output_dir, model_name)
output_dir = os.path.join(model_dir, model_type)
armnn_model = model.to_armnn(output_dir, precision)
if not auth_token:
return
from huggingface_hub import upload_file
relative_path = os.path.relpath(armnn_model, start=model_dir)
upload_file(path_or_fileobj=armnn_model, path_in_repo=relative_path, repo_id=model.repo_name, token=auth_token)
app()
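
(Illustrative sketch, not part of this diff.) The command above boils down to constructing an Exporter and calling to_armnn; a minimal equivalent in plain Python, assuming a hypothetical visual CLIP model and a 1x3x224x224 input shape (the model name and shape are examples, not values taken from this change):

from onnx2ann.export import Exporter, ModelType, Precision

# Roughly what the CLI above does for `-t visual -s 1,3,224,224`, minus the optional
# Hugging Face upload step; model name and shape here are examples only.
model = Exporter("ViT-B-32__openai", ModelType.VISUAL, input_shapes=[(1, 3, 224, 224)])
armnn_model = model.to_armnn("output/ViT-B-32__openai/visual", Precision.FLOAT32)
print(armnn_model)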

View File

@@ -0,0 +1,129 @@
import os
import subprocess
from enum import StrEnum
from onnx2ann.helpers import onnx_make_armnn_compatible, onnx_make_inputs_fixed
class ModelType(StrEnum):
VISUAL = "visual"
TEXTUAL = "textual"
RECOGNITION = "recognition"
DETECTION = "detection"
class Precision(StrEnum):
FLOAT16 = "float16"
FLOAT32 = "float32"
class Exporter:
def __init__(
self,
model_name: str,
model_type: str,
input_shapes: list[tuple[int, ...]],
optimization_level: int = 5,
cache_dir: str = os.environ.get("CACHE_DIR", "~/.cache/huggingface"),
force_export: bool = False,
):
self.model_name = model_name.split("/")[-1]
self.model_type = model_type
self.optimize = optimization_level
self.input_shapes = input_shapes
self.cache_dir = os.path.join(cache_dir, self.repo_name)
self.force_export = force_export
def download(self) -> str:
model_path = os.path.join(self.cache_dir, self.model_type, "model.onnx")
if os.path.isfile(model_path):
print(f"Model is already downloaded at {model_path}")
return model_path
from huggingface_hub import snapshot_download
snapshot_download(
self.repo_name, cache_dir=self.cache_dir, local_dir=self.cache_dir, local_dir_use_symlinks=False
)
return model_path
def to_onnx_static(self, precision: Precision) -> str:
import onnx
from onnxconverter_common import float16
onnx_path_original = self.download()
static_dir = os.path.join(self.cache_dir, self.model_type, "static")
static_path = os.path.join(static_dir, "model.onnx")
if self.force_export or not os.path.isfile(static_path):
print(f"Making {self} static")
os.makedirs(static_dir, exist_ok=True)
onnx_make_inputs_fixed(onnx_path_original, static_path, self.input_shapes)
onnx_make_armnn_compatible(static_path)
print(f"Finished making {self} static")
model = onnx.load(static_path)
self.inputs = [input_.name for input_ in model.graph.input]
self.outputs = [output_.name for output_ in model.graph.output]
if precision == Precision.FLOAT16:
static_path = os.path.join(static_dir, f"model_{precision}.onnx")
print(f"Converting {self} to {precision} precision")
model = float16.convert_float_to_float16(model, keep_io_types=True, disable_shape_infer=True)
onnx.save(model, static_path)
print(f"Finished converting {self} to {precision} precision")
# self.inputs, self.outputs = onnx_get_inputs_outputs(static_path)
return static_path
def to_tflite(self, output_dir: str, precision: Precision) -> str:
onnx_model = self.to_onnx_static(precision)
tflite_dir = os.path.join(output_dir, precision)
tflite_model = os.path.join(tflite_dir, f"model_{precision}.tflite")
if self.force_export or not os.path.isfile(tflite_model):
import onnx2tf
print(f"Exporting {self} to TFLite with {precision} precision (this might take a few minutes)")
onnx2tf.convert(
input_onnx_file_path=onnx_model,
output_folder_path=tflite_dir,
keep_shape_absolutely_input_names=self.inputs,
# verbosity="warn",
copy_onnx_input_output_names_to_tflite=True,
output_signaturedefs=True,
not_use_onnxsim=True,
)
print(f"Finished exporting {self} to TFLite with {precision} precision")
return tflite_model
def to_armnn(self, output_dir: str, precision: Precision) -> str:
armnn_model = os.path.join(output_dir, "model.armnn")
if not self.force_export and os.path.isfile(armnn_model):
return armnn_model
tflite_model_dir = os.path.join(output_dir, "tflite")
tflite_model = self.to_tflite(tflite_model_dir, precision)
args = ["./armnnconverter", "-f", "tflite-binary", "-m", tflite_model, "-p", armnn_model]
args.append("-i")
args.extend(self.inputs)
args.append("-o")
args.extend(self.outputs)
print(f"Exporting {self} to ARM NN with {precision} precision")
try:
if (stdout := subprocess.check_output(args, stderr=subprocess.STDOUT).decode()):
print(stdout)
print(f"Finished exporting {self} to ARM NN with {precision} precision")
except subprocess.CalledProcessError as e:
print(e.output.decode())
try:
from shutil import rmtree
rmtree(tflite_model_dir, ignore_errors=True)
finally:
raise e
@property
def repo_name(self) -> str:
return f"immich-app/{self.model_name}"
def __repr__(self) -> str:
return f"{self.model_name} ({self.model_type})"

View File

@@ -0,0 +1,260 @@
from typing import Any
def onnx_make_armnn_compatible(model_path: str) -> None:
"""
i can explain
armnn only supports up to 4d transposes, but the model has a 5d transpose due to a redundant unsqueeze
this function folds the unsqueeze+transpose+squeeze into a single 4d transpose
it also switches from gather ops to slices since armnn has different dimension semantics for gathers
also fixes batch normalization being in training mode
"""
import numpy as np
import onnx
from onnx_graphsurgeon import Constant, Node, Variable, export_onnx, import_onnx
proto = onnx.load(model_path)
graph = import_onnx(proto)
gather_idx = 1
squeeze_idx = 1
for node in graph.nodes:
for link1 in node.outputs:
if "Unsqueeze" in link1.name:
for node1 in link1.outputs:
for link2 in node1.outputs:
if "Transpose" in link2.name:
for node2 in link2.outputs:
if node2.attrs.get("perm") == [3, 1, 2, 0, 4]:
node2.attrs["perm"] = [2, 0, 1, 3]
link2.shape = link1.shape
for link3 in node2.outputs:
if "Squeeze" in link3.name:
link3.shape = [link3.shape[x] for x in [0, 1, 2, 4]]
for node3 in link3.outputs:
for link4 in node3.outputs:
link4.shape = link3.shape
try:
idx = link2.inputs.index(node1)
link2.inputs[idx] = node
except ValueError:
pass
node.outputs = [link2]
if "Gather" in link4.name:
for node4 in link4.outputs:
axis = node1.attrs.get("axis", 0)
index = node4.inputs[1].values
slice_link = Variable(
f"onnx::Slice_123{gather_idx}",
dtype=link4.dtype,
shape=[1] + link3.shape[1:],
)
slice_node = Node(
op="Slice",
inputs=[
link3,
Constant(
f"SliceStart_123{gather_idx}",
np.array([index]),
),
Constant(
f"SliceEnd_123{gather_idx}",
np.array([index + 1]),
),
Constant(
f"SliceAxis_123{gather_idx}",
np.array([axis]),
),
],
outputs=[slice_link],
name=f"Slice_123{gather_idx}",
)
graph.nodes.append(slice_node)
gather_idx += 1
for link5 in node4.outputs:
for node5 in link5.outputs:
try:
idx = node5.inputs.index(link5)
node5.inputs[idx] = slice_link
except ValueError:
pass
elif node.op == "LayerNormalization":
for node1 in link1.outputs:
if node1.op == "Gather":
for link2 in node1.outputs:
for node2 in link2.outputs:
axis = node1.attrs.get("axis", 0)
index = node1.inputs[1].values
slice_link = Variable(
f"onnx::Slice_123{gather_idx}",
dtype=link2.dtype,
shape=[1, *link2.shape],
)
slice_node = Node(
op="Slice",
inputs=[
node1.inputs[0],
Constant(
f"SliceStart_123{gather_idx}",
np.array([index]),
),
Constant(
f"SliceEnd_123{gather_idx}",
np.array([index + 1]),
),
Constant(
f"SliceAxis_123{gather_idx}",
np.array([axis]),
),
],
outputs=[slice_link],
name=f"Slice_123{gather_idx}",
)
graph.nodes.append(slice_node)
gather_idx += 1
squeeze_link = Variable(
f"onnx::Squeeze_123{squeeze_idx}",
dtype=link2.dtype,
shape=link2.shape,
)
squeeze_node = Node(
op="Squeeze",
inputs=[
slice_link,
Constant(
f"SqueezeAxis_123{squeeze_idx}",
np.array([0]),
),
],
outputs=[squeeze_link],
name=f"Squeeze_123{squeeze_idx}",
)
graph.nodes.append(squeeze_node)
squeeze_idx += 1
try:
idx = node2.inputs.index(link2)
node2.inputs[idx] = squeeze_link
except ValueError:
pass
elif node.op == "Reshape":
for node1 in link1.outputs:
if node1.op == "Gather":
node2s = [n for link in node1.outputs for n in link.outputs]
if any(n.op == "Abs" for n in node2s):
axis = node1.attrs.get("axis", 0)
index = node1.inputs[1].values
slice_link = Variable(
f"onnx::Slice_123{gather_idx}",
dtype=node1.outputs[0].dtype,
shape=[1, *node1.outputs[0].shape],
)
slice_node = Node(
op="Slice",
inputs=[
node1.inputs[0],
Constant(
f"SliceStart_123{gather_idx}",
np.array([index]),
),
Constant(
f"SliceEnd_123{gather_idx}",
np.array([index + 1]),
),
Constant(
f"SliceAxis_123{gather_idx}",
np.array([axis]),
),
],
outputs=[slice_link],
name=f"Slice_123{gather_idx}",
)
graph.nodes.append(slice_node)
gather_idx += 1
squeeze_link = Variable(
f"onnx::Squeeze_123{squeeze_idx}",
dtype=node1.outputs[0].dtype,
shape=node1.outputs[0].shape,
)
squeeze_node = Node(
op="Squeeze",
inputs=[
slice_link,
Constant(
f"SqueezeAxis_123{squeeze_idx}",
np.array([0]),
),
],
outputs=[squeeze_link],
name=f"Squeeze_123{squeeze_idx}",
)
graph.nodes.append(squeeze_node)
squeeze_idx += 1
for node2 in node2s:
node2.inputs[0] = squeeze_link
elif node.op == "BatchNormalization" and node.attrs.get("training_mode") == 1:
node.attrs["training_mode"] = 0
node.outputs = node.outputs[:1]
graph.cleanup(remove_unused_node_outputs=True, recurse_subgraphs=True, recurse_functions=True)
graph.toposort()
graph.fold_constants()
updated = export_onnx(graph)
onnx_save(updated, model_path)
# for some reason, reloading the model is necessary to apply the correct shape
proto = onnx.load(model_path)
graph = import_onnx(proto)
for node in graph.nodes:
if node.op == "Slice":
for link in node.outputs:
if "Slice_123" in link.name and link.shape[0] == 3: # noqa: PLR2004
link.shape[0] = 1
graph.cleanup(remove_unused_node_outputs=True, recurse_subgraphs=True, recurse_functions=True)
graph.toposort()
graph.fold_constants()
updated = export_onnx(graph)
onnx_save(updated, model_path)
onnx.shape_inference.infer_shapes_path(model_path, check_type=True, strict_mode=True, data_prop=True)
def onnx_make_inputs_fixed(input_path: str, output_path: str, input_shapes: list[tuple[int, ...]]) -> None:
import onnx
import onnxsim
from onnxruntime.tools.onnx_model_utils import fix_output_shapes, make_input_shape_fixed
model, success = onnxsim.simplify(input_path)
if not success:
msg = f"Failed to simplify {input_path}"
raise RuntimeError(msg)
onnx_save(model, output_path)
onnx.shape_inference.infer_shapes_path(output_path, check_type=True, strict_mode=True, data_prop=True)
model = onnx.load_model(output_path)
for input_node, shape in zip(model.graph.input, input_shapes, strict=False):
make_input_shape_fixed(model.graph, input_node.name, shape)
fix_output_shapes(model)
onnx_save(model, output_path)
onnx.shape_inference.infer_shapes_path(output_path, check_type=True, strict_mode=True, data_prop=True)
def onnx_get_inputs_outputs(model_path: str) -> tuple[list[str], list[str]]:
import onnx
model = onnx.load(model_path)
inputs = [input_.name for input_ in model.graph.input]
outputs = [output_.name for output_ in model.graph.output]
return inputs, outputs
def onnx_save(model: Any, output_path: str) -> None:
import onnx
try:
onnx.save(model, output_path)
except Exception:
onnx.save(model, output_path, save_as_external_data=True, all_tensors_to_one_file=False, size_threshold=1_000_000)
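
(Illustrative sketch, not part of this diff.) The gather-to-slice rewrite described in the docstring above rests on the equivalence that gathering a single index along an axis is the same as slicing [index, index+1) on that axis and then squeezing that axis away, which is what the inserted Slice (plus Squeeze, in the LayerNormalization and Reshape branches) nodes reproduce. A minimal NumPy check of that equivalence, with a made-up 2x3x4 tensor:

import numpy as np

x = np.random.rand(2, 3, 4)
axis, index = 1, 2

# Gather a single index along `axis` (drops that axis), as the original Gather node does.
gathered = np.take(x, index, axis=axis)             # shape (2, 4)

# Slice [index:index+1] along the same axis, then squeeze it away,
# mirroring the Slice + Squeeze pair inserted above.
slicer = [slice(None)] * x.ndim
slicer[axis] = slice(index, index + 1)
squeezed = np.squeeze(x[tuple(slicer)], axis=axis)  # shape (2, 4)

assert np.array_equal(gathered, squeezed)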

View File

@@ -0,0 +1,56 @@
[project]
name = "onnx2ann"
version = "1.107.2"
dependencies = [
"onnx>=1.16.1",
"psutil>=6.0.0",
"flatbuffers>=24.3.25",
"ml_dtypes>=0.3.1,<1.0.0",
"typer-slim>=0.12.3,<1.0.0",
"huggingface_hub>=0.23.4,<1.0.0",
"onnxruntime>=1.18.1",
"onnxsim>=0.4.36,<1.0.0",
"onnx2tf>=1.24.0",
"onnx_graphsurgeon>=0.5.2,<1.0.0",
"simple_onnx_processing_tools>=1.1.32",
"tf_keras>=2.16.0",
"onnxconverter-common @ git+https://github.com/microsoft/onnxconverter-common"
]
requires-python = ">=3.11"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.sdist]
only-include = ["onnx2ann"]
[tool.hatch.metadata]
allow-direct-references = true
[tool.mypy]
python_version = "3.12"
follow_imports = "silent"
warn_redundant_casts = true
disallow_any_generics = true
check_untyped_defs = true
disallow_untyped_defs = true
ignore_missing_imports = true
[tool.pydantic-mypy]
init_forbid_extra = true
init_typed = true
warn_required_dynamic_aliases = true
warn_untyped_fields = true
[tool.ruff]
line-length = 120
target-version = "py312"
[tool.ruff.lint]
extend-select = ["E", "F", "I"]
extend-ignore = ["FBT001", "FBT002"]
[tool.black]
line-length = 120
target-version = ['py312']

View File

@@ -0,0 +1,281 @@
#include <fstream>
#include <mutex>
#include <atomic>
#include "armnn/IRuntime.hpp"
#include "armnn/INetwork.hpp"
#include "armnn/Types.hpp"
#include "armnnDeserializer/IDeserializer.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "armnnOnnxParser/IOnnxParser.hpp"
using namespace armnn;
struct IOInfos
{
std::vector<BindingPointInfo> inputInfos;
std::vector<BindingPointInfo> outputInfos;
};
// from https://rigtorp.se/spinlock/
struct SpinLock
{
std::atomic<bool> lock_ = {false};
void lock()
{
for (;;)
{
if (!lock_.exchange(true, std::memory_order_acquire))
{
break;
}
while (lock_.load(std::memory_order_relaxed))
;
}
}
void unlock() { lock_.store(false, std::memory_order_release); }
};
class Ann
{
public:
int load(const char *modelPath,
bool fastMath,
bool fp16,
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
INetworkPtr network = loadModel(modelPath);
IOptimizedNetworkPtr optNet = OptimizeNetwork(network.get(), fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
const IOInfos infos = getIOInfos(optNet.get());
NetworkId netId;
mutex.lock();
Status status = runtime->LoadNetwork(netId, std::move(optNet));
mutex.unlock();
if (status != Status::Success)
{
return -1;
}
spinLock.lock();
ioInfos[netId] = infos;
mutexes.emplace(netId, std::make_unique<std::mutex>());
spinLock.unlock();
return netId;
}
void execute(NetworkId netId, const void **inputData, void **outputData)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
auto m = mutexes[netId].get();
spinLock.unlock();
InputTensors inputTensors;
inputTensors.reserve(infos->inputInfos.size());
size_t i = 0;
for (const BindingPointInfo &info : infos->inputInfos)
inputTensors.emplace_back(info.first, ConstTensor(info.second, inputData[i++]));
OutputTensors outputTensors;
outputTensors.reserve(infos->outputInfos.size());
i = 0;
for (const BindingPointInfo &info : infos->outputInfos)
outputTensors.emplace_back(info.first, Tensor(info.second, outputData[i++]));
m->lock();
runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
m->unlock();
}
void unload(NetworkId netId)
{
mutex.lock();
runtime->UnloadNetwork(netId);
mutex.unlock();
}
int tensors(NetworkId netId, bool isInput = false)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
spinLock.unlock();
return (int)(isInput ? infos->inputInfos.size() : infos->outputInfos.size());
}
unsigned long shape(NetworkId netId, bool isInput = false, int index = 0)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
spinLock.unlock();
const TensorShape shape = (isInput ? infos->inputInfos : infos->outputInfos)[index].second.GetShape();
unsigned long s = 0;
for (unsigned int d = 0; d < shape.GetNumDimensions(); d++)
s |= ((unsigned long)shape[d]) << (d * 16); // stores up to 4 16-bit values in a 64-bit value
return s;
}
Ann(int tuningLevel, const char *tuningFile)
{
IRuntime::CreationOptions runtimeOptions;
BackendOptions backendOptions{"GpuAcc",
{
{"TuningLevel", tuningLevel},
{"MemoryOptimizerStrategy", "ConstantMemoryStrategy"}, // SingleAxisPriorityList or ConstantMemoryStrategy
}};
if (tuningFile)
backendOptions.AddOption({"TuningFile", tuningFile});
runtimeOptions.m_BackendOptions.emplace_back(backendOptions);
runtime = IRuntime::CreateRaw(runtimeOptions);
};
~Ann()
{
IRuntime::Destroy(runtime);
};
private:
INetworkPtr loadModel(const char *modelPath)
{
const auto path = std::string(modelPath);
if (path.rfind(".tflite") == path.length() - 7) // endsWith()
{
auto parser = armnnTfLiteParser::ITfLiteParser::CreateRaw();
return parser->CreateNetworkFromBinaryFile(modelPath);
}
else if (path.rfind(".onnx") == path.length() - 5) // endsWith()
{
auto parser = armnnOnnxParser::IOnnxParser::CreateRaw();
return parser->CreateNetworkFromBinaryFile(modelPath);
}
else
{
std::ifstream ifs(path, std::ifstream::in | std::ifstream::binary);
auto parser = armnnDeserializer::IDeserializer::CreateRaw();
return parser->CreateNetworkFromBinary(ifs);
}
}
static BindingPointInfo getInputTensorInfo(LayerBindingId inputBindingId, TensorInfo info)
{
const auto newInfo = TensorInfo{info.GetShape(), info.GetDataType(),
info.GetQuantizationScale(),
info.GetQuantizationOffset(),
true};
return {inputBindingId, newInfo};
}
IOptimizedNetworkPtr OptimizeNetwork(INetwork *network, bool fastMath, bool fp16, bool saveCachedNetwork, const char *cachedNetworkPath)
{
const bool allowExpandedDims = false;
const ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly;
OptimizerOptionsOpaque options;
options.SetReduceFp32ToFp16(fp16);
options.SetShapeInferenceMethod(shapeInferenceMethod);
options.SetAllowExpandedDims(allowExpandedDims);
BackendOptions gpuAcc("GpuAcc", {{"FastMathEnabled", fastMath}});
if (cachedNetworkPath)
{
gpuAcc.AddOption({"SaveCachedNetwork", saveCachedNetwork});
gpuAcc.AddOption({"CachedNetworkFilePath", cachedNetworkPath});
}
options.AddModelOption(gpuAcc);
// No point in using ARMNN for CPU, use ONNX (quantized) instead.
// BackendOptions cpuAcc("CpuAcc",
// {
// {"FastMathEnabled", fastMath},
// {"NumberOfThreads", 0},
// });
// options.AddModelOption(cpuAcc);
BackendOptions allowExDimOpt("AllowExpandedDims",
{{"AllowExpandedDims", allowExpandedDims}});
options.AddModelOption(allowExDimOpt);
BackendOptions shapeInferOpt("ShapeInferenceMethod",
{{"InferAndValidate", shapeInferenceMethod == ShapeInferenceMethod::InferAndValidate}});
options.AddModelOption(shapeInferOpt);
std::vector<BackendId> backends = {
BackendId("GpuAcc"),
// BackendId("CpuAcc"),
// BackendId("CpuRef"),
};
return Optimize(*network, backends, runtime->GetDeviceSpec(), options);
}
IOInfos getIOInfos(IOptimizedNetwork *optNet)
{
struct InfoStrategy : IStrategy
{
void ExecuteStrategy(const IConnectableLayer *layer,
const BaseDescriptor &descriptor,
const std::vector<ConstTensor> &constants,
const char *name,
const LayerBindingId id = 0) override
{
IgnoreUnused(descriptor, constants, id);
const LayerType lt = layer->GetType();
if (lt == LayerType::Input)
ioInfos.inputInfos.push_back(getInputTensorInfo(id, layer->GetOutputSlot(0).GetTensorInfo()));
else if (lt == LayerType::Output)
ioInfos.outputInfos.push_back({id, layer->GetInputSlot(0).GetTensorInfo()});
}
IOInfos ioInfos;
};
InfoStrategy infoStrategy;
optNet->ExecuteStrategy(infoStrategy);
return infoStrategy.ioInfos;
}
IRuntime *runtime;
std::map<NetworkId, IOInfos> ioInfos;
std::map<NetworkId, std::unique_ptr<std::mutex>> mutexes; // mutex per network to avoid executing the same network concurrently
std::mutex mutex; // global mutex for load/unload calls to the runtime
SpinLock spinLock; // fast spin lock to guard access to the ioInfos and mutexes maps
};
extern "C" void *init(int logLevel, int tuningLevel, const char *tuningFile)
{
LogSeverity level = static_cast<LogSeverity>(logLevel);
ConfigureLogging(true, true, level);
Ann *ann = new Ann(tuningLevel, tuningFile);
return ann;
}
extern "C" void destroy(void *ann)
{
delete ((Ann *)ann);
}
extern "C" int load(void *ann,
const char *path,
bool fastMath,
bool fp16,
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
return ((Ann *)ann)->load(path, fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
}
extern "C" void unload(void *ann, NetworkId netId)
{
((Ann *)ann)->unload(netId);
}
extern "C" void execute(void *ann, NetworkId netId, const void **inputData, void **outputData)
{
((Ann *)ann)->execute(netId, inputData, outputData);
}
extern "C" unsigned long shape(void *ann, NetworkId netId, bool isInput, int index)
{
return ((Ann *)ann)->shape(netId, isInput, index);
}
extern "C" int tensors(void *ann, NetworkId netId, bool isInput)
{
return ((Ann *)ann)->tensors(netId, isInput);
}
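
(Illustrative sketch, not part of this diff.) The `shape()` export above packs up to four 16-bit dimensions into a single 64-bit value; a caller on the Python side could unpack it roughly as follows (the `unpack_shape` helper is made up for illustration and is not taken from this change):

def unpack_shape(packed: int, dims: int) -> tuple[int, ...]:
    # Each dimension occupies 16 bits, lowest dimension in the least-significant bits,
    # mirroring `s |= shape[d] << (d * 16)` in shape() above.
    return tuple((packed >> (d * 16)) & 0xFFFF for d in range(dims))

# e.g. a (1, 3, 224, 224) tensor packed by shape():
packed = 1 | (3 << 16) | (224 << 32) | (224 << 48)
assert unpack_shape(packed, 4) == (1, 3, 224, 224)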

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env sh
cd armnn-23.11/ || exit
g++ -o ../armnnconverter -fPIC -O1 -DARMNN_ONNX_PARSER -DARMNN_SERIALIZER -DARMNN_TF_LITE_PARSER -fuse-ld=gold -std=c++17 -Iinclude -Isrc/armnnUtils -Ithird-party -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -larmnnSerializer -L../armnn src/armnnConverter/ArmnnConverter.cpp

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env sh
g++ -shared -O3 -fPIC -o libann.so -fuse-ld=gold -std=c++17 -I"$ARMNN_PATH"/include -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -L"$ARMNN_PATH" ann.cpp

View File

@@ -1,4 +1,4 @@
FROM mambaorg/micromamba:bookworm-slim@sha256:954e438daab0ad0835430ea84acb27dd47d1ea35a7120c3c9dd9d1a5578f4b13 AS builder
FROM mambaorg/micromamba:bookworm-slim@sha256:333f7598ff2c2400fb10bfe057709c68b7daab5d847143af85abcf224a07271a as builder
ENV TRANSFORMERS_CACHE=/cache \
PYTHONDONTWRITEBYTECODE=1 \

View File

@@ -2,7 +2,7 @@ name: base
channels:
- conda-forge
- nvidia
- pytorch
- pytorch-nightly
platforms:
- linux-64
dependencies:
@@ -13,7 +13,7 @@ dependencies:
- orjson==3.*
- pip
- python==3.11.*
- pytorch>=2.3
- pytorch
- rich==13.*
- safetensors==0.*
- setuptools==68.*
@@ -21,5 +21,5 @@ dependencies:
- transformers==4.*
- pip:
- multilingual-clip
- onnxsim
- onnx-simplifier
category: main

View File

@@ -1,4 +1,3 @@
import os
import tempfile
import warnings
from pathlib import Path
@@ -9,6 +8,7 @@ from transformers import AutoTokenizer
from .openclip import OpenCLIPModelConfig
from .openclip import to_onnx as openclip_to_onnx
from .optimize import optimize
from .util import get_model_path
_MCLIP_TO_OPENCLIP = {
@@ -19,39 +19,44 @@ _MCLIP_TO_OPENCLIP = {
}
def to_onnx(
model_name: str,
output_dir_visual: Path | str,
output_dir_textual: Path | str,
) -> tuple[Path, Path]:
textual_path = get_model_path(output_dir_textual)
with tempfile.TemporaryDirectory() as tmpdir:
model = MultilingualCLIP.from_pretrained(model_name, cache_dir=os.environ.get("CACHE_DIR", tmpdir))
AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
model.eval()
for param in model.parameters():
param.requires_grad_(False)
export_text_encoder(model, textual_path)
visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
assert visual_path is not None, "Visual model export failed"
return visual_path, textual_path
def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
output_path = Path(output_path)
def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
embs = self.transformer(input_ids, attention_mask)[0]
embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
embs = self.LinearTransformation(embs)
return torch.nn.functional.normalize(embs, dim=-1)
# unfortunately need to monkeypatch for tracing to work here
# otherwise it hits the 2GiB protobuf serialization limit
MultilingualCLIP.forward = forward
# unfortunately need to monkeypatch for tracing to work here
# otherwise it hits the 2GiB protobuf serialization limit
MultilingualCLIP.forward = forward
def to_torchscript(model_name: str) -> torch.jit.ScriptModule:
with tempfile.TemporaryDirectory() as tmpdir:
model = MultilingualCLIP.from_pretrained(model_name, cache_dir=tmpdir)
model.eval()
for param in model.parameters():
param.requires_grad_(False)
return model
def to_onnx(
model_name: str,
output_dir_visual: Path | str,
output_dir_textual: Path | str,
) -> None:
textual_path = get_model_path(output_dir_textual)
model = to_torchscript(model_name)
AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
_text_encoder_to_onnx(model, textual_path)
openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
optimize(textual_path)
def _text_encoder_to_onnx(model: MultilingualCLIP, output_path: Path | str) -> None:
output_path = Path(output_path)
args = (torch.ones(1, 77, dtype=torch.int32), torch.ones(1, 77, dtype=torch.int32))
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
@@ -60,10 +65,10 @@ def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> Non
args,
output_path.as_posix(),
input_names=["input_ids", "attention_mask"],
output_names=["embedding"],
output_names=["text_embedding"],
opset_version=17,
# dynamic_axes={
# "input_ids": {0: "batch_size", 1: "sequence_length"},
# "attention_mask": {0: "batch_size", 1: "sequence_length"},
# },
dynamic_axes={
"input_ids": {0: "batch_size", 1: "sequence_length"},
"attention_mask": {0: "batch_size", 1: "sequence_length"},
},
)

Some files were not shown because too many files have changed in this diff