Mirror of https://github.com/immich-app/immich.git (synced 2025-12-09 09:13:08 +03:00)

Compare commits
458 Commits
Commit SHA1s in this range (author, message, and date columns were not captured by the mirror):

31dd15ce8a 6108587c8b 3e50f668d9 9b82617e22 76cb32d8d0 e8f3348833
9922c8de59 3f4bbab4eb 2da9e3152b 56b85f7479 8b43066632 20acdcd884
22d348beca 3b0af1c8a9 61c8237a4d d740f0283a 4ada28ac99 63c01b78e2
1423cfd53c 867eec86f5 86e8effd8e 49d393216a 75c9f63757 63984890df
1356468c38 c23c53bf6f 0dcfc43461 d1fd0076cc ff19502035 6ef069b537
a03e999bde ad1ba4be5f f89e74181b e2c34f17ba 23b1256592 7bbc1d9f68
8b24c31d20 7f61ac6983 4db8f0c666 3d6a6f77a8 5698f446f7 eb74fafb00
24da25dbbf 9b842d4cca a99bd94717 4b568dcbb3 12ab56c885 eed6465b41
5f6c16080b a2aab1f373 8e076ecfe4 fe702ba6d7 869839f642 8885e3105e
6e51c4ec71 6bf2e8dbcb 366f23774a fd5e931617 d8d87bb565 6cc1978b2d
506d2d0f81 f13d13b2ea 2510684bf7 c8eef5ad4d 0cb3dc6211 f11080cc2d
efcf773ea0 dc143046e3 e684062569 5c0538e52c 84cf0d1670 bfcde05b1c
b3b15e9b61 819e56d9ca 9a98712db7 a185e06399 f2be9f7ad1 5c879acd5b
28c664c769 fbd85a89e0 1c86293035 4a9d80298b 362feb1e62 5503bf7a60
d20e2e268a a708649504 a808b8610e c70c9067b0 082471dfd9 9a098b4658
9d705097e8 6050485ad8 fb907d707d 7d6cfd09e6 967c69317b 128d653fc6
8b69114924 4b55888d16 8fbd650483 c778516ce2 2969e25ff7 c055e1aefe
5f7f88ff17 5053130e35 4ef7eb56a3 8ecc67a364 90f7c3d9ae d0381fddec
7c851893b4 ae61ea7984 bbcaee82f0 16266c9f5a 6c64a6dab8 c0fe98fe27
579321251f 392f9f205c 57829cee26 4be2351d21 edbcf17e3a eef74ee0ba
ec58e1065f 4376fd72b7 e4b6efc1f5 caea3a0812 9c2c85cbe1 d350022dec
502f6e020d ca9e02379d 36ec407c66 007eaaceb9 94c0e8253a 5acf6868b7
616905211d 3925445de8 52f21fb331 ac36effb45 02cd8da871 17a2043e76
34b88bb47a f6ba071569 6b7a7b0cbc b0102f8025 9c95adc7fb 376282e538
76d95cd348 31dc83f3f2 aeb3e0a84f 8634c59850 b13a98646f 7bf142dc43
d8cda6ee40 a31bc94460 516709ffe1 425cf62482 58242b3b4a 9d4aee36e2
70d08a2b2a f1b98d5f45 749eff03d5 5f257b9a84 0cae20033c 115ee0d6cc
bfdd6eac01 9eab770e79 efd8d8b884 25e1c8cc7f 7c26663013 2c88ce8559
50b072803d 1689cecaf7 5cd1018db3 31e6270a28 b3fbd0809b 129a4a82e0
924d11a913 425c87bce4 25fcda6eeb f386b4d377 c524fcf084 194c567a45
411f96ef49 4f912de018 47203d2760 8ab87a8803 5b4f894211 b1f05fc18b
dbbefde98d 5407a28533 f5edc87e4d bf16b61d43 8c882b54cd 2d7c333c8c
7c821dd205 703361da1a fa5aeaf539 5f3a42a132 9d85272c2b d2575d8f00
f0a4c945bd a3766b879e 1a190c33a0 17a63e37b2 bf1f8da884 2271984dbd
b40963ec52 735f8d661e 8794c84e9d cef19eed97 90c607c1a6 52b650093d
fe4c49c8e3 4cad23aaa3 feba590de7 64f0333306 758bcd1e97 fb21950ad8
758449e9f0 d7d4d22fe0 03948a69e2 61b8eb85b5 c5360e78c5 23014c263b
2e5007adef c4531fc4d3 252d3f5f2c ef6c2bf547 6aad9fae8e 45f7401513
3c7edba388 76a70703a5 f78066d4b9 48d421e28c 1492b55c07 1d6a4e9318
fe42e7410b 58bf58b393 99de52479e 97574d7296 5015210f37 0bb1219b5f
b730aa60ed 7ec3610753 69e88ef985 9358b4dc7e 06f077bac2 47f6181d42
aac029d92b ef245ea2d2 e8d05e78ad 52c9fbea5f 882163f545 96a6cc20b7
4efacfbb91 a808a840c8 3f18acdb1a 2b41b5efe1 9ac95d6845 221e197633
1b141d5ca9 098bab7c9b 4fccc09fc1 c016b65ef2 844eed8707 6e31ac4c75
b287c0cbe8 1fcc75fb44 ca79e25a6e 4fd8c1b3c1 f3ba994186 b4a4abbf51
a0aea021a1 9033a99587 cc0cbd705e da580d4685 cb6d94c7a7 060300de8a
c2ba1cc202 08db77db23 92dff839d0 fe1e09e51f f44669447f 92412ca2f7
64d926581f c139e05170 0fe62298e1 e5794e6cfc f6cbc9db06 8dab5d3798
e864811a85 72a55c13b6 206412267a f780a56e24 7bbffccf76 05a446c259
4f725b95e1 64b92cb24c 19f2f888ee d12b1c907d 947c053c15 79592701dd
39697cd973 10e518db42 72fa31f9e9 9871a04d54 ba01b40e7c f5a3d7ba23
d4a9eed4a1 9d8072b994 3c1fa22109 c0210bd6c0 a6ace5151c ede9c99adb
ec7ab209f3 61bc24d7ea 6c95eb22b7 aaea5cf1ad 96d2e9b4c5 19740a3560
8a481e2ea1 ba105d9f19 065d885ca0 a07ae9b5b2 1869b1b41a 995314446b
a1691ddc0f 071b271484 50a2f6193f 907fed1081 49a16045bd a47aa86392
f32c5d97cd afc6e91c66 1311189fab fa3b5a4c8f d3446f3092 b31414af8f
cf99dcb279 dc56ed5d45 d1d26c60d6 66849d0d45 30b8864d2d 78464a4ba3
1f19a65d1a ca3619658b c7a1f2944f 7b71c145c8 49a6961ec6 7b882b35e5
443aad5794 8d6cbb51e2 c8abe9a2fd 58a75d59bd 36058b9b59 8440f146e2
3da17da7b4 ccf6d71c3c 5171630b98 9a27a99cab 332a865ce6 0c152366ec
c35fd6cbdb 58d5cc1e4b 9a1068c867 1745f48f3d b0cdd8f475 318dd32363
887267b133 1d0d4fc281 345791c0e6 07698f8a40 6fdb8f83f0 a0b2c69b99
70809c1465 97ec3b147c d249b63c99 0f803a4f5e 8eac82c5a3 3d13da7f11
430d0b86ee f40fdce658 097183b31d d5a9294eeb c5582fc8d9 6993726d50
c821458e6c efbc0cb192 fd99bd05cf 3a2bf91889 378bd3c993 89f40b311c
6ce1533117 0ce62d8efd e151248b16 a2207f2eef 81568dbda3 a60da1ccab
2d2966caa0 7d087371b5 93e2545275 43b3181f45 2903ad8156 c5476a99b1
5d2e421800 b9000d8770 073fccb517 3e11b90851 19e2504583 4279cd6e1e
f70ee3f350 9e1651ef66 a35af2b242 fc99c5f530 e978b8c685 3b06220219
dc53e2a9b9 28b08ed417 b74f013b53 79726acc72 36eef9807b 3da750117f
a6c8eb57f1 efe4396e54 c4a8fdf0f3 abf5b0afe1 77d4eb8787 e7abfe3067
be1187bc46 fef36e6a37 a39fbcb8ac ca75bba3b0 f3dbbfa16d 8b4390c247
581d32269d 2b76112014 2301affd7e 2f9a66e961 0b8cfc6b82 cab201270c
beb31cebed e51091b6e5 cc6a8b0c74 930f979960 3030e74fc3 f9db60f25b
7d50d3032b 1fb2b3f899 2e12c46980 1489d69f81 8d836ae04f cc473c42b5
bab04378dc cc10fc15c3 8cb9196bcb 6cce24f391 36a9ae7c54 4aabbec742
c5baf79f61 e183c9b917
.devcontainer/.gitignore (vendored, new file, 2 additions)

@@ -0,0 +1,2 @@
.env
library
@@ -1,2 +1,16 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}

# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.24.5"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin

# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
  && curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
  && tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
  && rm flutter.tar.xz \
  && chown -R 1000:1000 ${FLUTTER_HOME}
@@ -1,20 +1,26 @@
{
  "name": "Immich devcontainers",
  "build": {
    "dockerfile": "Dockerfile",
    "args": {
      "BASEIMAGE": "mcr.microsoft.com/devcontainers/typescript-node:22"
    }
  },
  "customizations": {
    "vscode": {
      "extensions": [
        "svelte.svelte-vscode"
      ]
    }
  },
  "forwardPorts": [],
  "postCreateCommand": "make install-all",
  "remoteUser": "node"
  "name": "Immich",
  "service": "immich-devcontainer",
  "dockerComposeFile": [
    "docker-compose.yml",
    "../docker/docker-compose.dev.yml"
  ],
  "customizations": {
    "vscode": {
      "extensions": [
        "Dart-Code.dart-code",
        "Dart-Code.flutter",
        "dbaeumer.vscode-eslint",
        "dcmdev.dcm-vscode-extension",
        "esbenp.prettier-vscode",
        "svelte.svelte-vscode"
      ]
    }
  },
  "forwardPorts": [],
  "initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
  "onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
  "overrideCommand": true,
  "workspaceFolder": "/immich",
  "remoteUser": "node"
}
.devcontainer/docker-compose.yml (new file, 8 additions)

@@ -0,0 +1,8 @@
services:
  immich-devcontainer:
    build:
      dockerfile: Dockerfile
    extra_hosts:
      - 'host.docker.internal:host-gateway'
    volumes:
      - ..:/immich:cached
.devcontainer/scripts/initializeCommand.sh (new file, 6 additions)

@@ -0,0 +1,6 @@
#!/bin/bash

# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
  cp docker/example.env .devcontainer/.env
fi
.devcontainer/scripts/onCreateCommand.sh (new file, 25 additions)

@@ -0,0 +1,25 @@
#!/bin/bash

# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
  sudo dpkg --add-architecture amd64 && \
    sudo apt-get update && \
    sudo apt-get install -y --no-install-recommends \
      qemu-user-static \
      libc6:amd64 \
      libstdc++6:amd64 \
      libgcc1:amd64
fi

# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list

sudo apt-get update
sudo apt-get install dcm

dart --disable-analytics

# Install immich
cd /immich || exit
make install-all
@@ -11,7 +11,7 @@ body:

  - type: checkboxes
    attributes:
      label: I have searched the existing feature requests to make sure this is not a duplicate request.
      label: I have searched the existing feature requests, both open and closed, to make sure this is not a duplicate request.
      options:
        - label: "Yes"
          required: true
.github/FUNDING.yml (vendored, 2 changes)

@@ -1 +1 @@
custom: ['https://buy.immich.app']
custom: ['https://buy.immich.app', 'https://immich.store']
.github/ISSUE_TEMPLATE/bug_report.yaml (vendored, 7 changes)

@@ -1,6 +1,13 @@
name: Report an issue with Immich
description: Report an issue with Immich
body:
  - type: checkboxes
    attributes:
      label: I have searched the existing issues, both open and closed, to make sure this is not a duplicate report.
      options:
        - label: "Yes"
          required: true

  - type: markdown
    attributes:
      value: |
.github/PULL_REQUEST_TEMPLATE/config.yml (vendored, 1 change)

@@ -1,2 +1 @@
blank_issues_enabled: false
blank_pull_request_template_enabled: false
@@ -1,22 +0,0 @@ (file deleted)
## Description
<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

Fixes # (issue)


## How Has This Been Tested?

<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->

- [ ] Test A
- [ ] Test B

## Screenshots (if appropriate):


## Checklist:

- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable
.github/pull_request_template.md (vendored, new file, 36 additions)

@@ -0,0 +1,36 @@
## Description

<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->

Fixes # (issue)

## How Has This Been Tested?

<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->

- [ ] Test A
- [ ] Test B

<details><summary><h2>Screenshots (if appropriate)</h2></summary>

<!-- Images go below this line. -->

</details>

<!-- API endpoint changes (if relevant)
## API Changes
The `/api/something` endpoint is now `/api/something-else`
-->

## Checklist:

- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable
- [ ] I have no unrelated changes in the PR.
- [ ] I have confirmed that any new dependencies are strictly necessary.
- [ ] I have written tests for new code (if applicable)
- [ ] I have followed naming conventions/patterns in the surrounding code
- [ ] All code in `src/services` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services`)
.github/workflows/build-mobile.yml (vendored, 4 changes)

@@ -29,9 +29,11 @@ jobs:
          filters: |
            mobile:
              - 'mobile/**'
            workflow:
              - '.github/workflows/build-mobile.yml'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
        run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"

  build-sign-android:
    name: Build and sign Android
.github/workflows/cli.yml (vendored, 6 changes)

@@ -56,10 +56,10 @@ jobs:
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0
        uses: docker/setup-qemu-action@v3.5.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.8.0
        uses: docker/setup-buildx-action@v3.10.0

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3

@@ -88,7 +88,7 @@ jobs:
            type=raw,value=latest,enable=${{ github.event_name == 'release' }}

      - name: Build and push image
        uses: docker/build-push-action@v6.10.0
        uses: docker/build-push-action@v6.15.0
        with:
          file: cli/Dockerfile
          platforms: linux/amd64,linux/arm64
.github/workflows/docker-cleanup.yml (vendored, 73 deletions)

@@ -1,73 +0,0 @@ (file deleted)
# This workflow runs on certain conditions to check for and potentially
# delete container images from the GHCR which no longer have an associated
# code branch.
# Requires a PAT with the correct scope set in the secrets.
#
# This workflow will not trigger runs on forked repos.

name: Docker Cleanup

on:
  pull_request:
    types:
      - "closed"
  push:
    paths:
      - ".github/workflows/docker-cleanup.yml"

concurrency:
  group: registry-tags-cleanup
  cancel-in-progress: false

jobs:
  cleanup-images:
    name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - primary-name: "immich-server"
          - primary-name: "immich-machine-learning"
    env:
      # Requires a personal access token with the OAuth scope delete:packages
      TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
    steps:
      - name: Clean temporary images
        if: "${{ env.TOKEN != '' }}"
        uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
        with:
          token: "${{ env.TOKEN }}"
          owner: "immich-app"
          is_org: "true"
          do_delete: "true"
          package_name: "${{ matrix.primary-name }}"
          scheme: "pull_request"
          repo_name: "immich"
          match_regex: '^pr-(\d+)$|^(\d+)$'

  cleanup-untagged-images:
    name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
    runs-on: ubuntu-24.04
    needs:
      - cleanup-images
    strategy:
      fail-fast: false
      matrix:
        include:
          - primary-name: "immich-server"
          - primary-name: "immich-machine-learning"
          - primary-name: "immich-build-cache"
    env:
      # Requires a personal access token with the OAuth scope delete:packages
      TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
    steps:
      - name: Clean untagged images
        if: "${{ env.TOKEN != '' }}"
        uses: stumpylog/image-cleaner-action/untagged@v0.9.0
        with:
          token: "${{ env.TOKEN }}"
          owner: "immich-app"
          do_delete: "true"
          is_org: "true"
          package_name: "${{ matrix.primary-name }}"
.github/workflows/docker.yml (vendored, 351 changes)

@@ -5,7 +5,6 @@ on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  release:
    types: [published]

@@ -36,10 +35,12 @@ jobs:
              - 'i18n/**'
            machine-learning:
              - 'machine-learning/**'
            workflow:
              - '.github/workflows/docker.yml'

      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
        run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"

  retag_ml:
    name: Re-Tag ML

@@ -61,8 +62,10 @@
          REGISTRY_NAME="ghcr.io"
          REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
          TAG_OLD=main${{ matrix.suffix }}
          TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
          TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD

  retag_server:
    name: Re-Tag Server

@@ -84,107 +87,100 @@
          REGISTRY_NAME="ghcr.io"
          REPOSITORY=${{ github.repository_owner }}/immich-server
          TAG_OLD=main${{ matrix.suffix }}
          TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD

          TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
          TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
          docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD

  build_and_push_ml:
    name: Build and Push ML
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    env:
      image: immich-machine-learning
      context: machine-learning
      file: machine-learning/Dockerfile
      GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
    strategy:
      # Prevent a failure in one image from stopping the other builds
      fail-fast: false
      matrix:
        include:
          - platforms: linux/amd64,linux/arm64
          - platform: linux/amd64
            runner: ubuntu-latest
            device: cpu

          - platforms: linux/amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            device: cpu

          - platform: linux/amd64
            runner: ubuntu-latest
            device: cuda
            suffix: -cuda

          - platforms: linux/amd64
          - platform: linux/amd64
            runner: ubuntu-latest
            device: openvino
            suffix: -openvino

          - platforms: linux/arm64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            device: armnn
            suffix: -armnn

    steps:
      - name: Prepare
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.8.0

      - name: Login to Docker Hub
        # Only push to Docker Hub when making a release
        if: ${{ github.event_name == 'release' }}
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
        uses: docker/setup-buildx-action@v3.10.0

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        # Skip when PR from a fork
        if: ${{ !github.event.pull_request.head.repo.fork }}
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate docker image tags
        id: metadata
        uses: docker/metadata-action@v5
        with:
          flavor: |
            # Disable latest tag
            latest=false
          images: |
            name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
            name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
            # Tag with pr-number
            type=ref,event=pr,suffix=${{ matrix.suffix }}
            # Tag with git tag on release
            type=ref,event=tag,suffix=${{ matrix.suffix }}
            type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}

      - name: Determine build cache output
        id: cache-target
      - name: Generate cache key suffix
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            # Essentially just ignore the cache output (PR can't write to registry cache)
            echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
          else
            echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
          fi

      - name: Generate cache target
        id: cache-target
        run: |
          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            # Essentially just ignore the cache output (forks can't write to registry cache)
            echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
          else
            echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
            echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
          fi

      - name: Build and push image
        uses: docker/build-push-action@v6.10.0
        id: build
        uses: docker/build-push-action@v6.15.0
        with:
          context: ${{ env.context }}
          file: ${{ env.file }}
          platforms: ${{ matrix.platforms }}
          # Skip pushing when PR from a fork
          push: ${{ !github.event.pull_request.head.repo.fork }}
          cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          cache-from: |
            type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }}
            type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-main
          outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
          build-args: |
            DEVICE=${{ matrix.device }}
            BUILD_ID=${{ github.run_id }}

@@ -192,100 +188,249 @@ jobs:
            BUILD_SOURCE_REF=${{ github.ref_name }}
            BUILD_SOURCE_COMMIT=${{ github.sha }}

      - name: Export digest
        run: |
          mkdir -p ${{ runner.temp }}/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "${{ runner.temp }}/digests/${digest#sha256:}"

      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
          path: ${{ runner.temp }}/digests/*
          if-no-files-found: error
          retention-days: 1

  merge_ml:
    name: Merge & Push ML
    runs-on: ubuntu-latest
    if: ${{ needs.pre-job.outputs.should_run_ml == 'true' && !github.event.pull_request.head.repo.fork }}
    env:
      GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
      DOCKER_REPO: altran1502/immich-machine-learning
    strategy:
      matrix:
        include:
          - device: cpu
          - device: cuda
            suffix: -cuda
          - device: openvino
            suffix: -openvino
          - device: armnn
            suffix: -armnn
    needs:
      - build_and_push_ml
    steps:
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: ${{ runner.temp }}/digests
          pattern: ml-digests-${{ matrix.device }}-*
          merge-multiple: true

      - name: Login to Docker Hub
        if: ${{ github.event_name == 'release' }}
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Generate docker image tags
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_PR_HEAD_SHA: "true"
        with:
          flavor: |
            # Disable latest tag
            latest=false
          images: |
            name=${{ env.GHCR_REPO }}
            name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
            # Tag with pr-number
            type=ref,event=pr,suffix=${{ matrix.suffix }}
            # Tag with long commit sha hash
            type=sha,format=long,prefix=commit-,suffix=${{ matrix.suffix }}
            # Tag with git tag on release
            type=ref,event=tag,suffix=${{ matrix.suffix }}
            type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}

      - name: Create manifest list and push
        working-directory: ${{ runner.temp }}/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)

  build_and_push_server:
    name: Build and Push Server
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
    env:
      image: immich-server
      context: .
      file: server/Dockerfile
      GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
    strategy:
      fail-fast: false
      matrix:
        include:
          - platforms: linux/amd64,linux/arm64
            device: cpu
          - platform: linux/amd64
            runner: ubuntu-latest
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
    steps:
      - name: Prepare
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.2.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.8.0

      - name: Login to Docker Hub
        # Only push to Docker Hub when making a release
        if: ${{ github.event_name == 'release' }}
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        # Skip when PR from a fork
        if: ${{ !github.event.pull_request.head.repo.fork }}
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate docker image tags
        id: metadata
        uses: docker/metadata-action@v5
        with:
          flavor: |
            # Disable latest tag
            latest=false
          images: |
            name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
            name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
            # Tag with pr-number
            type=ref,event=pr,suffix=${{ matrix.suffix }}
            # Tag with git tag on release
            type=ref,event=tag,suffix=${{ matrix.suffix }}
            type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}

      - name: Determine build cache output
        id: cache-target
      - name: Generate cache key suffix
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            # Essentially just ignore the cache output (PR can't write to registry cache)
            echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
          else
            echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
          fi

      - name: Generate cache target
        id: cache-target
        run: |
          if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
            # Essentially just ignore the cache output (forks can't write to registry cache)
            echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
          else
            echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
            echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
          fi

      - name: Build and push image
        uses: docker/build-push-action@v6.10.0
        id: build
        uses: docker/build-push-action@v6.15.0
        with:
          context: ${{ env.context }}
          file: ${{ env.file }}
          platforms: ${{ matrix.platforms }}
          # Skip pushing when PR from a fork
          push: ${{ !github.event.pull_request.head.repo.fork }}
          cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          tags: ${{ steps.metadata.outputs.tags }}
          platforms: ${{ matrix.platform }}
          labels: ${{ steps.metadata.outputs.labels }}
          cache-to: ${{ steps.cache-target.outputs.cache-to }}
          cache-from: |
            type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }}
            type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-main
          outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
          build-args: |
            DEVICE=${{ matrix.device }}
            DEVICE=cpu
            BUILD_ID=${{ github.run_id }}
            BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
            BUILD_SOURCE_REF=${{ github.ref_name }}
            BUILD_SOURCE_COMMIT=${{ github.sha }}

      - name: Export digest
        run: |
          mkdir -p ${{ runner.temp }}/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "${{ runner.temp }}/digests/${digest#sha256:}"

      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: server-digests-${{ env.PLATFORM_PAIR }}
          path: ${{ runner.temp }}/digests/*
          if-no-files-found: error
          retention-days: 1

  merge_server:
    name: Merge & Push Server
    runs-on: ubuntu-latest
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' && !github.event.pull_request.head.repo.fork }}
    env:
      GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
      DOCKER_REPO: altran1502/immich-server
    needs:
      - build_and_push_server
    steps:
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: ${{ runner.temp }}/digests
          pattern: server-digests-*
          merge-multiple: true

      - name: Login to Docker Hub
        if: ${{ github.event_name == 'release' }}
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Generate docker image tags
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_PR_HEAD_SHA: "true"
        with:
          flavor: |
            # Disable latest tag
            latest=false
          images: |
            name=${{ env.GHCR_REPO }}
            name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
          tags: |
            # Tag with branch name
            type=ref,event=branch,suffix=${{ matrix.suffix }}
            # Tag with pr-number
            type=ref,event=pr,suffix=${{ matrix.suffix }}
            # Tag with long commit sha hash
            type=sha,format=long,prefix=commit-,suffix=${{ matrix.suffix }}
            # Tag with git tag on release
            type=ref,event=tag,suffix=${{ matrix.suffix }}
            type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}

      - name: Create manifest list and push
        working-directory: ${{ runner.temp }}/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)

  success-check-server:
    name: Docker Build & Push Server Success
    needs: [build_and_push_server, retag_server]
    needs: [merge_server, retag_server]
    runs-on: ubuntu-latest
    if: always()
    steps:

@@ -298,7 +443,7 @@ jobs:

  success-check-ml:
    name: Docker Build & Push ML Success
    needs: [build_and_push_ml, retag_ml]
    needs: [merge_ml, retag_ml]
    runs-on: ubuntu-latest
    if: always()
    steps:
.github/workflows/docs-build.yml (vendored, 6 changes)

@@ -15,7 +15,7 @@ jobs:
  pre-job:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

@@ -25,9 +25,11 @@ jobs:
          filters: |
            docs:
              - 'docs/**'
            workflow:
              - '.github/workflows/docs-build.yml'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
        run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"

  build:
    name: Docs Build
.github/workflows/prepare-release.yml (vendored, 9 changes)

@@ -68,10 +68,17 @@ jobs:
    needs: build_mobile

    steps:
      - name: Generate a token
        id: generate-token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
          private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}

      - name: Checkout
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.ORG_RELEASE_TOKEN }}
          token: ${{ steps.generate-token.outputs.token }}

      - name: Download APK
        uses: actions/download-artifact@v4
.github/workflows/preview-label.yaml (vendored, new file, 33 additions)

@@ -0,0 +1,33 @@
name: Preview label

on:
  pull_request:
    types: [labeled]

jobs:
  comment-status:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'labeled' && github.event.label.name == 'preview' }}
    permissions:
      pull-requests: write
    steps:
      - uses: mshick/add-pr-comment@v2
        with:
          message-id: "preview-status"
          message: "Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/"

  remove-label:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'preview') }}
    permissions:
      pull-requests: write
    steps:
      - uses: actions/github-script@v7
        with:
          script: |
            github.rest.issues.removeLabel({
              issue_number: context.payload.pull_request.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              name: 'preview'
            })
.github/workflows/static_analysis.yml (vendored, 4 changes)

@@ -23,9 +23,11 @@ jobs:
          filters: |
            mobile:
              - 'mobile/**'
            workflow:
              - '.github/workflows/static_analysis.yml'
      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
        run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"

  mobile-dart-analyze:
    name: Run Dart Code Analysis
.github/workflows/test.yml (vendored, 28 changes)

@@ -43,10 +43,12 @@ jobs:
              - 'mobile/**'
            machine-learning:
              - 'machine-learning/**'
            workflow:
              - '.github/workflows/test.yml'

      - name: Check if we should force jobs to run
        id: should_force
        run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
        run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"

  server-unit-tests:
    name: Test & Lint Server

@@ -244,25 +246,30 @@ jobs:
        run: npm run check
        if: ${{ !cancelled() }}

  medium-tests-server:
  server-medium-tests:
    name: Medium Tests (Server)
    needs: pre-job
    if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
    runs-on: mich
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./server

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: 'recursive'

      - name: Production build
        if: ${{ !cancelled() }}
        run: docker compose -f e2e/docker-compose.yml build
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version-file: './server/.nvmrc'

      - name: Run npm install
        run: npm ci

      - name: Run medium tests
        run: npm run test:medium
        if: ${{ !cancelled() }}
        run: make test-medium

  e2e-tests-server-cli:
    name: End-to-End Tests (Server & CLI)

@@ -450,7 +457,7 @@ jobs:
    runs-on: ubuntu-latest
    services:
      postgres:
        image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
        image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_USER: postgres

@@ -502,6 +509,7 @@ jobs:
        run: |
          echo "ERROR: Generated migration files not up to date!"
          echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
          cat ./src/migrations/*-TestMigration.ts
          exit 1

      - name: Run SQL generation
.github/workflows/weblate-lock.yml (vendored, new file, 50 additions)

@@ -0,0 +1,50 @@
name: Weblate checks

on:
  pull_request:
    branches: [main]

jobs:
  pre-job:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: found_paths
        uses: dorny/paths-filter@v3
        with:
          filters: |
            i18n:
              - 'i18n/!(en)**\.json'
  enforce-lock:
    name: Check Weblate Lock
    runs-on: ubuntu-latest
    if: ${{ needs.pre-job.outputs.should_run == 'true' }}
    steps:
      - name: Check weblate lock
        run: |
          if [[ "false" = $(curl https://hosted.weblate.org/api/components/immich/immich/lock/ | jq .locked) ]]; then
            exit 1
          fi
      - name: Find Pull Request
        uses: juliangruber/find-pull-request-action@v1
        id: find-pr
        with:
          branch: chore/translations
      - name: Fail if existing weblate PR
        if: ${{ steps.find-pr.outputs.number }}
        run: exit 1
  success-check-lock:
    name: Weblate Lock Check Success
    needs: [ enforce-lock ]
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: Any jobs failed?
        if: ${{ contains(needs.*.result, 'failure') }}
        run: exit 1
      - name: All jobs passed or skipped
        if: ${{ !(contains(needs.*.result, 'failure')) }}
        run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
@@ -29,6 +29,7 @@
  <a href="readme_i18n/README_nl_NL.md">Nederlands</a>
  <a href="readme_i18n/README_tr_TR.md">Türkçe</a>
  <a href="readme_i18n/README_zh_CN.md">中文</a>
  <a href="readme_i18n/README_uk_UA.md">Українська</a>
  <a href="readme_i18n/README_ru_RU.md">Русский</a>
  <a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
  <a href="readme_i18n/README_sv_SE.md">Svenska</a>
@@ -1 +1 @@
22.12.0
22.14.0
@@ -1,4 +1,4 @@
FROM node:22.12.0-alpine3.20@sha256:96cc8323e25c8cc6ddcb8b965e135cfd57846e8003ec0d7bcec16c5fd5f6d39f AS core
FROM node:22.14.0-alpine3.20@sha256:40be979442621049f40b1d51a26b55e281246b5de4e5f51a18da7beb6e17e3f9 AS core

WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./
cli/package-lock.json (generated, 1216 changes)
File diff suppressed because it is too large.
@@ -1,6 +1,6 @@
{
  "name": "@immich/cli",
  "version": "2.2.38",
  "version": "2.2.53",
  "description": "Command Line Interface (CLI) for Immich",
  "type": "module",
  "exports": "./dist/index.js",

@@ -19,26 +19,27 @@
    "@types/byte-size": "^8.1.0",
    "@types/cli-progress": "^3.11.0",
    "@types/lodash-es": "^4.17.12",
    "@types/micromatch": "^4.0.9",
    "@types/mock-fs": "^4.13.1",
    "@types/node": "^22.10.2",
    "@types/node": "^22.13.5",
    "@typescript-eslint/eslint-plugin": "^8.15.0",
    "@typescript-eslint/parser": "^8.15.0",
    "@vitest/coverage-v8": "^2.0.5",
    "@vitest/coverage-v8": "^3.0.0",
    "byte-size": "^9.0.0",
    "cli-progress": "^3.12.0",
    "commander": "^12.0.0",
    "eslint": "^9.14.0",
    "eslint-config-prettier": "^9.1.0",
    "eslint-config-prettier": "^10.0.0",
    "eslint-plugin-prettier": "^5.1.3",
    "eslint-plugin-unicorn": "^56.0.1",
    "globals": "^15.9.0",
    "globals": "^16.0.0",
    "mock-fs": "^5.2.0",
    "prettier": "^3.2.5",
    "prettier-plugin-organize-imports": "^4.0.0",
    "typescript": "^5.3.3",
    "vite": "^5.0.12",
    "vite": "^6.0.0",
    "vite-tsconfig-paths": "^5.0.0",
    "vitest": "^2.0.5",
    "vitest": "^3.0.0",
    "vitest-fetch-mock": "^0.4.0",
    "yaml": "^2.3.1"
  },

@@ -62,11 +63,13 @@
    "node": ">=20.0.0"
  },
  "dependencies": {
    "chokidar": "^4.0.3",
    "fast-glob": "^3.3.2",
    "fastq": "^1.17.1",
    "lodash-es": "^4.17.21"
    "lodash-es": "^4.17.21",
    "micromatch": "^4.0.8"
  },
  "volta": {
    "node": "22.12.0"
    "node": "22.14.0"
  }
}
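The `micromatch` dependency added above backs the CLI's new `--ignore` handling in watch mode; as shown further down in `src/commands/asset.ts`, `micromatch.contains` is used to exclude paths. A minimal, self-contained TypeScript sketch of that matching behavior follows; the sample paths are invented for illustration:

import micromatch from 'micromatch';

// contains() reports whether any portion of the path matches the glob,
// so an ignore value like 'ignored' excludes entire subtrees under watch.
const ignorePattern = '**/ignored';

console.log(micromatch.contains('/photos/ignored/a.jpg', ignorePattern)); // true
console.log(micromatch.contains('/photos/keep/b.jpg', ignorePattern)); // false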
@@ -1,12 +1,13 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { describe, expect, it, vi } from 'vitest';
import { setTimeout as sleep } from 'node:timers/promises';
import { afterEach, beforeEach, describe, expect, it, MockedFunction, vi } from 'vitest';

import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import { Action, checkBulkUpload, defaults, getSupportedMediaTypes, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';

import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
import { checkForDuplicates, getAlbumName, startWatch, uploadFiles, UploadOptionsDto } from 'src/commands/asset';

vi.mock('@immich/sdk');

@@ -199,3 +200,112 @@ describe('checkForDuplicates', () => {
    });
  });
});

describe('startWatch', () => {
  let testFolder: string;
  let checkBulkUploadMocked: MockedFunction<typeof checkBulkUpload>;

  beforeEach(async () => {
    vi.restoreAllMocks();

    vi.mocked(getSupportedMediaTypes).mockResolvedValue({
      image: ['.jpg'],
      sidecar: ['.xmp'],
      video: ['.mp4'],
    });

    testFolder = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'test-startWatch-'));
    checkBulkUploadMocked = vi.mocked(checkBulkUpload);
    checkBulkUploadMocked.mockResolvedValue({
      results: [],
    });
  });

  it('should start watching a directory and upload new files', async () => {
    const testFilePath = path.join(testFolder, 'test.jpg');

    await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
    await sleep(100); // so the watcher does not treat the test file as an existing file
    await fs.promises.writeFile(testFilePath, 'testjpg');

    await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
    expect(checkBulkUpload).toHaveBeenCalledWith({
      assetBulkUploadCheckDto: {
        assets: [
          expect.objectContaining({
            id: testFilePath,
          }),
        ],
      },
    });
  });

  it('should filter out unsupported files', async () => {
    const testFilePath = path.join(testFolder, 'test.jpg');
    const unsupportedFilePath = path.join(testFolder, 'test.txt');

    await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
    await sleep(100); // so the watcher does not treat the test file as an existing file
    await fs.promises.writeFile(testFilePath, 'testjpg');
    await fs.promises.writeFile(unsupportedFilePath, 'testtxt');

    await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
    expect(checkBulkUpload).toHaveBeenCalledWith({
      assetBulkUploadCheckDto: {
        assets: expect.arrayContaining([
          expect.objectContaining({
            id: testFilePath,
          }),
        ]),
      },
    });

    expect(checkBulkUpload).not.toHaveBeenCalledWith({
      assetBulkUploadCheckDto: {
        assets: expect.arrayContaining([
          expect.objectContaining({
            id: unsupportedFilePath,
          }),
        ]),
      },
    });
  });

  it('should filter out ignored patterns', async () => {
    const testFilePath = path.join(testFolder, 'test.jpg');
    const ignoredPattern = 'ignored';
    const ignoredFolder = path.join(testFolder, ignoredPattern);
    await fs.promises.mkdir(ignoredFolder, { recursive: true });
    const ignoredFilePath = path.join(ignoredFolder, 'ignored.jpg');

    await startWatch([testFolder], { concurrency: 1, ignore: ignoredPattern }, { batchSize: 1, debounceTimeMs: 10 });
    await sleep(100); // so the watcher does not treat the test file as an existing file
    await fs.promises.writeFile(testFilePath, 'testjpg');
    await fs.promises.writeFile(ignoredFilePath, 'ignoredjpg');

    await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
    expect(checkBulkUpload).toHaveBeenCalledWith({
      assetBulkUploadCheckDto: {
        assets: expect.arrayContaining([
          expect.objectContaining({
            id: testFilePath,
          }),
        ]),
      },
    });

    expect(checkBulkUpload).not.toHaveBeenCalledWith({
      assetBulkUploadCheckDto: {
        assets: expect.arrayContaining([
          expect.objectContaining({
            id: ignoredFilePath,
          }),
        ]),
      },
    });
  });

  afterEach(async () => {
    await fs.promises.rm(testFolder, { recursive: true, force: true });
  });
});
@@ -12,13 +12,18 @@ import {
  getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { Matcher, watch as watchFs } from 'chokidar';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import micromatch from 'micromatch';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
import { BaseOptions, Batcher, authenticate, crawl, sha1 } from 'src/utils';

const UPLOAD_WATCH_BATCH_SIZE = 100;
const UPLOAD_WATCH_DEBOUNCE_TIME_MS = 10_000;

const s = (count: number) => (count === 1 ? '' : 's');

@@ -36,6 +41,8 @@ export interface UploadOptionsDto {
  albumName?: string;
  includeHidden?: boolean;
  concurrency: number;
  progress?: boolean;
  watch?: boolean;
}

class UploadFile extends File {

@@ -55,19 +62,94 @@ class UploadFile extends File {
  }
}

const uploadBatch = async (files: string[], options: UploadOptionsDto) => {
  const { newFiles, duplicates } = await checkForDuplicates(files, options);
  const newAssets = await uploadFiles(newFiles, options);
  await updateAlbums([...newAssets, ...duplicates], options);
  await deleteFiles(newFiles, options);
};

export const startWatch = async (
  paths: string[],
  options: UploadOptionsDto,
  {
    batchSize = UPLOAD_WATCH_BATCH_SIZE,
    debounceTimeMs = UPLOAD_WATCH_DEBOUNCE_TIME_MS,
  }: { batchSize?: number; debounceTimeMs?: number } = {},
) => {
  const watcherIgnored: Matcher[] = [];
  const { image, video } = await getSupportedMediaTypes();
  const extensions = new Set([...image, ...video]);

  if (options.ignore) {
    watcherIgnored.push((path) => micromatch.contains(path, `**/${options.ignore}`));
  }

  const pathsBatcher = new Batcher<string>({
    batchSize,
    debounceTimeMs,
    onBatch: async (paths: string[]) => {
      const uniquePaths = [...new Set(paths)];
      await uploadBatch(uniquePaths, options);
    },
  });

  const onFile = async (path: string, stats?: Stats) => {
    if (stats?.isDirectory()) {
      return;
    }
    const ext = '.' + path.split('.').pop()?.toLowerCase();
    if (!ext || !extensions.has(ext)) {
      return;
    }

    if (!options.progress) {
      // only log when the progress bar is disabled, since logging interferes with its rendering
      console.log(`Change detected: ${path}`);
    }
    pathsBatcher.add(path);
  };
  const fsWatcher = watchFs(paths, {
    ignoreInitial: true,
    ignored: watcherIgnored,
    alwaysStat: true,
    awaitWriteFinish: true,
    depth: options.recursive ? undefined : 1,
    persistent: true,
  })
    .on('add', onFile)
    .on('change', onFile)
    .on('error', (error) => console.error(`Watcher error: ${error}`));

  process.on('SIGINT', async () => {
    console.log('Exiting...');
    await fsWatcher.close();
    process.exit();
  });
};
export const upload = async (paths: string[], baseOptions: BaseOptions, options: UploadOptionsDto) => {
|
||||
await authenticate(baseOptions);
|
||||
|
||||
const scanFiles = await scan(paths, options);
|
||||
|
||||
if (scanFiles.length === 0) {
|
||||
console.log('No files found, exiting');
|
||||
return;
|
||||
if (options.watch) {
|
||||
console.log('No files found initially.');
|
||||
} else {
|
||||
console.log('No files found, exiting');
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const { newFiles, duplicates } = await checkForDuplicates(scanFiles, options);
|
||||
const newAssets = await uploadFiles(newFiles, options);
|
||||
await updateAlbums([...newAssets, ...duplicates], options);
|
||||
await deleteFiles(newFiles, options);
|
||||
if (options.watch) {
|
||||
console.log('Watching for changes...');
|
||||
await startWatch(paths, options);
|
||||
// watcher does not handle the initial scan
|
||||
// as the scan() is a more efficient quick start with batched results
|
||||
}
|
||||
|
||||
await uploadBatch(scanFiles, options);
|
||||
};
|
||||
|
||||
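Taken together, the new `startWatch` composes chokidar, micromatch, and the `Batcher`. A sketch of how a caller might drive it — the option values here are illustrative, not taken from the diff:

```typescript
// Hypothetical invocation of startWatch() as defined above: watch a folder,
// ignore a "raw" subfolder, and flush batches of up to 50 paths every 5 seconds.
await startWatch(
  ['/photos'],
  { concurrency: 4, ignore: 'raw', progress: false, watch: true },
  { batchSize: 50, debounceTimeMs: 5_000 },
);
```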
const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
@@ -85,19 +167,25 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
  return files;
};

export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
export const checkForDuplicates = async (files: string[], { concurrency, skipHash, progress }: UploadOptionsDto) => {
  if (skipHash) {
    console.log('Skipping hash check, assuming all files are new');
    return { newFiles: files, duplicates: [] };
  }

  const multiBar = new MultiBar(
    { format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
    Presets.shades_classic,
  );
  let multiBar: MultiBar | undefined;

  const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
  const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
  if (progress) {
    multiBar = new MultiBar(
      { format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
      Presets.shades_classic,
    );
  } else {
    console.log(`Received ${files.length} files, hashing...`);
  }

  const hashProgressBar = multiBar?.create(files.length, 0, { message: 'Hashing files ' });
  const checkProgressBar = multiBar?.create(files.length, 0, { message: 'Checking for duplicates' });

  const newFiles: string[] = [];
  const duplicates: Asset[] = [];
@@ -117,7 +205,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
        }
      }

      checkProgressBar.increment(assets.length);
      checkProgressBar?.increment(assets.length);
    },
    { concurrency, retry: 3 },
  );
@@ -137,7 +225,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
        void checkBulkUploadQueue.push(batch);
      }

      hashProgressBar.increment();
      hashProgressBar?.increment();
      return results;
    },
    { concurrency, retry: 3 },
@@ -155,7 +243,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas

  await checkBulkUploadQueue.drained();

  multiBar.stop();
  multiBar?.stop();

  console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);

@@ -171,7 +259,10 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
  return { newFiles, duplicates };
};
export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
export const uploadFiles = async (
  files: string[],
  { dryRun, concurrency, progress }: UploadOptionsDto,
): Promise<Asset[]> => {
  if (files.length === 0) {
    console.log('All assets were already uploaded, nothing to do.');
    return [];
@@ -191,12 +282,20 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
    return files.map((filepath) => ({ id: '', filepath }));
  }

  const uploadProgress = new SingleBar(
    { format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}' },
    Presets.shades_classic,
  );
  uploadProgress.start(totalSize, 0);
  uploadProgress.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
  let uploadProgress: SingleBar | undefined;

  if (progress) {
    uploadProgress = new SingleBar(
      {
        format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}',
      },
      Presets.shades_classic,
    );
  } else {
    console.log(`Uploading ${files.length} asset${s(files.length)} (${byteSize(totalSize)})`);
  }
  uploadProgress?.start(totalSize, 0);
  uploadProgress?.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });

  let duplicateCount = 0;
  let duplicateSize = 0;
@@ -222,7 +321,7 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
        successSize += stats.size ?? 0;
      }

      uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
      uploadProgress?.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });

      return response;
    },
@@ -235,7 +334,7 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo

  await queue.drained();

  uploadProgress.stop();
  uploadProgress?.stop();

  console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
  if (duplicateCount > 0) {

@@ -69,6 +69,13 @@ program
      .default(4),
  )
  .addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
  .addOption(new Option('--no-progress', 'Hide progress bars').env('IMMICH_PROGRESS_BAR').default(true))
  .addOption(
    new Option('--watch', 'Watch for changes and upload automatically')
      .env('IMMICH_WATCH_CHANGES')
      .default(false)
      .implies({ progress: false }),
  )
  .argument('[paths...]', 'One or more paths to assets to be uploaded')
  .action((paths, options) => upload(paths, program.opts(), options));
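To illustrate how the new flag composes on the command line — a sketch assuming the CLI's existing `--recursive` and `--concurrency` options; the paths are hypothetical, and `--watch` implies `--no-progress` per the option definition above:

```bash
# One-off upload with 8 concurrent uploads and progress bars
immich upload --recursive --concurrency 8 /photos

# Keep running and upload changes automatically; progress bars are disabled
immich upload --recursive --watch /photos
```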
@@ -1,11 +1,13 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { CrawlOptions, crawl } from 'src/utils';
import { Batcher, CrawlOptions, crawl } from 'src/utils';
import { Mock } from 'vitest';

interface Test {
  test: string;
  options: Omit<CrawlOptions, 'extensions'>;
  files: Record<string, boolean>;
  skipOnWin32?: boolean;
}

const cwd = process.cwd();
@@ -48,6 +50,18 @@ const tests: Test[] = [
      '/photos/image.jpg': true,
    },
  },
  {
    test: 'should crawl folders with quotes',
    options: {
      pathsToCrawl: ["/photo's/", '/photo"s/', '/photo`s/'],
    },
    files: {
      "/photo's/image1.jpg": true,
      '/photo"s/image2.jpg': true,
      '/photo`s/image3.jpg': true,
    },
    skipOnWin32: true, // single quote interferes with mockfs root on Windows
  },
  {
    test: 'should crawl a single file',
    options: {
@@ -270,8 +284,12 @@ describe('crawl', () => {
});

describe('crawl', () => {
  for (const { test, options, files } of tests) {
    it(test, async () => {
  for (const { test: name, options, files, skipOnWin32 } of tests) {
    if (process.platform === 'win32' && skipOnWin32) {
      test.skip(name);
      continue;
    }
    it(name, async () => {
      // The file contents is the same as the path.
      mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));

@@ -286,3 +304,38 @@ describe('crawl', () => {
  }
});
});

describe('Batcher', () => {
  let batcher: Batcher;
  let onBatch: Mock;
  beforeEach(() => {
    onBatch = vi.fn();
    batcher = new Batcher({ batchSize: 2, onBatch });
  });

  it('should trigger onBatch() when a batch limit is reached', async () => {
    batcher.add('a');
    batcher.add('b');
    batcher.add('c');
    expect(onBatch).toHaveBeenCalledOnce();
    expect(onBatch).toHaveBeenCalledWith(['a', 'b']);
  });

  it('should trigger onBatch() when flush() is called', async () => {
    batcher.add('a');
    batcher.flush();
    expect(onBatch).toHaveBeenCalledOnce();
    expect(onBatch).toHaveBeenCalledWith(['a']);
  });

  it('should trigger onBatch() when debounce time reached', async () => {
    vi.useFakeTimers();
    batcher = new Batcher({ batchSize: 2, debounceTimeMs: 100, onBatch });
    batcher.add('a');
    expect(onBatch).not.toHaveBeenCalled();
    vi.advanceTimersByTime(200);
    expect(onBatch).toHaveBeenCalledOnce();
    expect(onBatch).toHaveBeenCalledWith(['a']);
    vi.useRealTimers();
  });
});
@@ -146,7 +146,7 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
  }

  const searchPatterns = patterns.map((pattern) => {
    let escapedPattern = pattern;
    let escapedPattern = pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');
    if (recursive) {
      escapedPattern = escapedPattern + '/**';
    }
@@ -172,3 +172,64 @@ export const sha1 = (filepath: string) => {
    rs.on('end', () => resolve(hash.digest('hex')));
  });
};

/**
 * Batches items and calls onBatch to process them
 * when the batch size is reached or the debounce time has passed.
 */
export class Batcher<T = unknown> {
  private items: T[] = [];
  private readonly batchSize: number;
  private readonly debounceTimeMs?: number;
  private readonly onBatch: (items: T[]) => void;
  private debounceTimer?: NodeJS.Timeout;

  constructor({
    batchSize,
    debounceTimeMs,
    onBatch,
  }: {
    batchSize: number;
    debounceTimeMs?: number;
    onBatch: (items: T[]) => Promise<void>;
  }) {
    this.batchSize = batchSize;
    this.debounceTimeMs = debounceTimeMs;
    this.onBatch = onBatch;
  }

  private setDebounceTimer() {
    if (this.debounceTimer) {
      clearTimeout(this.debounceTimer);
    }
    if (this.debounceTimeMs) {
      this.debounceTimer = setTimeout(() => this.flush(), this.debounceTimeMs);
    }
  }

  private clearDebounceTimer() {
    if (this.debounceTimer) {
      clearTimeout(this.debounceTimer);
      this.debounceTimer = undefined;
    }
  }

  add(item: T) {
    this.items.push(item);
    this.setDebounceTimer();
    if (this.items.length >= this.batchSize) {
      this.flush();
    }
  }

  flush() {
    this.clearDebounceTimer();
    if (this.items.length === 0) {
      return;
    }

    this.onBatch(this.items);

    this.items = [];
  }
}
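As a usage sketch of the `Batcher` above (the values and file names are illustrative): items flush as soon as the batch fills, or after the debounce window elapses with the batch still partial.

```typescript
// Hypothetical usage of the Batcher class defined above.
const batcher = new Batcher<string>({
  batchSize: 3,
  debounceTimeMs: 500,
  onBatch: async (paths) => {
    console.log(`processing ${paths.length} path(s)`, paths);
  },
});

batcher.add('a.jpg');
batcher.add('b.jpg');
batcher.add('c.jpg'); // third add reaches batchSize -> onBatch(['a.jpg', 'b.jpg', 'c.jpg'])
batcher.add('d.jpg'); // flushed by the 500 ms debounce timer if nothing else arrives
```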
@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.

provider "registry.opentofu.org/cloudflare/cloudflare" {
  version     = "4.48.0"
  constraints = "4.48.0"
  version     = "4.52.0"
  constraints = "4.52.0"
  hashes = [
    "h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
    "h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
    "h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
    "h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
    "h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
    "h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
    "h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
    "h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
    "h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
    "h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
    "h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
    "h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
    "h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
    "h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
    "zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
    "zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
    "zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
    "zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
    "zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
    "zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
    "zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
    "zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
    "zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
    "zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
    "h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=",
    "h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=",
    "h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=",
    "h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=",
    "h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=",
    "h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=",
    "h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=",
    "h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=",
    "h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=",
    "h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=",
    "h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=",
    "h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=",
    "h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=",
    "h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=",
    "zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0",
    "zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8",
    "zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238",
    "zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f",
    "zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f",
    "zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8",
    "zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9",
    "zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060",
    "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
    "zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
    "zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
    "zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
    "zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
    "zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6",
    "zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb",
    "zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b",
    "zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380",
    "zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
    "zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
  ]
}

@@ -5,7 +5,7 @@ terraform {
  required_providers {
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "4.48.0"
      version = "4.52.0"
    }
  }
}
@@ -1,4 +1,13 @@
# See:
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.

# For development see:
# - https://immich.app/docs/developer/setup
# - https://immich.app/docs/developer/troubleshooting

@@ -71,6 +80,7 @@ services:
      - ../web:/usr/src/app
      - ../i18n:/usr/src/i18n
      - ../open-api/:/usr/src/open-api/
      # - ../../ui:/usr/ui
      - /usr/src/app/node_modules
    ulimits:
      nofile:
@@ -106,13 +116,13 @@ services:

  redis:
    container_name: immich_redis
    image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
    image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1

  database:
    container_name: immich_postgres
    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    env_file:
      - .env
    environment:

@@ -1,3 +1,12 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.

name: immich-prod

services:
@@ -47,14 +56,14 @@ services:

  redis:
    container_name: immich_redis
    image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
    image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    env_file:
      - .env
    environment:
@@ -91,7 +100,7 @@ services:
    container_name: immich_prometheus
    ports:
      - 9090:9090
    image: prom/prometheus@sha256:565ee86501224ebbb98fc10b332fa54440b100469924003359edf49cbce374bd
    image: prom/prometheus@sha256:6927e0919a144aa7616fd0137d4816816d42f6b816de3af269ab065250859a62
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus-data:/prometheus
@@ -103,7 +112,7 @@ services:
    command: ['./run.sh', '-disable-reporting']
    ports:
      - 3000:3000
    image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
    image: grafana/grafana:11.5.2-ubuntu@sha256:8b5858c447e06fd7a89006b562ba7bba7c4d5813600c7982374c41852adefaeb
    volumes:
      - grafana-data:/var/lib/grafana

@@ -1,10 +1,11 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#

name: immich

@@ -48,14 +49,14 @@ services:

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
    image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}

@@ -48,6 +48,7 @@ services:
  vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2
    devices:
      - /dev/dri:/dev/dri
      - /dev/dxg:/dev/dxg
    volumes:
      - /usr/lib/wsl:/usr/lib/wsl
    environment:

@@ -1 +1 @@
22.12.0
22.14.0
@@ -53,10 +53,18 @@ On iOS (iPhone and iPad), the operating system determines if a particular app ca
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often.

### Why are features not working with a self-signed cert or mTLS?
### Why are features in the mobile app not working with a self-signed certificate, Basic Auth, custom headers, or mutual TLS?

Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
These network features are experimental. They often do not work with video playback, asset upload or download, and other features.
Many of these limitations are tracked in [#15230](https://github.com/immich-app/immich/issues/15230).
Instead of these experimental features, we recommend using the URL switching feature, a VPN, or a [free trusted SSL certificate](https://letsencrypt.org/) for your domain.

We are not actively developing these features and will not be able to provide support, but welcome contributions to improve them.
Please discuss any large PRs with our dev team to ensure your time is not wasted.

### Why isn't the mobile app updated yet?

The app stores can take a few days to approve new builds of the app. If you're impatient, Android APKs can be downloaded from the GitHub releases.

---

@@ -89,7 +97,7 @@ Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to al
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.

If you are using Cloudflare Tunnel, please know that they set a maxiumum filesize of 100 MB that cannot be changed.
If you are using Cloudflare Tunnel, please know that they set a maximum filesize of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
@@ -156,6 +164,35 @@ For example, say you have existing transcodes with the policy "Videos higher tha

No. Our design principle is that the original assets should always be untouched.

### How can I mount a CIFS/Samba volume within Docker?

If you aren't able to (or prefer not to) mount Samba on the host, such as in a Windows environment, you can mount the volume within Docker.
Below is an example in the `docker-compose.yml`.

Change the username, password, local IP, and share name to match your setup. The line `- originals:/usr/src/app/originals`
corresponds to the section below it where the volume `originals` is created. You can name this volume whatever you like, and map it into the docker container as you like.
For example, you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `- Photos:/usr/src/app/photos`.

```diff
...
services:
  immich-server:
    ...
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
+     - originals:/usr/src/app/originals
...
volumes:
  model-cache:
+ originals:
+   driver_opts:
+     type: cifs
+     o: 'iocharset=utf8,username=USERNAMEHERE,password=PASSWORDHERE,rw' # change to `ro` if read only desired
+     device: '//localipaddress/sharename'
```

---
## Albums
@@ -278,7 +315,7 @@ The initial backup is the most intensive due to the number of jobs running. The
- For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
  - It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
- If these changes are not enough, see [above](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.

### Can I limit CPU and RAM usage?

@@ -421,7 +458,7 @@ A result of `on` means that checksums are enabled.
<summary>Check if checksums are enabled</summary>

```bash
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="show data_checksums"
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="show data_checksums"
 data_checksums
----------------
 on
@@ -436,7 +473,7 @@ If checksums are enabled, you can check the status of the database with the foll
<summary>Check for database corruption</summary>

```bash
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
 datname  | checksum_failures | checksum_last_failure
----------+-------------------+-----------------------
 postgres | 0                 |
@@ -55,7 +55,7 @@ sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
docker compose up -d # Start remainder of Immich apps
```

@@ -70,18 +70,16 @@ docker compose up -d # Start remainder of Immich apps
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: - 'C:\path\to\backup\dump.sql':/dump.sql) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default
cat "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --username=postgres # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
## You should mount the backup (as a volume, example: `- 'C:\path\to\backup\dump.sql:/dump.sql'`) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip`
cat < "/dump.sql" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | psql --dbname=postgres --username=<DB_USERNAME>
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
```

</TabItem>
@@ -95,12 +93,14 @@ Some deployment methods make it difficult to start the database without also sta

## Filesystem

Immich stores two types of content in the filesystem: (1) original, unmodified assets (photos and videos), and (2) generated content. Only the original content needs to be backed-up, which is stored in the following folders:
Immich stores two types of content in the filesystem: (a) original, unmodified assets (photos and videos), and (b) generated content. We recommend backing up the entire contents of `UPLOAD_LOCATION`, but only the original content is critical, which is stored in the following folders:

1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile`

If you choose to back up only those folders, you will need to rerun the transcoding and thumbnail generation jobs for all assets after you restore from a backup.

:::caution
If you moved some of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup path to match your setup.
:::
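As a sketch of backing up those three folders with `rsync` — the destination path is hypothetical; adjust it to your own backup target:

```bash
# Back up only the original content; transcodes and thumbnails are
# regenerated by re-running the corresponding jobs after a restore.
rsync -a --relative \
  "$UPLOAD_LOCATION/./library" \
  "$UPLOAD_LOCATION/./upload" \
  "$UPLOAD_LOCATION/./profile" \
  /mnt/backup/immich/
```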
@@ -70,4 +70,4 @@ When installing a new version of pgvecto.rs, you will need to manually update th

If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;`.

[vectors-install]: https://docs.pgvecto.rs/getting-started/installation.html
[vectors-install]: https://docs.vectorchord.ai/getting-started/installation.html

@@ -98,6 +98,14 @@ The default Immich log level is `Log` (commonly known as `Info`). The Immich adm
Through this setting, you can manage all the settings related to machine learning in Immich, from the setting of remote machine learning to the model and its parameters.
You can choose to disable a certain type of machine learning, for example smart search or facial recognition.

### URL

The built-in machine learning server (`http://immich-machine-learning:3003`) is configured by default, but you can change this or add additional servers.

Hosting the `immich-machine-learning` container on a machine with a more powerful GPU can be helpful for processing a large number of photos (such as during a batch import) or for faster search.

If more than one URL is provided, each server will be attempted one at a time until one responds successfully, in order from first to last. Servers that don't respond will be temporarily ignored until they come back online.
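As an illustration, such a server list might look like this — the first hostname is hypothetical; Immich tries entries in order and falls back to the next:

```
http://remote-gpu-box:3003            # hypothetical dedicated GPU server, tried first
http://immich-machine-learning:3003   # built-in container, used as a fallback
```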

### Smart Search

The [smart search](/docs/features/searching) settings allow you to change the [CLIP model](https://openai.com/research/clip). Larger models will typically provide [more accurate search results](https://github.com/immich-app/immich/discussions/11862) but consume more processing power and RAM. When [changing the CLIP model](/docs/FAQ#can-i-use-a-custom-clip-model) it is mandatory to re-run the Smart Search job on all images to fully apply the change.
@@ -50,19 +50,18 @@ The Immich CLI is an [npm](https://www.npmjs.com/) package that lets users contr

The Immich backend is divided into several services, which are run as individual docker containers.

1. `immich-server` - Handle and respond to REST API requests
1. `immich-microservices` - Execute background jobs (thumbnail generation, metadata extraction, transcoding, etc.)
1. `immich-server` - Handle and respond to REST API requests, execute background jobs (thumbnail generation, metadata extraction, transcoding, etc.)
1. `immich-machine-learning` - Execute machine learning models
1. `postgres` - Persistent data storage
1. `redis` - Queue management for `immich-microservices`
1. `redis` - Queue management for background jobs

### Immich Server

The Immich Server is a [TypeScript](https://www.typescriptlang.org/) project written for [Node.js](https://nodejs.org/). It uses the [Nest.js](https://nestjs.com) framework, with [TypeORM](https://typeorm.io/) for database management. The server codebase also loosely follows the [Hexagonal Architecture](<https://en.wikipedia.org/wiki/Hexagonal_architecture_(software)>). Specifically, we aim to separate technology specific implementations (`infra/`) from core business logic (`domain/`).
The Immich Server is a [TypeScript](https://www.typescriptlang.org/) project written for [Node.js](https://nodejs.org/). It uses the [Nest.js](https://nestjs.com) framework, the [Express](https://expressjs.com/) server, and the query builder [Kysely](https://kysely.dev/). The server codebase also loosely follows the [Hexagonal Architecture](<https://en.wikipedia.org/wiki/Hexagonal_architecture_(software)>). Specifically, we aim to separate technology specific implementations (`src/repositories`) from core business logic (`src/services`).

#### REST Endpoints
### API Endpoints

The server is a list of HTTP endpoints and associated handlers (controllers). Each controller usually implements the following CRUD operations:
An incoming HTTP request is mapped to a controller (`src/controllers`). Controllers are collections of HTTP endpoints. Each controller usually implements the following CRUD operations for its respective resource type:

- `POST` `/<type>` - **Create**
- `GET` `/<type>` - **Read** (all)
@@ -70,13 +69,13 @@ The server is a list of HTTP endpoints and associated handlers (controllers). Ea
- `PUT` `/<type>/:id` - **Update** (by id)
- `DELETE` `/<type>/:id` - **Delete** (by id)
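A minimal sketch of the controller shape this describes — the resource name and handler bodies are hypothetical, not taken from the Immich codebase:

```typescript
import { Body, Controller, Delete, Get, Param, Post, Put } from '@nestjs/common';

// Hypothetical controller following the CRUD layout listed above.
@Controller('example')
export class ExampleController {
  @Post()
  create(@Body() dto: unknown) {} // Create

  @Get()
  getAll() {} // Read (all)

  @Get(':id')
  get(@Param('id') id: string) {} // Read (by id)

  @Put(':id')
  update(@Param('id') id: string, @Body() dto: unknown) {} // Update (by id)

  @Delete(':id')
  remove(@Param('id') id: string) {} // Delete (by id)
}
```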

#### DTOs
### Data Transfer Objects (DTOs)

The server uses [Data Transfer Objects](https://en.wikipedia.org/wiki/Data_transfer_object) as public interfaces for the inputs (query, params, and body) and outputs (response) for each endpoint. DTOs translate to [OpenAPI](./open-api.md) schemas and control the generated code used by each client.

### Microservices
### Background Jobs

The Immich Microservices image uses the same `Dockerfile` as the Immich Server, but with a different entrypoint. The Immich Microservices service mainly handles executing jobs, which include the following:
Immich uses a [worker](https://github.com/immich-app/immich/blob/main/server/src/utils/misc.ts#L266) to run background jobs. These jobs include:

- Thumbnail Generation
- Metadata Extraction
@@ -63,9 +63,20 @@ If you only want to do web development connected to an existing, remote backend,
IMMICH_SERVER_URL=https://demo.immich.app/ npm run dev
```

#### `@immich/ui`

To see local changes to `@immich/ui` in Immich, do the following:

1. Install `@immich/ui` as a sibling to `immich/`, for example `/home/user/immich` and `/home/user/ui`
1. Build the `@immich/ui` project via `npm run build`
1. Uncomment the corresponding volume in the web service of the `docker/docker-compose.dev.yaml` file (`../../ui:/usr/ui`)
1. Uncomment the corresponding alias in the `web/vite.config.js` file (`'@immich/ui': path.resolve(__dirname, '../../ui')`)
1. Start up the stack via `make dev`
1. After making changes in `@immich/ui`, rebuild it (`npm run build`)

### Mobile app

The mobile app (`/mobile`) requires the Flutter toolchain 3.13.x to be installed on your system.
The mobile app (`/mobile`) requires the Flutter toolchain 3.13.x and FVM to be installed on your system.

Please refer to [Flutter's official documentation](https://flutter.dev/docs/get-started/install) for more information on setting up the toolchain on your machine.
@@ -69,6 +69,8 @@ Navigating to Administration > Settings > Machine Learning Settings > Facial Rec

:::tip
It's better to only tweak the parameters here than to set them to something very different, unless you're ready to test a variety of options. If you do need to set a parameter to a strict setting, relaxing other settings can be a good option to compensate, and vice versa.

You can learn how to tune the results in this [guide](/docs/guides/better-facial-clusters).
:::

### Facial recognition model
@@ -58,7 +58,7 @@ If your photos are on a network drive, automatic file watching likely won't work

#### Troubleshooting

If you encounter an `ENOSPC` error, you need to increase your file watcher limit. In sysctl, this key is called `fs.inotify.max_user_watched` and has a default value of 8192. Increase this number to a suitable value greater than the number of files you will be watching. Note that Immich has to watch all files in your import paths including any ignored files.
If you encounter an `ENOSPC` error, you need to increase your file watcher limit. In sysctl, this key is called `fs.inotify.max_user_watches` and has a default value of 8192. Increase this number to a suitable value greater than the number of files you will be watching. Note that Immich has to watch all files in your import paths including any ignored files.

```
ERROR [LibraryService] Library watcher for library c69faf55-f96d-4aa0-b83b-2d80cbc27d98 encountered error: Error: ENOSPC: System limit for number of file watchers reached, watch '/media/photo.jpg'
```
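A sketch of raising that limit on a typical Linux host — the chosen value is illustrative; pick one larger than your watched file count:

```bash
# Check the current limit
sysctl fs.inotify.max_user_watches

# Raise it for the running system (resets on reboot)
sudo sysctl fs.inotify.max_user_watches=262144

# Persist the change across reboots
echo 'fs.inotify.max_user_watches=262144' | sudo tee /etc/sysctl.d/90-immich-inotify.conf
sudo sysctl --system
```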
@@ -68,7 +68,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.

### Nightly job

There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion. It is possible to trigger the cleanup by clicking "Scan all libraries" in the library management page.

## Usage
@@ -111,11 +111,10 @@ These actions must be performed by the Immich administrator.
- Click on Administration -> Libraries
- Click on Create External Library
- Select which user owns the library; this cannot be changed later
- Enter `/mnt/media/christmas-trip` then click Add
- Click on Save
- Click the drop-down menu on the newly created library
- Click on Rename Library and rename it to "Christmas Trip"
- Click Edit Import Paths
- Click on Add Path
- Enter `/mnt/media/christmas-trip` then click Add

NOTE: We have to use the `/mnt/media/christmas-trip` path and not the `/mnt/nas/christmas-trip` path since all paths have to be what the Docker containers see.
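That path mapping comes from the volume mount. A sketch of what it could look like in `docker-compose.yml` — the host path and the read-only flag are assumptions for this example:

```yaml
# Hypothetical mount: the host's /mnt/nas is visible as /mnt/media inside the
# container, which is why import paths must use /mnt/media/...
services:
  immich-server:
    volumes:
      - /mnt/nas:/mnt/media:ro
```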
@@ -11,7 +11,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele

- ARM NN (Mali)
- CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
- OpenVINO (Intel discrete GPUs such as Iris Xe and Arc)
- OpenVINO (Intel GPUs such as Iris Xe and Arc)

## Limitations

@@ -43,8 +43,9 @@ You do not need to redo any machine learning jobs after enabling hardware accele

#### OpenVINO

- The server must have a discrete GPU, i.e. Iris Xe or Arc. Expect issues when attempting to use integrated graphics.
- Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
- Expect higher RAM usage when using OpenVINO compared to CPU processing.

## Setup
@@ -36,11 +36,15 @@ You can enable automatic backup on supported devices. For more information see [
If you have a large number of photos on the device, and you would prefer not to back up all the photos, then it might be prudent to only back up selected photos from the device to the Immich server.

First, you need to enable the Storage Indicator in your app's settings. Navigate to **<ins>Settings -> Photo Grid</ins>** and enable **"Show Storage indicator on asset tiles"**; this makes it easy to distinguish local-only assets and synced assets.

:::note

This will enable a small cloud icon on the bottom right corner of the asset tile, indicating that the asset is synced to the server:

1. <Icon path={mdiCloudOffOutline} size={1} /> - Local-only asset; not synced to the server
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server :::
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server

:::

Now make sure that the local album is selected in the backup screen (steps 1-2 above). You can find these albums listed in **<ins>Library -> On this device</ins>**. To selectively upload photos from these albums, simply select the local-only photos and tap on the "Upload" button in the dynamic bottom menu.
@@ -68,7 +68,7 @@ After bringing down the containers with `docker compose down` and back up with `
:::note
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8082` to the microservices container's ports.
Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
To configure these ports see [`IMMICH_API_METRICS_PORT` & `IMMICH_MICROSERVICES_METRICS_PORT`](../install/environment-variables/#general).
To configure these ports see [`IMMICH_API_METRICS_PORT` & `IMMICH_MICROSERVICES_METRICS_PORT`](/docs/install/environment-variables/#general).
:::

### Usage

@@ -31,6 +31,7 @@ The filters smart search allows you to search by include:
- Not in any album
- Archived
- Favorited
- Rating

<Tabs>
<TabItem value="Computer" label="Computer" default>
@@ -8,22 +8,23 @@ For the full list, refer to the [Immich source code](https://github.com/immich-a

## Image formats

| Format    | Extension(s)                  |     Supported?     | Notes           |
| :-------- | :---------------------------- | :----------------: | :-------------- |
| `AVIF`    | `.avif`                       | :white_check_mark: |                 |
| `BMP`     | `.bmp`                        | :white_check_mark: |                 |
| `GIF`     | `.gif`                        | :white_check_mark: |                 |
| `HEIC`    | `.heic`                       | :white_check_mark: |                 |
| `HEIF`    | `.heif`                       | :white_check_mark: |                 |
| `JPEG`    | `.webp` `.jpg` `.jpe` `.insp` | :white_check_mark: |                 |
| `JPEG XL` | `.jxl`                        | :white_check_mark: |                 |
| `PNG`     | `.webp`                       | :white_check_mark: |                 |
| `PSD`     | `.psd`                        | :white_check_mark: | Adobe Photoshop |
| `RAW`     | `.raw`                        | :white_check_mark: |                 |
| `RW2`     | `.rw2`                        | :white_check_mark: |                 |
| `SVG`     | `.svg`                        | :white_check_mark: |                 |
| `TIFF`    | `.tif` `.tiff`                | :white_check_mark: |                 |
| `WEBP`    | `.webp`                       | :white_check_mark: |                 |
| Format      | Extension(s)                  |     Supported?     | Notes           |
| :---------- | :---------------------------- | :----------------: | :-------------- |
| `AVIF`      | `.avif`                       | :white_check_mark: |                 |
| `BMP`       | `.bmp`                        | :white_check_mark: |                 |
| `GIF`       | `.gif`                        | :white_check_mark: |                 |
| `HEIC`      | `.heic`                       | :white_check_mark: |                 |
| `HEIF`      | `.heif`                       | :white_check_mark: |                 |
| `JPEG 2000` | `.jp2`                        | :white_check_mark: |                 |
| `JPEG`      | `.webp` `.jpg` `.jpe` `.insp` | :white_check_mark: |                 |
| `JPEG XL`   | `.jxl`                        | :white_check_mark: |                 |
| `PNG`       | `.webp`                       | :white_check_mark: |                 |
| `PSD`       | `.psd`                        | :white_check_mark: | Adobe Photoshop |
| `RAW`       | `.raw`                        | :white_check_mark: |                 |
| `RW2`       | `.rw2`                        | :white_check_mark: |                 |
| `SVG`       | `.svg`                        | :white_check_mark: |                 |
| `TIFF`      | `.tif` `.tiff`                | :white_check_mark: |                 |
| `WEBP`      | `.webp`                       | :white_check_mark: |                 |

## Video formats
docs/docs/guides/better-facial-clusters.md (new file, 72 lines)
@@ -0,0 +1,72 @@
# Better Facial Recognition Clusters

## Purpose

This guide explains how to optimize facial recognition in systems with large image libraries. By following these steps, you'll achieve better clustering of faces, reducing the need for manual merging.

---

## Important Notes

- **Best Suited For:** Large image libraries after importing a significant number of images.
- **Warning:** This method deletes all previously assigned names.
- **Tip:** **Always take a [backup](/docs/administration/backup-and-restore#database) before proceeding!**

---

## Step-by-Step Instructions

### Objective

To enhance face clustering and ensure the model effectively identifies faces using qualitative initial data.

---

### Steps

#### 1. Adjust Machine Learning Settings

Navigate to:
**Admin → Administration → Settings → Machine Learning Settings**

Make the following changes:

- **Maximum recognition distance (Optional):**
  Lower this value, e.g., to **0.4**, if the library contains people with similar facial features.
- **Minimum recognized faces:**
  Set this to a **high value** (e.g., 20 for libraries with a large number of assets (~100K+), and 10 for libraries with a medium number of assets (~40K+)).
  > A high value ensures clusters only include people whose faces appear at least that many times (e.g., 20) in the library, improving the initial clustering process.

---

#### 2. Run Reset Jobs

Go to:
**Admin → Administration → Settings → Jobs**

Perform the following:

1. **FACIAL RECOGNITION → Reset**

> These reset jobs rebuild the recognition model based on the new settings.

---

#### 3. Refine Recognition with Lower Thresholds

Once the reset jobs are complete, refine the recognition as follows:

- **Step 1:**
  Return to **Minimum recognized faces** in Machine Learning Settings and lower the value to **10** (in medium libraries, lower the value from 10 to 5).

  > Run the job: **FACIAL RECOGNITION → MISSING Mode**

- **Step 2:**
  Lower the value again to **3**.
  > Run the job: **FACIAL RECOGNITION → MISSING Mode**

:::tip try different values
For certain libraries with a larger or smaller number of assets, other settings will be better or worse. It is recommended to try different values **before assigning names** and see which settings work best for your library.
:::

---
@@ -6,7 +6,7 @@ This guide explains how to store generated and raw files with docker's volume mo
It is important to remember to update the backup settings after following the guide to back up the new backup paths if using automatic backup tools, especially `profile/`.
:::

In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server
In our `.env` file, we will define the paths we want to use. Note that you don't have to define all of these: UPLOAD_LOCATION will be the base folder that files are stored in by default, with the other paths acting as overrides.

```diff title=".env"
# You can find documentation for all the supported environment variables [here](/docs/install/environment-variables)
@@ -21,7 +21,7 @@ In our `.env` file, we will define variables that will help us in the future whe
...
```

After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container. These paths are where the mount attaches inside of the container, so don't change those.

```diff title="docker-compose.yml"
services:
@@ -35,7 +35,8 @@ services:
      - /etc/localtime:/etc/localtime:ro
```

Restart Immich to register the changes.
After making this change, you have to move the files over to the new folders to make sure Immich can find everything it needs. If you haven't uploaded anything important yet, you can also reset Immich entirely by deleting the database folder.
Then restart Immich to register the changes:

```
docker compose up -d
@@ -49,5 +50,3 @@ The `thumbs/` folder contains both the small thumbnails displayed in the timelin

The storage metrics of the Immich server will track available storage at `UPLOAD_LOCATION`, so the administrator must set up some sort of monitoring to ensure the storage does not run out of space. The `profile/` folder is much smaller, usually less than 1 MB.
:::

Thanks to [Jrasm91](https://github.com/immich-app/immich/discussions/2110#discussioncomment-5477767) for writing the guide.
@@ -5,9 +5,9 @@ Keep in mind that mucking around in the database might set the moon on fire. Avo
 :::

 :::tip
-Run `docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME>` to connect to the database via the container directly.
+Run `docker exec -it immich_postgres psql --dbname=<DB_DATABASE_NAME> --username=<DB_USERNAME>` to connect to the database via the container directly.

-(Replace `<DB_USERNAME>` with the value from your [`.env` file](/docs/install/environment-variables#database)).
+(Replace `<DB_DATABASE_NAME>` and `<DB_USERNAME>` with the values from your [`.env` file](/docs/install/environment-variables#database)).
 :::

 ## Assets

@@ -27,6 +27,14 @@ SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09
 SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
 ```

+```sql title="Find by ID"
+SELECT * FROM "assets" WHERE "id" = '9f94e60f-65b6-47b7-ae44-a4df7b57f0e9';
+```
+
+```sql title="Find by partial ID"
+SELECT * FROM "assets" WHERE "id"::text LIKE '%ab431d3a%';
+```

 :::note
 You can calculate the checksum for a particular file by using the command `sha1sum <filename>`.
 :::
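Putting the two together, here is a minimal sketch of a checksum lookup from the shell (this assumes the default container name from the tip above and that the `checksum` column stores the raw SHA-1 digest as `bytea`; verify against your schema before relying on it):

```bash
# Compute a file's SHA-1 and search for the matching asset.
HASH=$(sha1sum photo.jpg | cut -d' ' -f1)
docker exec -it immich_postgres psql --dbname=immich --username=postgres \
  -c "SELECT \"id\", \"originalPath\" FROM \"assets\" WHERE \"checksum\" = '\\x${HASH}';"
```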
@@ -37,7 +37,7 @@ You can alternatively download these two files from your browser and move them t
 </CodeBlock>

 - Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets. It should be a new directory on the server with enough free space.
-- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publically exposed, so this password is only used for local authentication.
+- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publicly exposed, so this password is only used for local authentication.
   To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`. `pwgen` is a handy utility for this.
 - Set your timezone by uncommenting the `TZ=` line.
 - Populate custom database information if necessary.
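The list above mentions `pwgen`; as a quick sketch (assuming `pwgen` is installed, its default character set is alphanumeric only, which satisfies the `A-Za-z0-9` constraint):

```bash
# Generate one fully random 32-character alphanumeric password for DB_PASSWORD.
pwgen -s 32 1
```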
@@ -11,7 +11,7 @@ Just restarting the containers does not replace the environment within the conta

 In order to recreate the container using docker compose, run `docker compose up -d`.
 In most cases docker will recognize that the `.env` file has changed and recreate the affected containers.
-If this should not work, try running `docker compose up -d --force-recreate`.
+If this does not work, try running `docker compose up -d --force-recreate`.

 :::
@@ -20,8 +20,8 @@ If this should not work, try running `docker compose up -d --force-recreate`.

 | Variable | Description | Default | Containers |
 | :--- | :--- | :---: | :--- |
 | `IMMICH_VERSION` | Image tags | `release` | server, machine learning |
-| `UPLOAD_LOCATION` | Host Path for uploads | | server |
-| `DB_DATA_LOCATION` | Host Path for Postgres database | | database |
+| `UPLOAD_LOCATION` | Host path for uploads | | server |
+| `DB_DATA_LOCATION` | Host path for Postgres database | | database |

 :::tip
 These environment variables are used by the `docker-compose.yml` file and do **NOT** affect the containers directly.
@@ -33,15 +33,15 @@ These environment variables are used by the `docker-compose.yml` file and do **N
 | :--- | :--- | :---: | :--- | :--- |
 | `TZ` | Timezone | <sup>\*1</sup> | server | microservices |
 | `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
-| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
-| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
+| `IMMICH_LOG_LEVEL` | Log level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
+| `IMMICH_MEDIA_LOCATION` | Media location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
 | `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
 | `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
-| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
+| `CPU_CORES` | Number of cores available to the Immich server | auto-detected CPU core count | server | |
 | `IMMICH_API_METRICS_PORT` | Port for the OTEL metrics | `8081` | server | api |
 | `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
 | `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
-| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
+| `IMMICH_TRUSTED_PROXIES` | List of comma-separated IPs set as trusted proxies | | server | api |
 | `IMMICH_IGNORE_MOUNT_CHECK_ERRORS` | See [System Integrity](/docs/administration/system-integrity) | | server | api, microservices |

 \*1: `TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
@@ -50,7 +50,7 @@ These environment variables are used by the `docker-compose.yml` file and do **N
 \*2: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things; you should use the `UPLOAD_LOCATION` variable instead.

 \*3: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
-It only need to be set if the Immich deployment method is changing.
+It only needs to be set if the Immich deployment method is changing.

 ## Workers
@@ -75,12 +75,12 @@ Information on the current workers can be found [here](/docs/administration/jobs

 | Variable | Description | Default | Containers |
 | :--- | :--- | :---: | :--- |
 | `DB_URL` | Database URL | | server |
-| `DB_HOSTNAME` | Database Host | `database` | server |
-| `DB_PORT` | Database Port | `5432` | server |
-| `DB_USERNAME` | Database User | `postgres` | server, database<sup>\*1</sup> |
-| `DB_PASSWORD` | Database Password | `postgres` | server, database<sup>\*1</sup> |
-| `DB_DATABASE_NAME` | Database Name | `immich` | server, database<sup>\*1</sup> |
-| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database Vector Extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
+| `DB_HOSTNAME` | Database host | `database` | server |
+| `DB_PORT` | Database port | `5432` | server |
+| `DB_USERNAME` | Database user | `postgres` | server, database<sup>\*1</sup> |
+| `DB_PASSWORD` | Database password | `postgres` | server, database<sup>\*1</sup> |
+| `DB_DATABASE_NAME` | Database name | `immich` | server, database<sup>\*1</sup> |
+| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database vector extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
 | `DB_SKIP_MIGRATIONS` | Whether to skip running migrations on startup (one of [`true`, `false`]) | `false` | server |

 \*1: The values of `DB_USERNAME`, `DB_PASSWORD`, and `DB_DATABASE_NAME` are passed to the Postgres container as the variables `POSTGRES_USER`, `POSTGRES_PASSWORD`, and `POSTGRES_DB` in `docker-compose.yml`.
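As an illustration, a `DB_URL` equivalent to the individual `DB_*` defaults in the table above would look like this (a sketch using a standard Postgres connection string; substitute your own credentials):

```bash
# .env: when DB_URL is set, the individual DB_* variables are ignored.
DB_URL=postgres://postgres:postgres@database:5432/immich
```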
@@ -103,18 +103,18 @@ When `DB_URL` is defined, the `DB_HOSTNAME`, `DB_PORT`, `DB_USERNAME`, `DB_PASSW

 | Variable | Description | Default | Containers |
 | :--- | :--- | :---: | :--- |
 | `REDIS_URL` | Redis URL | | server |
-| `REDIS_SOCKET` | Redis Socket | | server |
-| `REDIS_HOSTNAME` | Redis Host | `redis` | server |
-| `REDIS_PORT` | Redis Port | `6379` | server |
-| `REDIS_USERNAME` | Redis Username | | server |
-| `REDIS_PASSWORD` | Redis Password | | server |
-| `REDIS_DBINDEX` | Redis DB Index | `0` | server |
+| `REDIS_SOCKET` | Redis socket | | server |
+| `REDIS_HOSTNAME` | Redis host | `redis` | server |
+| `REDIS_PORT` | Redis port | `6379` | server |
+| `REDIS_USERNAME` | Redis username | | server |
+| `REDIS_PASSWORD` | Redis password | | server |
+| `REDIS_DBINDEX` | Redis DB index | `0` | server |

 :::info
 All `REDIS_` variables must be provided to all Immich workers, including `api` and `microservices`.

 `REDIS_URL` must start with `ioredis://` and then include a `base64` encoded JSON string for the configuration.
-More info can be found in the upstream [ioredis] documentation.
+More information can be found in the upstream [ioredis] documentation.

 When `REDIS_URL` or `REDIS_SOCKET` are defined, the `REDIS_HOSTNAME`, `REDIS_PORT`, `REDIS_USERNAME`, `REDIS_PASSWORD`, and `REDIS_DBINDEX` variables are ignored.
 :::
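As a concrete sketch of that encoding step (the JSON keys are whatever ioredis accepts as connection options; the sentinel values here are illustrative assumptions in the spirit of the Sentinel example JSON mentioned below):

```bash
# Base64-encode an ioredis options object and prefix it with ioredis://
# (-w 0 disables line wrapping; that flag is GNU coreutils specific).
JSON='{"sentinels":[{"host":"redis-sentinel-1","port":26379}],"name":"redis-sentinel"}'
echo "REDIS_URL=ioredis://$(echo -n "$JSON" | base64 -w 0)"
```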
@@ -148,24 +148,28 @@ Redis (Sentinel) URL example JSON before encoding:

 ## Machine Learning

-| Variable | Description | Default | Containers |
-| :--- | :--- | :---: | :--- |
-| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
-| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
-| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
-| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
-| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
-| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
-| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
-| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
-| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
-| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
-| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
-| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
-| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
-| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
+| Variable | Description | Default | Containers |
+| :--- | :--- | :---: | :--- |
+| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
+| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
+| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
+| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
+| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
+| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
+| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
+| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
+| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Comma-separated list of (textual) CLIP model(s) to preload and cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Comma-separated list of (visual) CLIP model(s) to preload and cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Comma-separated list of (recognition) facial recognition model(s) to preload and cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Comma-separated list of (detection) facial recognition model(s) to preload and cache | | machine learning |
+| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
+| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
+| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
+| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
+| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
+| `MACHINE_LEARNING_PING_TIMEOUT` | How long (ms) to wait for a PING response when checking if an ML server is available | `2000` | server |
+| `MACHINE_LEARNING_AVAILABILITY_BACKOFF_TIME` | How long to ignore ML servers that are offline before trying again | `30000` | server |

 \*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service, and then tune the others.
@@ -177,7 +181,11 @@ Redis (Sentinel) URL example JSON before encoding:

 :::info

-Other machine learning parameters can be tuned from the admin UI.
+While the `textual` model is the only one required for smart search, some users may experience slow first searches
+due to backups triggering loading of the other models into memory, which blocks other requests until completed.
+To avoid this, you can preload the other models (`visual`, `recognition`, and `detection`) if you have enough RAM to do so.
+
+Additional machine learning parameters can be tuned from the admin UI.

 :::
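For instance, a minimal `.env` sketch that preloads both halves of a CLIP model (the model name `ViT-B-32__openai` is an assumption; use whichever model your instance is configured with):

```bash
# .env: preload the textual and visual CLIP models so the first smart
# search does not block while the visual model loads.
MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL=ViT-B-32__openai
MACHINE_LEARNING_PRELOAD__CLIP__VISUAL=ViT-B-32__openai
```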
@@ -208,7 +216,7 @@ the `_FILE` variable should be set to the path of a file containing the variable
 details on how to use Docker Secrets in the Postgres image.

 \*2: See [this comment][docker-secrets-example] for an example of how
-to use use a Docker secret for the password in the Redis container.
+to use a Docker secret for the password in the Redis container.

 [tz-list]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
 [docker-secrets-example]: https://github.com/docker-library/redis/issues/46#issuecomment-335326234
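A minimal sketch of the `_FILE` pattern (assuming `DB_PASSWORD_FILE` is among the supported `_FILE` variables; check the list above for your version):

```bash
# Keep the password out of .env by pointing Immich at a file instead.
mkdir -p ./secrets && echo 'my-database-password' > ./secrets/db_password
chmod 600 ./secrets/db_password
# Then set, e.g. in the container environment or via a compose secret:
# DB_PASSWORD_FILE=/run/secrets/db_password
```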
@@ -27,7 +27,7 @@ The script will perform the following actions:

 1. Download [docker-compose.yml](https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml), and the [.env](https://github.com/immich-app/immich/releases/latest/download/example.env) file from the main branch of the [repository](https://github.com/immich-app/immich).
 2. Start the containers.

-The web application will be available at `http://<machine-ip-address>:2283`, and the server URL for the mobile app will be `http://<machine-ip-address>:2283/api`
+The web application and mobile app will be available at `http://<machine-ip-address>:2283`

 The directory which is used to store the library files is `./immich-app` relative to the current directory.
docs/docs/install/synology.md (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
---
|
||||
sidebar_position: 85
|
||||
---
|
||||
|
||||
# Synology [Community]
|
||||
|
||||
:::note
|
||||
This is a community contribution and not officially supported by the Immich team, but included here for convenience.
|
||||
|
||||
Community support can be found in the dedicated channel on the [Discord Server](https://discord.immich.app/).
|
||||
|
||||
**Please report app issues to the corresponding [Github Repository](https://github.com/truenas/charts/tree/master/community/immich).**
|
||||
:::
|
||||
|
||||
Immich can easily be installed on a Synology NAS using Container Manager within DSM. If you have not installed Container Manager already, you can install it in the Packages Center. Refer to the [Container Manager docs](https://kb.synology.com/en-us/DSM/help/ContainerManager/docker_desc?version=7) for more information on using Container Manager.
|
||||
|
||||
## Step 1 - Download the required files
|
||||
|
||||
Create a directory of your choice (e.g. `./immich-app`) to house Immich. In general, it's a best practice to have all Docker-based applications running under the `./docker` directory, so in this case, your directory structure will look like `./docker/immich-app`.
|
||||
|
||||
Now create a `./postgres` and `./library` directory as sub-directories of the `./docker/immich-app`.
|
||||
|
||||
When you're all done, you should have the following:
|
||||
|
||||
- `./docker/immich-app/postgres`
|
||||
- `./docker/immich-app/library`
|
||||
|
||||
Download [`docker-compose.yml`](https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml) and [`example.env`](https://github.com/immich-app/immich/releases/latest/download/example.env) to your computer. Upload the files to the `./docker/immich-app` directory.
|
||||
|
||||
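If you prefer to work over SSH instead of File Station, the same layout can be created from a shell (a sketch; paths assume the structure above, and `example.env` is saved directly as `.env` since Step 2 renames it anyway):

```bash
# Create the project layout and fetch the two files directly on the NAS.
mkdir -p ./docker/immich-app/postgres ./docker/immich-app/library
cd ./docker/immich-app
wget https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
wget -O .env https://github.com/immich-app/immich/releases/latest/download/example.env
```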
## Step 2 - Populate the .env file with custom values

Follow [Step 2 in Docker Compose](./docker-compose#step-2---populate-the-env-file-with-custom-values) for instructions on customizing the `.env` file, then return to this guide to continue.

## Step 3 - Create a new project in Container Manager

Open Container Manager, select the "**Project**" action on the left navigation bar, and then click "**Create**".

![Create a new project in Container Manager](/img/synology-container-manager-create-project.png)

In the settings of your new project, set "**Project name**" to a name you'll remember, such as _immich-app_. When setting the "**Path**", select the `./docker/immich-app` directory you created earlier. Doing so will prompt a message to use the existing `docker-compose.yml` already present in the directory for your project. Click "**OK**" to continue.

![Set the project path](/img/synology-container-manager-set-path.png)

The following screen will give you the option to further customize your `docker-compose.yml` file, giving you a warning regarding the `start_interval` property. Under the `healthcheck` heading, remove the `start_interval: 30s` line completely and click "**Next**".

![Customize docker-compose.yml](/img/synology-container-manager-customize-docker-compose.png)

Skip the section asking to set up a portal for Web Station, and then complete the wizard, which will build and start the containers for your project.

Once your containers are successfully running, navigate to the "**Container**" section of Container Manager, right-click on the "**immich-server**" container, and choose "**Details**".

Scroll to the bottom of the "**Details**" section and find the `IP Address` of the container, located in the `Network` section. Take note of the container's IP address, as you will need it for **Step 4**.

![Container details showing the IP address](/img/synology-container-manager-container-details.png)

## Step 4 - Configure Firewall Settings

Once your project completes the build process, your containers will start. In order to access Immich from your browser, you need to configure the firewall settings on your Synology NAS.

Open "**Control Panel**" on your Synology NAS and select "**Security**". Navigate to "**Firewall**".

![Synology firewall rules](/img/synology-firewall-rules.png)

Click "**Edit Rules**" and add the following firewall rules:

- Add a "**Source IP**" rule for the IP address of your container that you obtained in Step 3 above
- Add a "**Ports**" rule for the port specified in the `docker-compose.yml`, which should be `2283`

## Next Steps

Read the [Post Installation](/docs/install/post-install.mdx) steps or set up optional features below.

### Setting up optional features

- [External Libraries](/docs/features/libraries.md): Adding your existing photo library to Immich
- [Hardware Transcoding](/docs/features/hardware-transcoding.md): Speeding up video transcoding
- [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md): Speeding up various machine learning tasks in Immich
@@ -41,7 +41,7 @@ className="border rounded-xl"
 :::info Permissions
 The **pgData** dataset must be owned by the user `netdata` (UID 999) for postgres to start. The other datasets must be owned by the user `root` (UID 0) or a group that includes the user `root` (UID 0) for immich to have the necessary permissions.

-If the **library** dataset uses ACL it must have [ACL mode](https://www.truenas.com/docs/core/coretutorials/storage/pools/permissions/#access-control-lists) set to `Passthrough` if you plan on using a [storage template](/docs/administration/storage-template.mdx) and the dataset is configured for network sharing (its ACL type is set to `SMB/NFSv4`). When the template is applied and files need to be moved from **upload** to **library**, immich performs `chmod` internally and needs to be allowed to execute the command. [More info.](https://github.com/immich-app/immich/pull/13017)
+If the **library** dataset uses ACL it must have [ACL mode](https://www.truenas.com/docs/core/coretutorials/storage/pools/permissions/#access-control-lists) set to `Passthrough` if you plan on using a [storage template](/docs/administration/storage-template.mdx) and the dataset is configured for network sharing (its ACL type is set to `SMB/NFSv4`). When the template is applied and files need to be moved from **upload** to **library**, Immich performs `chmod` internally and needs to be allowed to execute the command. [More info.](https://github.com/immich-app/immich/pull/13017)
 :::

 ## Installing the Immich Application
@@ -160,6 +160,10 @@ The image above has example values.

 ### Additional Storage [(External Libraries)](/docs/features/libraries)

+:::danger Advanced Users Only
+This feature should only be used by advanced users. If this is your first time installing Immich, then DO NOT mount an external library until you have a working setup. Also, your mount path MUST be something unique and should NOT be your library or upload location or a Linux directory like `/lib`. The picture below shows a valid example.
+:::
+
 <img
 src={require('./img/truenas10.webp').default}
 width="40%"
@@ -168,7 +172,7 @@ className="border rounded-xl"
 />

 You may configure [External Libraries](/docs/features/libraries) by mounting them using **Additional Storage**.
-The **Mount Path** is the loaction you will need to copy and paste into the External Library settings within Immich.
+The **Mount Path** is the location you will need to copy and paste into the External Library settings within Immich.
 The **Host Path** is the location on the TrueNAS SCALE server where your external library is located.

 <!-- A section for Labels would go here but I don't know what they do. -->
@@ -194,7 +198,7 @@ The **CPU** value was specified in a different format with a default of `4000m`
 The **Memory** value was specified in a different format with a default of `8Gi` which is 8 GiB of RAM. The value was specified in bytes or a number with a measurement suffix. Examples: `129M`, `123Mi`, `1000000000`
 :::

-Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passtrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)
+Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passthrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)

 ### Install
@@ -72,7 +72,7 @@ alt="Select Plugins > Compose.Manager > Add New Stack > Label it Immich"
 </ul>
 </details>

-5. Click "**Save Changes**", you will be promoted to edit stack UI labels, just leave this blank and click "**Ok**"
+5. Click "**Save Changes**", you will be prompted to edit stack UI labels, just leave this blank and click "**Ok**"
 6. Select the cog ⚙️ next to Immich, click "**Edit Stack**", then click "**Env File**"
 7. Paste the entire contents of the [Immich example.env](https://github.com/immich-app/immich/releases/latest/download/example.env) file into the Unraid editor, then **before saving** edit the following:

@@ -111,7 +111,7 @@ alt="Go to Docker Tab and visit the address listed next to immich-web"

 <details >
 <summary>Using the FolderView plugin for organizing your Docker containers? Click me! Otherwise you're complete!</summary>
-<p>If you are using the FolderView plugin go the Docker tab and select "<b>New Folder</b>".<br />Label it <i>"Immich"</i> and use this URL as the logo: https://raw.githubusercontent.com/immich-app/immich/main/design/immich-logo.webp<br/>Then simply select all the Immich related containers before clicking "<b>Submit</b>"</p>
+<p>If you are using the FolderView plugin go the Docker tab and select "<b>New Folder</b>".<br />Label it <i>"Immich"</i> and use this URL as the logo: https://raw.githubusercontent.com/immich-app/immich/main/design/immich-logo.png<br/>Then simply select all the Immich related containers before clicking "<b>Submit</b>"</p>
 <img
 src={require('./img/unraid07.webp').default}
 width="80%"
@@ -1,8 +0,0 @@
----
-sidebar_position: 2
----
-
-# Comparison
-
-If you're new here and came from other asset self-hosting alternatives you might want to look at a comparison between Immich and your current solution.
-Here you can see a [comparison between the various OpenSource Photo Libraries](https://meichthys.github.io/foss_photo_libraries/) including Immich.
@@ -1,3 +1,3 @@
-Login to the mobile app with the server endpoint URL at `http://<machine-ip-address>:2283/api`
+Login to the mobile app with the server endpoint URL at `http://<machine-ip-address>:2283`

 <img src={require('./img/sign-in-phone.webp').default} width='50%' title='Mobile App Sign In' />
@@ -110,9 +110,9 @@ const config = {
         label: 'API',
       },
       {
-        to: '/blog',
+        href: 'https://immich.store',
         position: 'right',
-        label: 'Blog',
+        label: 'Merch',
       },
       {
         href: 'https://github.com/immich-app/immich',
docs/package-lock.json (generated, 6636 lines): diff suppressed because it is too large
@@ -16,8 +16,8 @@
     "write-heading-ids": "docusaurus write-heading-ids"
   },
   "dependencies": {
-    "@docusaurus/core": "~3.5.2",
-    "@docusaurus/preset-classic": "~3.5.2",
+    "@docusaurus/core": "~3.7.0",
+    "@docusaurus/preset-classic": "~3.7.0",
     "@mdi/js": "^7.3.67",
     "@mdi/react": "^1.6.1",
     "@mdx-js/react": "^3.0.0",
@@ -35,7 +35,9 @@
     "url": "^0.11.0"
   },
   "devDependencies": {
-    "@docusaurus/module-type-aliases": "~3.5.2",
+    "@docusaurus/module-type-aliases": "~3.7.0",
+    "@docusaurus/tsconfig": "^3.7.0",
+    "@docusaurus/types": "^3.7.0",
     "prettier": "^3.2.4",
     "typescript": "^5.1.6"
   },
@@ -55,6 +57,6 @@
     "node": ">=20"
   },
   "volta": {
-    "node": "22.12.0"
+    "node": "22.14.0"
   }
 }
@@ -53,6 +53,11 @@ const guides: CommunityGuidesProps[] = [
     description: 'How to configure an existing fail2ban installation to block incorrect login attempts.',
     url: 'https://github.com/immich-app/immich/discussions/3243#discussioncomment-6681948',
   },
+  {
+    title: 'Immich remote access with NordVPN Meshnet',
+    description: 'Access Immich with an end-to-end encrypted connection.',
+    url: 'https://meshnet.nordvpn.com/how-to/remote-files-media-access/immich-remote-access',
+  },
 ];

 function CommunityGuide({ title, description, url }: CommunityGuidesProps): JSX.Element {
@@ -99,6 +99,11 @@ const projects: CommunityProjectProps[] = [
     description: 'Downloads a configurable number of random photos based on people or album ID.',
     url: 'https://github.com/jon6fingrs/immich-dl',
   },
+  {
+    title: 'Immich Upload Optimizer',
+    description: 'Automatically optimize files uploaded to Immich in order to save storage space',
+    url: 'https://github.com/miguelangel-nubla/immich-upload-optimizer',
+  },
 ];

 function CommunityProject({ title, description, url }: CommunityProjectProps): JSX.Element {
@@ -24,10 +24,13 @@ export default function VersionSwitcher(): JSX.Element {
       { label: 'Next', url: 'https://main.preview.immich.app' },
       { label: 'Latest', url: 'https://immich.app' },
       ...archiveVersions,
-    ];
+    ].map(({ label, url }) => ({
+      label,
+      url: new URL(url),
+    }));
     setVersions(allVersions);

-    const activeVersion = allVersions.find((version) => new URL(version.url).origin === window.location.origin);
+    const activeVersion = allVersions.find((version) => version.url.origin === window.location.origin);
     if (activeVersion) {
       setLabel(activeVersion.label);
     }
@@ -44,12 +47,12 @@ export default function VersionSwitcher(): JSX.Element {
   return (
     versions.length > 0 && (
       <DropdownNavbarItem
-        className="navbar__item"
+        className="version-switcher-34ab39"
         label={label}
         mobile={windowSize === 'mobile'}
         items={versions.map(({ label, url }) => ({
           label,
-          to: url,
+          to: new URL(location.pathname + location.search + location.hash, url).href,
           target: '_self',
         }))}
       />
@@ -75,6 +75,11 @@ div[class^='announcementBar_'] {
   font-weight: 500;
 }

+/* workaround for version switcher PR 15894 */
+div[class*='navbar__items'] > li:has(a[class*='version-switcher-34ab39']) {
+  display: none;
+}
+
 code {
   font-weight: 600;
 }
@@ -50,6 +50,13 @@ function HomepageHeader() {
           >
             Demo
           </Link>
+
+          <Link
+            className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary/10 dark:bg-gray-300 rounded-xl hover:no-underline text-immich-primary dark:text-immich-dark-bg font-bold uppercase"
+            to="https://immich.store"
+          >
+            Buy Merch
+          </Link>
         </div>

         <div className="my-12 flex gap-1 font-medium place-items-center place-content-center text-immich-primary dark:text-immich-dark-primary">
@@ -73,9 +80,9 @@ function HomepageHeader() {
         />

         <div>
-          <p className="font-bold text-2xl md:text-5xl ">Download mobile app</p>
+          <p className="font-bold text-2xl md:text-5xl ">Download the mobile app</p>
           <p className="text-lg">
-            Download Immich app and start backing up your photos and videos securely to your own server
+            Download the Immich app and start backing up your photos and videos securely to your own server
           </p>
         </div>
         <div className="flex flex-col sm:flex-row place-items-center place-content-center mt-4 gap-1">
@@ -242,6 +242,13 @@ const roadmap: Item[] = [
 ];

 const milestones: Item[] = [
+  {
+    icon: mdiStar,
+    iconColor: 'gold',
+    title: '60,000 Stars',
+    description: 'Reached 60K Stars on GitHub!',
+    getDateLabel: withLanguage(new Date(2025, 2, 4)),
+  },
   withRelease({
     icon: mdiLinkEdit,
     iconColor: 'crimson',
docs/static/archived-versions.json (vendored, 74 lines)
@@ -1,4 +1,56 @@
 [
+  {
+    "label": "v1.129.0",
+    "url": "https://v1.129.0.archive.immich.app"
+  },
+  {
+    "label": "v1.128.0",
+    "url": "https://v1.128.0.archive.immich.app"
+  },
+  {
+    "label": "v1.127.0",
+    "url": "https://v1.127.0.archive.immich.app"
+  },
+  {
+    "label": "v1.126.1",
+    "url": "https://v1.126.1.archive.immich.app"
+  },
+  {
+    "label": "v1.126.0",
+    "url": "https://v1.126.0.archive.immich.app"
+  },
+  {
+    "label": "v1.125.7",
+    "url": "https://v1.125.7.archive.immich.app"
+  },
+  {
+    "label": "v1.125.6",
+    "url": "https://v1.125.6.archive.immich.app"
+  },
+  {
+    "label": "v1.125.5",
+    "url": "https://v1.125.5.archive.immich.app"
+  },
+  {
+    "label": "v1.125.3",
+    "url": "https://v1.125.3.archive.immich.app"
+  },
+  {
+    "label": "v1.125.2",
+    "url": "https://v1.125.2.archive.immich.app"
+  },
+  {
+    "label": "v1.125.1",
+    "url": "https://v1.125.1.archive.immich.app"
+  },
+  {
+    "label": "v1.124.2",
+    "url": "https://v1.124.2.archive.immich.app"
+  },
+  {
+    "label": "v1.124.1",
+    "url": "https://v1.124.1.archive.immich.app"
+  },
   {
     "label": "v1.124.0",
     "url": "https://v1.124.0.archive.immich.app"
@@ -153,46 +205,46 @@
   },
   {
     "label": "v1.105.1",
-    "url": "https://v1.105.1.archive.immich.app/"
+    "url": "https://v1.105.1.archive.immich.app"
   },
   {
     "label": "v1.105.0",
-    "url": "https://v1.105.0.archive.immich.app/"
+    "url": "https://v1.105.0.archive.immich.app"
   },
   {
     "label": "v1.104.0",
-    "url": "https://v1.104.0.archive.immich.app/"
+    "url": "https://v1.104.0.archive.immich.app"
   },
   {
     "label": "v1.103.1",
-    "url": "https://v1.103.1.archive.immich.app/"
+    "url": "https://v1.103.1.archive.immich.app"
   },
   {
     "label": "v1.103.0",
-    "url": "https://v1.103.0.archive.immich.app/"
+    "url": "https://v1.103.0.archive.immich.app"
   },
   {
     "label": "v1.102.3",
-    "url": "https://v1.102.3.archive.immich.app/"
+    "url": "https://v1.102.3.archive.immich.app"
   },
   {
     "label": "v1.102.2",
-    "url": "https://v1.102.2.archive.immich.app/"
+    "url": "https://v1.102.2.archive.immich.app"
   },
   {
     "label": "v1.102.1",
-    "url": "https://v1.102.1.archive.immich.app/"
+    "url": "https://v1.102.1.archive.immich.app"
   },
   {
     "label": "v1.102.0",
-    "url": "https://v1.102.0.archive.immich.app/"
+    "url": "https://v1.102.0.archive.immich.app"
   },
   {
     "label": "v1.101.0",
-    "url": "https://v1.101.0.archive.immich.app/"
+    "url": "https://v1.101.0.archive.immich.app"
   },
   {
     "label": "v1.100.0",
-    "url": "https://v1.100.0.archive.immich.app/"
+    "url": "https://v1.100.0.archive.immich.app"
   }
 ]
BIN docs/static/img/synology-container-manager-container-details.png (vendored, new file; binary not shown, 164 KiB)
BIN docs/static/img/synology-container-manager-create-project.png (vendored, new file; binary not shown, 112 KiB)
BIN docs/static/img/synology-container-manager-customize-docker-compose.png (vendored, new file; binary not shown, 50 KiB)
BIN docs/static/img/synology-container-manager-set-path.png (vendored, new file; binary not shown, 49 KiB)
BIN docs/static/img/synology-firewall-rules.png (vendored, new file; binary not shown, 81 KiB)
@@ -5,7 +5,7 @@ module.exports = {
     preflight: false, // disable Tailwind's reset
   },
   content: ['./src/**/*.{js,jsx,ts,tsx}', './{docs,blog}/**/*.{md,mdx}'], // my markdown stuff is in ../docs, not /src
-  darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settigns
+  darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settings
   theme: {
     extend: {
       colors: {
@@ -1,9 +1,8 @@
 {
   // This file is not used in compilation. It is here just for a nice editor experience.
-  "extends": "@tsconfig/docusaurus/tsconfig.json",
+  "extends": "@docusaurus/tsconfig",
   "compilerOptions": {
-    "baseUrl": ".",
-    "module": "Node16"
+    "baseUrl": "."
   }
 }
@@ -1 +1 @@
-22.12.0
+22.14.0
@@ -34,10 +34,10 @@ services:
       - 2285:2285

   redis:
-    image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
+    image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8

   database:
-    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
+    image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
     command: -c fsync=off -c shared_preload_libraries=vectors.so
     environment:
       POSTGRES_PASSWORD: postgres
e2e/package-lock.json (generated, 1213 lines): diff suppressed because it is too large
@@ -1,6 +1,6 @@
 {
   "name": "immich-e2e",
-  "version": "1.124.0",
+  "version": "1.129.0",
   "description": "",
   "main": "index.js",
   "type": "module",
@@ -25,20 +25,20 @@
     "@immich/sdk": "file:../open-api/typescript-sdk",
     "@playwright/test": "^1.44.1",
     "@types/luxon": "^3.4.2",
-    "@types/node": "^22.10.2",
+    "@types/node": "^22.13.5",
     "@types/oidc-provider": "^8.5.1",
     "@types/pg": "^8.11.0",
    "@types/pngjs": "^6.0.4",
     "@types/supertest": "^6.0.2",
     "@typescript-eslint/eslint-plugin": "^8.15.0",
     "@typescript-eslint/parser": "^8.15.0",
-    "@vitest/coverage-v8": "^2.0.5",
+    "@vitest/coverage-v8": "^3.0.0",
     "eslint": "^9.14.0",
-    "eslint-config-prettier": "^9.1.0",
+    "eslint-config-prettier": "^10.0.0",
     "eslint-plugin-prettier": "^5.1.3",
     "eslint-plugin-unicorn": "^56.0.1",
     "exiftool-vendored": "^28.3.1",
-    "globals": "^15.9.0",
+    "globals": "^16.0.0",
     "jose": "^5.6.3",
     "luxon": "^3.4.4",
     "oidc-provider": "^8.5.1",
@@ -50,9 +50,9 @@
     "supertest": "^7.0.0",
     "typescript": "^5.3.3",
     "utimes": "^5.2.1",
-    "vitest": "^2.0.5"
+    "vitest": "^3.0.0"
   },
   "volta": {
-    "node": "22.12.0"
+    "node": "22.14.0"
   }
 }
@@ -22,79 +22,92 @@ const user1NotShared = 'user1NotShared';
 const user2SharedUser = 'user2SharedUser';
 const user2SharedLink = 'user2SharedLink';
 const user2NotShared = 'user2NotShared';
+const user4DeletedAsset = 'user4DeletedAsset';
+const user4Empty = 'user4Empty';

 describe('/albums', () => {
   let admin: LoginResponseDto;
   let user1: LoginResponseDto;
   let user1Asset1: AssetMediaResponseDto;
   let user1Asset2: AssetMediaResponseDto;
+  let user4Asset1: AssetMediaResponseDto;
   let user1Albums: AlbumResponseDto[];
   let user2: LoginResponseDto;
   let user2Albums: AlbumResponseDto[];
+  let deletedAssetAlbum: AlbumResponseDto;
   let user3: LoginResponseDto; // deleted
+  let user4: LoginResponseDto;

   beforeAll(async () => {
     await utils.resetDatabase();

     admin = await utils.adminSetup();

-    [user1, user2, user3] = await Promise.all([
+    [user1, user2, user3, user4] = await Promise.all([
       utils.userSetup(admin.accessToken, createUserDto.user1),
       utils.userSetup(admin.accessToken, createUserDto.user2),
       utils.userSetup(admin.accessToken, createUserDto.user3),
+      utils.userSetup(admin.accessToken, createUserDto.user4),
     ]);

-    [user1Asset1, user1Asset2] = await Promise.all([
+    [user1Asset1, user1Asset2, user4Asset1] = await Promise.all([
       utils.createAsset(user1.accessToken, { isFavorite: true }),
       utils.createAsset(user1.accessToken),
+      utils.createAsset(user1.accessToken),
     ]);

-    user1Albums = await Promise.all([
-      utils.createAlbum(user1.accessToken, {
-        albumName: user1SharedEditorUser,
-        albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Editor }],
-        assetIds: [user1Asset1.id],
-      }),
-      utils.createAlbum(user1.accessToken, {
-        albumName: user1SharedLink,
-        assetIds: [user1Asset1.id],
-      }),
-      utils.createAlbum(user1.accessToken, {
-        albumName: user1NotShared,
-        assetIds: [user1Asset1.id, user1Asset2.id],
-      }),
-      utils.createAlbum(user1.accessToken, {
-        albumName: user1SharedViewerUser,
-        albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Viewer }],
-        assetIds: [user1Asset1.id],
+    [user1Albums, user2Albums, deletedAssetAlbum] = await Promise.all([
+      Promise.all([
+        utils.createAlbum(user1.accessToken, {
+          albumName: user1SharedEditorUser,
+          albumUsers: [
+            { userId: admin.userId, role: AlbumUserRole.Editor },
+            { userId: user2.userId, role: AlbumUserRole.Editor },
+          ],
+          assetIds: [user1Asset1.id],
+        }),
+        utils.createAlbum(user1.accessToken, {
+          albumName: user1SharedLink,
+          assetIds: [user1Asset1.id],
+        }),
+        utils.createAlbum(user1.accessToken, {
+          albumName: user1NotShared,
+          assetIds: [user1Asset1.id, user1Asset2.id],
+        }),
+        utils.createAlbum(user1.accessToken, {
+          albumName: user1SharedViewerUser,
+          albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Viewer }],
+          assetIds: [user1Asset1.id],
+        }),
+      ]),
+      Promise.all([
+        utils.createAlbum(user2.accessToken, {
+          albumName: user2SharedUser,
+          albumUsers: [
+            { userId: user1.userId, role: AlbumUserRole.Editor },
+            { userId: user3.userId, role: AlbumUserRole.Editor },
+          ],
+        }),
+        utils.createAlbum(user2.accessToken, { albumName: user2SharedLink }),
+        utils.createAlbum(user2.accessToken, { albumName: user2NotShared }),
+      ]),
+      utils.createAlbum(user4.accessToken, { albumName: user4DeletedAsset }),
+      utils.createAlbum(user4.accessToken, { albumName: user4Empty }),
+      utils.createAlbum(user3.accessToken, {
+        albumName: 'Deleted',
+        albumUsers: [{ userId: user1.userId, role: AlbumUserRole.Editor }],
       }),
     ]);

-    user2Albums = await Promise.all([
-      utils.createAlbum(user2.accessToken, {
-        albumName: user2SharedUser,
-        albumUsers: [
-          { userId: user1.userId, role: AlbumUserRole.Editor },
-          { userId: user3.userId, role: AlbumUserRole.Editor },
-        ],
-      }),
-      utils.createAlbum(user2.accessToken, { albumName: user2SharedLink }),
-      utils.createAlbum(user2.accessToken, { albumName: user2NotShared }),
-    ]);
-
-    await utils.createAlbum(user3.accessToken, {
-      albumName: 'Deleted',
-      albumUsers: [{ userId: user1.userId, role: AlbumUserRole.Editor }],
-    });
-
-    await addAssetsToAlbum(
-      { id: user2Albums[0].id, bulkIdsDto: { ids: [user1Asset1.id, user1Asset2.id] } },
-      { headers: asBearerAuth(user1.accessToken) },
-    );
-
-    user2Albums[0] = await getAlbumInfo({ id: user2Albums[0].id }, { headers: asBearerAuth(user2.accessToken) });
-
     await Promise.all([
+      addAssetsToAlbum(
+        { id: user2Albums[0].id, bulkIdsDto: { ids: [user1Asset1.id, user1Asset2.id] } },
+        { headers: asBearerAuth(user1.accessToken) },
+      ),
+      addAssetsToAlbum(
+        { id: deletedAssetAlbum.id, bulkIdsDto: { ids: [user4Asset1.id] } },
+        { headers: asBearerAuth(user4.accessToken) },
+      ),
       // add shared link to user1SharedLink album
       utils.createSharedLink(user1.accessToken, {
         type: SharedLinkType.Album,
@@ -107,7 +120,11 @@ describe('/albums', () => {
       }),
     ]);

-    await deleteUserAdmin({ id: user3.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) });
+    [user2Albums[0]] = await Promise.all([
+      getAlbumInfo({ id: user2Albums[0].id }, { headers: asBearerAuth(user2.accessToken) }),
+      deleteUserAdmin({ id: user3.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) }),
+      utils.deleteAssets(user1.accessToken, [user4Asset1.id]),
+    ]);
   });

   describe('GET /albums', () => {
@@ -142,6 +159,10 @@ describe('/albums', () => {
         ...user1Albums[0],
         assets: [expect.objectContaining({ isFavorite: false })],
         lastModifiedAssetTimestamp: expect.any(String),
+        startDate: expect.any(String),
+        endDate: expect.any(String),
+        shared: true,
+        albumUsers: expect.any(Array),
       });
     });

@@ -280,6 +301,25 @@ describe('/albums', () => {
       expect(status).toBe(200);
       expect(body).toHaveLength(5);
     });
+
+    it('should return empty albums and albums where all assets are deleted', async () => {
+      const { status, body } = await request(app).get('/albums').set('Authorization', `Bearer ${user4.accessToken}`);
+      expect(status).toBe(200);
+      expect(body).toEqual(
+        expect.arrayContaining([
+          expect.objectContaining({
+            ownerId: user4.userId,
+            albumName: user4DeletedAsset,
+            shared: false,
+          }),
+          expect.objectContaining({
+            ownerId: user4.userId,
+            albumName: user4Empty,
+            shared: false,
+          }),
+        ]),
+      );
+    });
   });

   describe('GET /albums/:id', () => {
@@ -299,6 +339,10 @@ describe('/albums', () => {
         ...user1Albums[0],
         assets: [expect.objectContaining({ id: user1Albums[0].assets[0].id })],
         lastModifiedAssetTimestamp: expect.any(String),
+        startDate: expect.any(String),
+        endDate: expect.any(String),
+        albumUsers: expect.any(Array),
+        shared: true,
       });
     });

@@ -330,6 +374,10 @@ describe('/albums', () => {
         ...user1Albums[0],
         assets: [expect.objectContaining({ id: user1Albums[0].assets[0].id })],
         lastModifiedAssetTimestamp: expect.any(String),
+        startDate: expect.any(String),
+        endDate: expect.any(String),
+        albumUsers: expect.any(Array),
+        shared: true,
       });
     });

@@ -344,6 +392,30 @@ describe('/albums', () => {
         assets: [],
         assetCount: 1,
         lastModifiedAssetTimestamp: expect.any(String),
+        endDate: expect.any(String),
+        startDate: expect.any(String),
+        albumUsers: expect.any(Array),
+        shared: true,
       });
     });
+
+    it('should not count trashed assets', async () => {
+      await utils.deleteAssets(user1.accessToken, [user1Asset2.id]);
+
+      const { status, body } = await request(app)
+        .get(`/albums/${user2Albums[0].id}?withoutAssets=true`)
+        .set('Authorization', `Bearer ${user1.accessToken}`);
+
+      expect(status).toBe(200);
+      expect(body).toEqual({
+        ...user2Albums[0],
+        assets: [],
+        assetCount: 1,
+        lastModifiedAssetTimestamp: expect.any(String),
+        endDate: expect.any(String),
+        startDate: expect.any(String),
+        albumUsers: expect.any(Array),
+        shared: true,
+      });
+    });
   });
@@ -3,11 +3,10 @@ import {
|
||||
AssetMediaStatus,
|
||||
AssetResponseDto,
|
||||
AssetTypeEnum,
|
||||
getAssetInfo,
|
||||
getMyUser,
|
||||
LoginResponseDto,
|
||||
SharedLinkType,
|
||||
getAssetInfo,
|
||||
getConfig,
|
||||
getMyUser,
|
||||
updateConfig,
|
||||
} from '@immich/sdk';
|
||||
import { exiftool } from 'exiftool-vendored';
|
||||
@@ -19,7 +18,7 @@ import { Socket } from 'socket.io-client';
|
||||
import { createUserDto, uuidDto } from 'src/fixtures';
|
||||
import { makeRandomImage } from 'src/generators';
|
||||
import { errorDto } from 'src/responses';
|
||||
import { app, asBearerAuth, tempDir, testAssetDir, utils } from 'src/utils';
|
||||
import { app, asBearerAuth, tempDir, TEN_TIMES, testAssetDir, utils } from 'src/utils';
|
||||
import request from 'supertest';
|
||||
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
|
||||
|
||||
@@ -41,14 +40,10 @@ const makeUploadDto = (options?: { omit: string }): Record<string, any> => {
|
||||
return dto;
|
||||
};
|
||||
|
||||
const TEN_TIMES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
|
||||
|
||||
const locationAssetFilepath = `${testAssetDir}/metadata/gps-position/thompson-springs.jpg`;
|
||||
const ratingAssetFilepath = `${testAssetDir}/metadata/rating/mongolels.jpg`;
|
||||
const facesAssetFilepath = `${testAssetDir}/metadata/faces/portrait.jpg`;
|
||||
|
||||
const getSystemConfig = (accessToken: string) => getConfig({ headers: asBearerAuth(accessToken) });
|
||||
|
||||
const readTags = async (bytes: Buffer, filename: string) => {
|
||||
const filepath = join(tempDir, filename);
|
||||
await writeFile(filepath, bytes);
|
||||
@@ -230,7 +225,7 @@ describe('/asset', () => {
|
||||
});
|
||||
|
||||
it('should get the asset faces', async () => {
|
||||
const config = await getSystemConfig(admin.accessToken);
|
||||
const config = await utils.getSystemConfig(admin.accessToken);
|
||||
config.metadata.faces.import = true;
|
||||
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
|
||||
|
||||
@@ -538,7 +533,7 @@ describe('/asset', () => {
|
||||
expect(body).toMatchObject({
|
||||
id: user1Assets[0].id,
|
||||
exifInfo: expect.objectContaining({
|
||||
dateTimeOriginal: '2023-11-20T01:11:00.000Z',
|
||||
dateTimeOriginal: '2023-11-20T01:11:00+00:00',
|
||||
}),
|
||||
});
|
||||
expect(status).toEqual(200);
|
||||
@@ -608,7 +603,7 @@ describe('/asset', () => {
|
||||
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
|
||||
|
||||
const assetInfo = await utils.getAssetInfo(user1.accessToken, id);
|
||||
expect(assetInfo.exifInfo?.dateTimeOriginal).toBe('2024-07-11T10:32:52.000Z');
|
||||
expect(assetInfo.exifInfo?.dateTimeOriginal).toBe('2024-07-11T10:32:52+00:00');
|
||||
|
||||
const { status, body } = await request(app)
|
||||
.put(`/assets/${id}`)
|
||||
@@ -618,7 +613,7 @@ describe('/asset', () => {
|
||||
expect(body).toMatchObject({
|
||||
id,
|
||||
exifInfo: expect.objectContaining({
|
||||
dateTimeOriginal: '2023-11-20T01:11:00.000Z',
|
||||
dateTimeOriginal: '2023-11-20T01:11:00+00:00',
|
||||
}),
|
||||
});
|
||||
expect(status).toEqual(200);
|
||||
@@ -703,6 +698,20 @@ describe('/asset', () => {
expect(status).toEqual(200);
});

it('should set the negative rating', async () => {
const { status, body } = await request(app)
.put(`/assets/${user1Assets[0].id}`)
.set('Authorization', `Bearer ${user1.accessToken}`)
.send({ rating: -1 });
expect(body).toMatchObject({
id: user1Assets[0].id,
exifInfo: expect.objectContaining({
rating: -1,
}),
});
expect(status).toEqual(200);
});

it('should reject invalid rating', async () => {
for (const test of [{ rating: 7 }, { rating: 3.5 }, { rating: null }]) {
const { status, body } = await request(app)
@@ -985,8 +994,6 @@ describe('/asset', () => {
exifImageHeight: 1080,
exifImageWidth: 1617,
fileSizeInByte: 862_424,
latitude: null,
longitude: null,
},
},
},
@@ -996,11 +1003,9 @@ describe('/asset', () => {
type: AssetTypeEnum.Image,
originalFileName: 'el_torcal_rocks.jpg',
exifInfo: {
dateTimeOriginal: '2012-08-05T11:39:59.000Z',
dateTimeOriginal: '2012-08-05T11:39:59+00:00',
exifImageWidth: 512,
exifImageHeight: 341,
latitude: null,
longitude: null,
focalLength: 75,
iso: 200,
fNumber: 11,
@@ -1008,7 +1013,6 @@ describe('/asset', () => {
fileSizeInByte: 53_493,
make: 'SONY',
model: 'DSLR-A550',
orientation: null,
description: 'SONY DSC',
},
},
@@ -1023,8 +1027,6 @@ describe('/asset', () => {
exifImageHeight: 1080,
exifImageWidth: 1440,
fileSizeInByte: 1_780_777,
latitude: null,
longitude: null,
},
},
},
@@ -1035,7 +1037,7 @@ describe('/asset', () => {
originalFileName: 'IMG_2682.heic',
fileCreatedAt: '2019-03-21T16:04:22.348Z',
exifInfo: {
dateTimeOriginal: '2019-03-21T16:04:22.348Z',
dateTimeOriginal: '2019-03-21T16:04:22.348+00:00',
exifImageWidth: 4032,
exifImageHeight: 3024,
latitude: 41.2203,
@@ -1060,8 +1062,6 @@ describe('/asset', () => {
exifInfo: {
exifImageWidth: 800,
exifImageHeight: 800,
latitude: null,
longitude: null,
fileSizeInByte: 25_408,
},
},
@@ -1080,9 +1080,7 @@ describe('/asset', () => {
focalLength: 18,
iso: 100,
fileSizeInByte: 9_057_784,
dateTimeOriginal: '2010-07-20T17:27:12.000Z',
latitude: null,
longitude: null,
dateTimeOriginal: '2010-07-20T17:27:12+00:00',
orientation: '1',
},
},
@@ -1101,9 +1099,7 @@ describe('/asset', () => {
focalLength: 85,
iso: 200,
fileSizeInByte: 15_856_335,
dateTimeOriginal: '2016-09-22T21:10:29.060Z',
latitude: null,
longitude: null,
dateTimeOriginal: '2016-09-22T21:10:29.06+00:00',
orientation: '1',
timeZone: 'UTC-4',
},
@@ -1125,9 +1121,7 @@ describe('/asset', () => {
focalLength: 35,
iso: 400,
fileSizeInByte: 19_587_072,
dateTimeOriginal: '2018-05-10T08:42:37.842Z',
latitude: null,
longitude: null,
dateTimeOriginal: '2018-05-10T08:42:37.842+00:00',
orientation: '1',
},
},
@@ -1149,9 +1143,7 @@ describe('/asset', () => {
iso: 100,
lensModel: 'E PZ 18-105mm F4 G OSS',
fileSizeInByte: 25_001_984,
dateTimeOriginal: '2016-09-27T10:51:44.000Z',
latitude: null,
longitude: null,
dateTimeOriginal: '2016-09-27T10:51:44+00:00',
orientation: '1',
},
},
@@ -1173,9 +1165,7 @@ describe('/asset', () => {
iso: 100,
lensModel: 'E 25mm F2',
fileSizeInByte: 49_512_448,
dateTimeOriginal: '2016-01-08T14:08:01.000Z',
latitude: null,
longitude: null,
dateTimeOriginal: '2016-01-08T14:08:01+00:00',
orientation: '1',
},
},
@@ -1197,7 +1187,7 @@ describe('/asset', () => {
iso: 80,
lensModel: null,
fileSizeInByte: 11_113_617,
dateTimeOriginal: '2015-12-27T09:55:40.000Z',
dateTimeOriginal: '2015-12-27T09:55:40+00:00',
latitude: null,
longitude: null,
orientation: '1',
@@ -1221,7 +1211,7 @@ describe('/asset', () => {
iso: 160,
lensModel: null,
fileSizeInByte: 13_551_312,
dateTimeOriginal: '2024-10-12T21:01:01.000Z',
dateTimeOriginal: '2024-10-12T21:01:01+00:00',
latitude: null,
longitude: null,
orientation: '6',
@@ -1235,7 +1225,7 @@ describe('/asset', () => {
originalFileName: 'Ricoh_GR3-450.DNG',
fileCreatedAt: '2024-06-08T13:48:39.000Z',
exifInfo: {
dateTimeOriginal: '2024-06-08T13:48:39.000Z',
dateTimeOriginal: '2024-06-08T13:48:39+00:00',
exifImageHeight: 4064,
exifImageWidth: 6112,
exposureTime: '1/400',
e2e/src/api/specs/jobs.e2e-spec.ts (new file, 225 lines)
@@ -0,0 +1,225 @@
import { JobCommand, JobName, LoginResponseDto, updateConfig } from '@immich/sdk';
import { cpSync, rmSync } from 'node:fs';
import { readFile } from 'node:fs/promises';
import { basename } from 'node:path';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterEach, beforeAll, describe, expect, it } from 'vitest';

describe('/jobs', () => {
let admin: LoginResponseDto;

beforeAll(async () => {
await utils.resetDatabase();
admin = await utils.adminSetup({ onboarding: false });
});

describe('PUT /jobs', () => {
afterEach(async () => {
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.FaceDetection, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.SmartSearch, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.DuplicateDetection, {
command: JobCommand.Resume,
force: false,
});

const config = await utils.getSystemConfig(admin.accessToken);
config.machineLearning.duplicateDetection.enabled = false;
config.machineLearning.enabled = false;
config.metadata.faces.import = false;
config.machineLearning.clip.enabled = false;
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
});

it('should require authentication', async () => {
const { status, body } = await request(app).put('/jobs/metadataExtraction');
expect(status).toBe(401);
expect(body).toEqual(errorDto.unauthorized);
});

it('should queue metadata extraction for missing assets', async () => {
const path = `${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`;

await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Pause,
force: false,
});

const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});

await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

{
const asset = await utils.getAssetInfo(admin.accessToken, id);

expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.make).toBeNull();
}

await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Empty,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Start,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

{
const asset = await utils.getAssetInfo(admin.accessToken, id);

expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.make).toBe('NIKON CORPORATION');
}
});

it('should not re-extract metadata for existing assets', async () => {
const path = `${testAssetDir}/temp/metadata/asset.jpg`;

cpSync(`${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`, path);

const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});

await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

{
const asset = await utils.getAssetInfo(admin.accessToken, id);

expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}

cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, path);

await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Start,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

{
const asset = await utils.getAssetInfo(admin.accessToken, id);

expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}

rmSync(path);
});

it('should queue thumbnail extraction for assets missing thumbs', async () => {
const path = `${testAssetDir}/albums/nature/tanners_ridge.jpg`;

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Pause,
force: false,
});

const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});

await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);

const assetBefore = await utils.getAssetInfo(admin.accessToken, id);
expect(assetBefore.thumbhash).toBeNull();

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Empty,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);

const assetAfter = await utils.getAssetInfo(admin.accessToken, id);
expect(assetAfter.thumbhash).not.toBeNull();
});

it('should not reload existing thumbnail when running thumb job for missing assets', async () => {
const path = `${testAssetDir}/temp/thumbs/asset1.jpg`;

cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, path);

const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});

await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);

const assetBefore = await utils.getAssetInfo(admin.accessToken, id);

cpSync(`${testAssetDir}/albums/nature/notocactus_minimus.jpg`, path);

await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});

// This runs the missing thumbnail job
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});

await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);

const assetAfter = await utils.getAssetInfo(admin.accessToken, id);

// Asset 1 thumbnail should be untouched since its thumb should not have been reloaded, even though the file was changed
expect(assetAfter.thumbhash).toEqual(assetBefore.thumbhash);

rmSync(path);
});
});
});
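The new spec drives every queue through utils.jobCommand with a pause, empty, resume, start sequence, so an asset can be uploaded while a queue is held, the queued work discarded, and the "missing assets" pass then run explicitly. A minimal sketch of the helper, assuming it simply wraps the PUT /jobs/:name endpoint that the spec's own 401 test exercises:

// Sketch of utils.jobCommand (assumed implementation; the real helper lives
// in e2e/src/utils.ts and is not shown in this diff).
import { JobCommand, JobName } from '@immich/sdk';
import request from 'supertest';
import { app } from 'src/utils';

const jobCommand = async (
  accessToken: string,
  jobName: JobName,
  dto: { command: JobCommand; force: boolean },
) => {
  // Same endpoint as the authentication test above: PUT /jobs/:name.
  const { status } = await request(app)
    .put(`/jobs/${jobName}`)
    .set('Authorization', `Bearer ${accessToken}`)
    .send(dto);
  if (status !== 200) {
    throw new Error(`job command '${dto.command}' for ${jobName} failed with status ${status}`);
  }
};

Pausing before the upload keeps the queue from processing the asset immediately, Empty discards whatever was queued, and Resume followed by Start kicks off the pass whose results the tests assert on.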
@@ -1,4 +1,4 @@
import { LibraryResponseDto, LoginResponseDto, getAllLibraries, scanLibrary } from '@immich/sdk';
import { LibraryResponseDto, LoginResponseDto, getAllLibraries } from '@immich/sdk';
import { cpSync, existsSync, rmSync, unlinkSync } from 'node:fs';
import { Socket } from 'socket.io-client';
import { userDto, uuidDto } from 'src/fixtures';
@@ -8,8 +8,6 @@ import request from 'supertest';
import { utimes } from 'utimes';
import { afterAll, beforeAll, beforeEach, describe, expect, it } from 'vitest';

const scan = async (accessToken: string, id: string) => scanLibrary({ id }, { headers: asBearerAuth(accessToken) });

describe('/libraries', () => {
let admin: LoginResponseDto;
let user: LoginResponseDto;
@@ -298,13 +296,35 @@ describe('/libraries', () => {
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');

const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
libraryId: library.id,
});
expect(assets.count).toBe(1);
});

it('should process metadata and thumbnails for external asset', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/directoryA`],
});

await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
libraryId: library.id,
});
expect(assets.count).toBe(1);
const asset = assets.items[0];
expect(asset.exifInfo).not.toBe(null);
expect(asset.exifInfo?.dateTimeOriginal).not.toBe(null);
expect(asset.thumbhash).not.toBe(null);
});

it('should scan external library with exclusion pattern', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -312,13 +332,7 @@ describe('/libraries', () => {
exclusionPatterns: ['**/directoryA'],
});

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

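From here on, the spec replaces the raw POST /libraries/:id/scan requests and the local scan helper, plus their trailing waitForQueueFinish calls, with a single utils.scan. A sketch of what the consolidated helper presumably does, combining exactly the steps the removed call sites spelled out:

// Sketch of utils.scan (assumed; assembled from the removed call sites in
// this diff, not from the real e2e/src/utils.ts).
import { scanLibrary } from '@immich/sdk';
import { asBearerAuth, utils } from 'src/utils';

const scan = async (accessToken: string, id: string) => {
  // Trigger the library scan, then drain every queue the scan feeds.
  await scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
  await utils.waitForQueueFinish(accessToken, 'library');
  await utils.waitForQueueFinish(accessToken, 'sidecar');
  await utils.waitForQueueFinish(accessToken, 'metadataExtraction');
};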
@@ -332,13 +346,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/directoryA`, `${testAssetDirInternal}/temp/directoryB`],
});

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -357,13 +365,7 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder, a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder, b/assetB.png`);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -385,13 +387,7 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder{ a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder} b/assetB.png`);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -443,13 +439,7 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder${char}1/asset1.png`);
utils.createImageFile(`${testAssetDir}/temp/folder${char}2/asset2.png`);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -473,23 +463,12 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -520,21 +499,12 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -556,6 +526,47 @@ describe('/libraries', () => {
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});

it('should not reimport a modified file more than once', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/reimport`],
});

utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);

await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);

await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/albums/nature/el_torcal_rocks.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);

await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
});

expect(assets.count).toEqual(1);

const asset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);

expect(asset).toEqual(
expect.objectContaining({
originalFileName: 'asset.jpg',
exifInfo: expect.objectContaining({
model: 'NIKON D750',
}),
}),
);

utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});

it('should set an asset offline if its file is missing', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -564,21 +575,14 @@ describe('/libraries', () => {

utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);

utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -596,8 +600,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
@@ -608,13 +611,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/another-path/`],
});

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -634,8 +631,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -645,8 +641,7 @@ describe('/libraries', () => {

await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/directoryB/**'] });

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.isTrashed).toBe(true);
@@ -668,19 +663,12 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets: assetsBefore } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assetsBefore.count).toBeGreaterThan(1);

const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -697,11 +685,7 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);

await scan(admin.accessToken, library.id);

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -724,10 +708,7 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -751,10 +732,7 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -778,19 +756,13 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -813,18 +785,12 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -847,18 +813,12 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -882,19 +842,13 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -918,18 +872,12 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -953,18 +901,12 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.scan(admin.accessToken, library.id);

const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

@@ -987,22 +929,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
@@ -1016,15 +949,7 @@ describe('/libraries', () => {

utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const backOnlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);

@@ -1046,22 +971,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
@@ -1082,15 +998,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/another-path`],
});

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);

@@ -1114,22 +1022,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });

utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
@@ -1146,15 +1045,7 @@ describe('/libraries', () => {

await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });

{
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
}

await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);

@@ -1274,8 +1165,7 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});

await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.scan(admin.accessToken, library.id);

const { status, body } = await request(app)
.delete(`/libraries/${library.id}`)

@@ -93,8 +93,6 @@ describe('/memories', () => {
data: { year: 2021 },
createdAt: expect.any(String),
updatedAt: expect.any(String),
deletedAt: null,
seenAt: null,
isSaved: false,
memoryAt: expect.any(String),
ownerId: user.userId,

@@ -13,8 +13,8 @@ import request from 'supertest';
import { beforeAll, describe, expect, it } from 'vitest';

const authServer = {
internal: 'http://auth-server:3000',
external: 'http://127.0.0.1:3000',
internal: 'http://auth-server:2286',
external: 'http://127.0.0.1:2286',
};

const mobileOverrideRedirectUri = 'https://photos.immich.app/oauth/mobile-redirect';

@@ -1,7 +1,7 @@
import { LoginResponseDto, PersonResponseDto } from '@immich/sdk';
import { getPerson, LoginResponseDto, PersonResponseDto } from '@immich/sdk';
import { uuidDto } from 'src/fixtures';
import { errorDto } from 'src/responses';
import { app, utils } from 'src/utils';
import { app, asBearerAuth, utils } from 'src/utils';
import request from 'supertest';
import { beforeAll, beforeEach, describe, expect, it } from 'vitest';

@@ -195,12 +195,29 @@ describe('/people', () => {
.send({
name: 'New Person',
birthDate: '1990-01-01',
color: '#333',
});
expect(status).toBe(201);
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Person',
birthDate: '1990-01-01',
birthDate: '1990-01-01T00:00:00.000Z',
});
});

it('should create a favorite person', async () => {
const { status, body } = await request(app)
.post(`/people`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({
name: 'New Favorite Person',
isFavorite: true,
});
expect(status).toBe(201);
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Favorite Person',
isFavorite: true,
});
});
});
@@ -216,6 +233,7 @@ describe('/people', () => {
{ key: 'name', type: 'string' },
{ key: 'featureFaceAssetId', type: 'string' },
{ key: 'isHidden', type: 'boolean value' },
{ key: 'isFavorite', type: 'boolean value' },
]) {
it(`should not allow null ${key}`, async () => {
const { status, body } = await request(app)
@@ -244,7 +262,7 @@ describe('/people', () => {
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ birthDate: '1990-01-01' });
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: '1990-01-01' });
expect(body).toMatchObject({ birthDate: '1990-01-01T00:00:00.000Z' });
});

it('should clear a date of birth', async () => {
@@ -255,6 +273,42 @@ describe('/people', () => {
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: null });
});

it('should set a color', async () => {
const { status, body } = await request(app)
.put(`/people/${visiblePerson.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ color: '#555' });
expect(status).toBe(200);
expect(body).toMatchObject({ color: '#555' });
});

it('should clear a color', async () => {
const { status, body } = await request(app)
.put(`/people/${visiblePerson.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ color: null });
expect(status).toBe(200);
expect(body.color).toBeUndefined();
});

it('should mark a person as favorite', async () => {
const person = await utils.createPerson(admin.accessToken, {
name: 'visible_person',
});

expect(person.isFavorite).toBe(false);

const { status, body } = await request(app)
.put(`/people/${person.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ isFavorite: true });
expect(status).toBe(200);
expect(body).toMatchObject({ isFavorite: true });

const person2 = await getPerson({ id: person.id }, { headers: asBearerAuth(admin.accessToken) });
expect(person2).toMatchObject({ id: person.id, isFavorite: true });
});
});

describe('POST /people/:id/merge', () => {

@@ -1,10 +1,10 @@
import { AssetMediaResponseDto, LoginResponseDto, deleteAssets, updateAsset } from '@immich/sdk';
import { AssetMediaResponseDto, AssetResponseDto, deleteAssets, LoginResponseDto, updateAsset } from '@immich/sdk';
import { DateTime } from 'luxon';
import { readFile } from 'node:fs/promises';
import { join } from 'node:path';
import { Socket } from 'socket.io-client';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, testAssetDir, utils } from 'src/utils';
import { app, asBearerAuth, TEN_TIMES, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
const today = DateTime.now();
@@ -462,6 +462,55 @@ describe('/search', () => {
});
});

describe('POST /search/random', () => {
beforeAll(async () => {
await Promise.all([
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
]);

await utils.waitForQueueFinish(admin.accessToken, 'thumbnailGeneration');
});

it('should require authentication', async () => {
const { status, body } = await request(app).post('/search/random').send({ size: 1 });

expect(status).toBe(401);
expect(body).toEqual(errorDto.unauthorized);
});

it.each(TEN_TIMES)('should return 1 random asset', async () => {
const { status, body } = await request(app)
.post('/search/random')
.send({ size: 1 })
.set('Authorization', `Bearer ${admin.accessToken}`);

expect(status).toBe(200);

const assets: AssetResponseDto[] = body;
expect(assets.length).toBe(1);
expect(assets[0].ownerId).toBe(admin.userId);
});

it.each(TEN_TIMES)('should return 2 random assets', async () => {
const { status, body } = await request(app)
.post('/search/random')
.send({ size: 2 })
.set('Authorization', `Bearer ${admin.accessToken}`);

expect(status).toBe(200);

const assets: AssetResponseDto[] = body;
expect(assets.length).toBe(2);
expect(assets[0].ownerId).toBe(admin.userId);
expect(assets[1].ownerId).toBe(admin.userId);
});
});

describe('GET /search/explore', () => {
it('should require authentication', async () => {
const { status, body } = await request(app).get('/search/explore');

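The random-search tests are inherently nondeterministic, so they run each assertion ten times via vitest's it.each over the shared TEN_TIMES array; it.each(array) registers one test case per element, and here only the repetition matters, not the element value. A small standalone illustration of the pattern:

import { describe, expect, it } from 'vitest';

const TEN_TIMES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

describe('repetition via it.each', () => {
  // Registers ten test cases; each run ignores the array element itself.
  it.each(TEN_TIMES)('repeats the same assertion (iteration %i)', () => {
    expect(Math.random()).toBeGreaterThanOrEqual(0);
  });
});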
@@ -89,13 +89,13 @@ describe('/shared-links', () => {
await deleteUserAdmin({ id: user2.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) });
});

describe('GET /share/${key}', () => {
describe('GET /share/:key', () => {
it('should have correct asset count in meta tag for non-empty album', async () => {
const resp = await request(shareUrl).get(`/${linkWithMetadata.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(
`<meta name="description" content="${metadataAlbum.assets.length} shared photos & videos" />`,
`<meta name="description" content="${metadataAlbum.assets.length} shared photos & videos" />`,
);
});

@@ -103,14 +103,14 @@ describe('/shared-links', () => {
const resp = await request(shareUrl).get(`/${linkWithAlbum.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="0 shared photos & videos" />`);
expect(resp.text).toContain(`<meta name="description" content="0 shared photos & videos" />`);
});

it('should have correct asset count in meta tag for shared asset', async () => {
const resp = await request(shareUrl).get(`/${linkWithAssets.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="1 shared photos & videos" />`);
expect(resp.text).toContain(`<meta name="description" content="1 shared photos & videos" />`);
});

it('should have fqdn og:image meta tag for shared asset', async () => {
@@ -139,7 +139,10 @@ describe('/shared-links', () => {
expect(body).toEqual(
expect.arrayContaining([
expect.objectContaining({ id: linkWithAlbum.id }),
expect.objectContaining({ id: linkWithAssets.id }),
expect.objectContaining({
id: linkWithAssets.id,
assets: expect.arrayContaining([expect.objectContaining({ id: asset1.id })]),
}),
expect.objectContaining({ id: linkWithPassword.id }),
expect.objectContaining({ id: linkWithMetadata.id }),
expect.objectContaining({ id: linkWithoutMetadata.id }),
@@ -147,6 +150,30 @@ describe('/shared-links', () => {
);
});

it('should filter on albumId', async () => {
const { status, body } = await request(app)
.get(`/shared-links?albumId=${album.id}`)
.set('Authorization', `Bearer ${user1.accessToken}`);

expect(status).toBe(200);
expect(body).toHaveLength(2);
expect(body).toEqual(
expect.arrayContaining([
expect.objectContaining({ id: linkWithAlbum.id }),
expect.objectContaining({ id: linkWithPassword.id }),
]),
);
});

it('should find 0 albums', async () => {
const { status, body } = await request(app)
.get(`/shared-links?albumId=${uuidDto.notFound}`)
.set('Authorization', `Bearer ${user1.accessToken}`);

expect(status).toBe(200);
expect(body).toHaveLength(0);
});

it('should not get shared links created by other users', async () => {
const { status, body } = await request(app)
.get('/shared-links')
@@ -170,7 +197,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album,
album: expect.objectContaining({ id: album.id }),
userId: user1.userId,
type: SharedLinkType.Album,
}),
@@ -208,7 +235,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album,
album: expect.objectContaining({ id: album.id }),
userId: user1.userId,
type: SharedLinkType.Album,
}),
@@ -262,7 +289,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album,
album: expect.objectContaining({ id: album.id }),
userId: user1.userId,
type: SharedLinkType.Album,
}),

@@ -119,93 +119,84 @@ describe('/stacks', () => {
       const stacksAfter = await searchStacks({}, { headers: asBearerAuth(user1.accessToken) });
       expect(stacksAfter.length).toBe(stacksBefore.length);
     });

-    // it('should require a valid parent id', async () => {
-    //   const { status, body } = await request(app)
-    //     .put('/assets')
-    //     .set('Authorization', `Bearer ${user1.accessToken}`)
-    //     .send({ stackParentId: uuidDto.invalid, ids: [stackAssets[0].id] });
-
-    //   expect(status).toBe(400);
-    //   expect(body).toEqual(errorDto.badRequest(['stackParentId must be a UUID']));
-    // });
   });

-  // it('should require access to the parent', async () => {
-  //   const { status, body } = await request(app)
-  //     .put('/assets')
-  //     .set('Authorization', `Bearer ${user1.accessToken}`)
-  //     .send({ stackParentId: stackAssets[3].id, ids: [user1Assets[0].id] });
+  describe('GET /assets/:id', () => {
+    it('should include stack details for the primary asset', async () => {
+      const [asset1, asset2] = await Promise.all([
+        utils.createAsset(user1.accessToken),
+        utils.createAsset(user1.accessToken),
+      ]);

-  //   expect(status).toBe(400);
-  //   expect(body).toEqual(errorDto.noPermission);
-  // });
+      await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);

-  // it('should add stack children', async () => {
-  //   const { status } = await request(app)
-  //     .put('/assets')
-  //     .set('Authorization', `Bearer ${stackUser.accessToken}`)
-  //     .send({ stackParentId: stackAssets[0].id, ids: [stackAssets[3].id] });
+      const { status, body } = await request(app)
+        .get(`/assets/${asset1.id}`)
+        .set('Authorization', `Bearer ${user1.accessToken}`);

-  //   expect(status).toBe(204);
+      expect(status).toBe(200);
+      expect(body).toEqual(
+        expect.objectContaining({
+          id: asset1.id,
+          stack: {
+            id: expect.any(String),
+            assetCount: 2,
+            primaryAssetId: asset1.id,
+          },
+        }),
+      );
+    });

-  //   const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
-  //   expect(asset.stack).not.toBeUndefined();
-  //   expect(asset.stack).toEqual(expect.arrayContaining([expect.objectContaining({ id: stackAssets[3].id })]));
-  // });
+    it('should include stack details for a non-primary asset', async () => {
+      const [asset1, asset2] = await Promise.all([
+        utils.createAsset(user1.accessToken),
+        utils.createAsset(user1.accessToken),
+      ]);

-  // it('should remove stack children', async () => {
-  //   const { status } = await request(app)
-  //     .put('/assets')
-  //     .set('Authorization', `Bearer ${stackUser.accessToken}`)
-  //     .send({ removeParent: true, ids: [stackAssets[1].id] });
+      await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);

-  //   expect(status).toBe(204);
+      const { status, body } = await request(app)
+        .get(`/assets/${asset2.id}`)
+        .set('Authorization', `Bearer ${user1.accessToken}`);

-  //   const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
-  //   expect(asset.stack).not.toBeUndefined();
-  //   expect(asset.stack).toEqual(
-  //     expect.arrayContaining([
-  //       expect.objectContaining({ id: stackAssets[2].id }),
-  //       expect.objectContaining({ id: stackAssets[3].id }),
-  //     ]),
-  //   );
-  // });
+      expect(status).toBe(200);
+      expect(body).toEqual(
+        expect.objectContaining({
+          id: asset2.id,
+          stack: {
+            id: expect.any(String),
+            assetCount: 2,
+            primaryAssetId: asset1.id,
+          },
+        }),
+      );
+    });
+  });

-  // it('should remove all stack children', async () => {
-  //   const { status } = await request(app)
-  //     .put('/assets')
-  //     .set('Authorization', `Bearer ${stackUser.accessToken}`)
-  //     .send({ removeParent: true, ids: [stackAssets[2].id, stackAssets[3].id] });
+  describe('GET /stacks/:id', () => {
+    it('should include exifInfo in stack assets', async () => {
+      const [asset1, asset2] = await Promise.all([
+        utils.createAsset(user1.accessToken),
+        utils.createAsset(user1.accessToken),
+      ]);

-  //   expect(status).toBe(204);
+      const stack = await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);

-  //   const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
-  //   expect(asset.stack).toBeUndefined();
-  // });
+      const { status, body } = await request(app)
+        .get(`/stacks/${stack.id}`)
+        .set('Authorization', `Bearer ${user1.accessToken}`);

-  // it('should merge stack children', async () => {
-  //   // create stack after previous test removed stack children
-  //   await updateAssets(
-  //     { assetBulkUpdateDto: { stackParentId: stackAssets[0].id, ids: [stackAssets[1].id, stackAssets[2].id] } },
-  //     { headers: asBearerAuth(stackUser.accessToken) },
-  //   );
-
-  //   const { status } = await request(app)
-  //     .put('/assets')
-  //     .set('Authorization', `Bearer ${stackUser.accessToken}`)
-  //     .send({ stackParentId: stackAssets[3].id, ids: [stackAssets[0].id] });
-
-  //   expect(status).toBe(204);
-
-  //   const asset = await getAssetInfo({ id: stackAssets[3].id }, { headers: asBearerAuth(stackUser.accessToken) });
-  //   expect(asset.stack).not.toBeUndefined();
-  //   expect(asset.stack).toEqual(
-  //     expect.arrayContaining([
-  //       expect.objectContaining({ id: stackAssets[0].id }),
-  //       expect.objectContaining({ id: stackAssets[1].id }),
-  //       expect.objectContaining({ id: stackAssets[2].id }),
-  //     ]),
-  //   );
-  // });
+      expect(status).toBe(200);
+      expect(body).toEqual(
+        expect.objectContaining({
+          id: stack.id,
+          primaryAssetId: asset1.id,
+          assets: expect.arrayContaining([
+            expect.objectContaining({ id: asset1.id, exifInfo: expect.any(Object) }),
+            expect.objectContaining({ id: asset2.id, exifInfo: expect.any(Object) }),
+          ]),
+        }),
+      );
+    });
+  });
 });
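The two new GET /assets/:id tests differ only in which stacked asset is fetched. A hedged sketch of collapsing them with Vitest's it.each; utils.createAsset and utils.createStack are used exactly as in the diff, with their behavior assumed from it:

    // Sketch only: relies on the fixtures behaving as the tests above show.
    it.each([
      { label: 'primary', pick: 0 },
      { label: 'non-primary', pick: 1 },
    ])('should include stack details for the $label asset', async ({ pick }) => {
      const [asset1, asset2] = await Promise.all([
        utils.createAsset(user1.accessToken),
        utils.createAsset(user1.accessToken),
      ]);
      await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);

      const target = [asset1, asset2][pick];
      const { status, body } = await request(app)
        .get(`/assets/${target.id}`)
        .set('Authorization', `Bearer ${user1.accessToken}`);

      expect(status).toBe(200);
      expect(body).toEqual(
        expect.objectContaining({
          id: target.id,
          // per the assertions above, the first asset passed in becomes the primary
          stack: { id: expect.any(String), assetCount: 2, primaryAssetId: asset1.id },
        }),
      );
    });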
@@ -151,7 +151,7 @@ describe('/timeline', () => {
     it('should require authentication', async () => {
       const { status, body } = await request(app).get('/timeline/bucket').query({
         size: TimeBucketSize.Month,
-        timeBucket: '1900-01-01T00:00:00.000Z',
+        timeBucket: '1900-01-01',
       });

       expect(status).toBe(401);
@@ -161,7 +161,7 @@ describe('/timeline', () => {
     it('should handle 5 digit years', async () => {
       const { status, body } = await request(app)
         .get('/timeline/bucket')
-        .query({ size: TimeBucketSize.Month, timeBucket: '+012345-01-01T00:00:00.000Z' })
+        .query({ size: TimeBucketSize.Month, timeBucket: '012345-01-01' })
         .set('Authorization', `Bearer ${timeBucketUser.accessToken}`);

       expect(status).toBe(200);
@@ -183,7 +183,7 @@ describe('/timeline', () => {
       const { status, body } = await request(app)
         .get('/timeline/bucket')
         .set('Authorization', `Bearer ${timeBucketUser.accessToken}`)
-        .query({ size: TimeBucketSize.Month, timeBucket: '1970-02-10T00:00:00.000Z' });
+        .query({ size: TimeBucketSize.Month, timeBucket: '1970-02-10' });

       expect(status).toBe(200);
       expect(body).toEqual([]);
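All three timeline hunks shorten timeBucket from a full ISO-8601 timestamp to a plain date. A small illustrative helper for producing bucket strings in the new format (the helper name is mine, not from the Immich codebase):

    // Build the date-only bucket string the updated tests send.
    const toDateBucket = (date: Date): string => {
      const year = date.getUTCFullYear();
      const month = String(date.getUTCMonth() + 1).padStart(2, '0');
      const day = String(date.getUTCDate()).padStart(2, '0');
      return `${year}-${month}-${day}`;
    };

    toDateBucket(new Date(Date.UTC(1970, 1, 10))); // => '1970-02-10'

Note that the five-digit-year test also drops the leading '+' sign required by the expanded ISO form ('+012345-…').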
@@ -1,4 +1,4 @@
-import { LoginResponseDto, getAssetInfo, getAssetStatistics, scanLibrary } from '@immich/sdk';
+import { LoginResponseDto, getAssetInfo, getAssetStatistics } from '@immich/sdk';
 import { existsSync } from 'node:fs';
 import { Socket } from 'socket.io-client';
 import { errorDto } from 'src/responses';
@@ -6,8 +6,6 @@ import { app, asBearerAuth, testAssetDir, testAssetDirInternal, utils } from 'sr
 import request from 'supertest';
 import { afterAll, beforeAll, describe, expect, it } from 'vitest';

-const scan = async (accessToken: string, id: string) => scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
-
 describe('/trash', () => {
   let admin: LoginResponseDto;
   let ws: Socket;
@@ -81,8 +79,7 @@ describe('/trash', () => {

     utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);

-    await scan(admin.accessToken, library.id);
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
     expect(assets.items.length).toBe(1);
@@ -90,8 +87,7 @@ describe('/trash', () => {

     await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });

-    await scan(admin.accessToken, library.id);
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const assetBefore = await utils.getAssetInfo(admin.accessToken, asset.id);
     expect(assetBefore).toMatchObject({ isTrashed: true, isOffline: true });
@@ -116,8 +112,7 @@ describe('/trash', () => {

     utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);

-    await scan(admin.accessToken, library.id);
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
     expect(assets.items.length).toBe(1);
@@ -125,8 +120,7 @@ describe('/trash', () => {

     await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });

-    await scan(admin.accessToken, library.id);
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const assetBefore = await utils.getAssetInfo(admin.accessToken, asset.id);
     expect(assetBefore).toMatchObject({ isTrashed: true, isOffline: true });
@@ -180,8 +174,7 @@ describe('/trash', () => {

     utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);

-    await scan(admin.accessToken, library.id);
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
     expect(assets.count).toBe(1);
@@ -189,9 +182,7 @@ describe('/trash', () => {

     await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });

-    await scan(admin.accessToken, library.id);
-
-    await utils.waitForQueueFinish(admin.accessToken, 'library');
+    await utils.scan(admin.accessToken, library.id);

     const before = await getAssetInfo({ id: assetId }, { headers: asBearerAuth(admin.accessToken) });
     expect(before).toStrictEqual(expect.objectContaining({ id: assetId, isOffline: true }));
@@ -201,6 +192,8 @@ describe('/trash', () => {

     const after = await getAssetInfo({ id: assetId }, { headers: asBearerAuth(admin.accessToken) });
     expect(after).toStrictEqual(expect.objectContaining({ id: assetId, isOffline: true }));
+
+    utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
   });
 });
@@ -238,7 +231,7 @@ describe('/trash', () => {

     utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);

-    await scan(admin.accessToken, library.id);
+    await utils.scan(admin.accessToken, library.id);
     await utils.waitForQueueFinish(admin.accessToken, 'library');

     const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -247,7 +240,7 @@ describe('/trash', () => {

     await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });

-    await scan(admin.accessToken, library.id);
+    await utils.scan(admin.accessToken, library.id);
     await utils.waitForQueueFinish(admin.accessToken, 'library');

     const before = await utils.getAssetInfo(admin.accessToken, assetId);
@@ -261,6 +254,8 @@ describe('/trash', () => {

     const after = await utils.getAssetInfo(admin.accessToken, assetId);
     expect(after.isTrashed).toBe(true);
+
+    utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
   });
 });
 });
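Every trash hunk swaps the file-local scan wrapper for a shared utils.scan. The deleted helper was a thin wrapper over the SDK call, reproduced below; whether utils.scan also waits for the library queue is unclear from the diff alone, since the earlier hunks drop the explicit waitForQueueFinish call while the later ones keep it:

    // The removed local helper; utils.scan presumably centralizes this.
    // asBearerAuth comes from the e2e setup module, as imported above.
    import { scanLibrary } from '@immich/sdk';

    const scan = async (accessToken: string, id: string) =>
      scanLibrary({ id }, { headers: asBearerAuth(accessToken) });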
@@ -356,5 +356,24 @@ describe('/admin/users', () => {
       expect(status).toBe(403);
       expect(body).toEqual(errorDto.forbidden);
     });
+
+    it('should restore a user', async () => {
+      const user = await utils.userSetup(admin.accessToken, createUserDto.create('restore'));
+
+      await deleteUserAdmin({ id: user.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) });
+
+      const { status, body } = await request(app)
+        .post(`/admin/users/${user.userId}/restore`)
+        .set('Authorization', `Bearer ${admin.accessToken}`);
+      expect(status).toBe(200);
+      expect(body).toEqual(
+        expect.objectContaining({
+          id: user.userId,
+          email: user.userEmail,
+          status: 'active',
+          deletedAt: null,
+        }),
+      );
+    });
   });
 });
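The new restore test drives POST /admin/users/:id/restore through supertest. The same round trip through the generated SDK might look like the sketch below; deleteUserAdmin appears in the diff, while restoreUserAdmin is an assumption (the SDK is generated from the OpenAPI spec, so a sibling function plausibly exists, but the diff does not show it):

    // Assumption: restoreUserAdmin exists alongside deleteUserAdmin in @immich/sdk.
    import { deleteUserAdmin, restoreUserAdmin } from '@immich/sdk';

    const headers = asBearerAuth(admin.accessToken);
    await deleteUserAdmin({ id: user.userId, userAdminDeleteDto: {} }, { headers });
    const restored = await restoreUserAdmin({ id: user.userId }, { headers });
    // Per the test above, the endpoint returns the re-activated user record.
    expect(restored).toMatchObject({ id: user.userId, status: 'active', deletedAt: null });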
@@ -129,6 +129,8 @@ describe('/users', () => {
       expect(body).toEqual({
         ...before,
         updatedAt: expect.any(String),
+        profileChangedAt: expect.any(String),
+        createdAt: expect.any(String),
         name: 'Name',
       });
     });
@@ -177,6 +179,8 @@ describe('/users', () => {
         ...before,
         email: 'non-admin@immich.cloud',
         updatedAt: expect.anything(),
+        createdAt: expect.anything(),
+        profileChangedAt: expect.anything(),
       });
     });
   });
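One detail worth noting: the first users hunk asserts the new timestamp fields with expect.any(String), the second with expect.anything(). Both pass against a JSON-serialized response, but they are not equivalent:

    // expect.any(String) requires a string instance; expect.anything() accepts
    // any value other than null or undefined.
    expect({ updatedAt: '2024-01-01T00:00:00.000Z' }).toEqual({ updatedAt: expect.any(String) });
    expect({ updatedAt: new Date() }).toEqual({ updatedAt: expect.anything() });
    // expect.any(String) would reject the Date above, so it is the stricter
    // choice when the serialized shape matters.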
Some files were not shown because too many files have changed in this diff.