feat(ml): ML on Rockchip NPUs (#15241)

Author: Yoni Yang
Date: 2025-03-18 00:04:08 +08:00
Committed by: GitHub
Parent: 1e184a70f1
Commit: 14c3b99c0f
43 changed files with 2417 additions and 4726 deletions


@@ -0,0 +1 @@
3.12


@@ -1,20 +0,0 @@
FROM mambaorg/micromamba:bookworm-slim@sha256:e3797091302382ea841498bc93a7b0a50f7c1448333d5e946d2d1608d0c5f43d AS builder
ENV TRANSFORMERS_CACHE=/cache \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PATH="/opt/venv/bin:$PATH" \
PYTHONPATH=/usr/src
COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock.yml /tmp/conda-lock.yml
RUN micromamba install -y -n base -f /tmp/conda-lock.yml && \
micromamba remove -y -n base cxx-compiler && \
micromamba clean --all --yes
WORKDIR /usr/src/app
COPY --chown=$MAMBA_USER:$MAMBA_USER start.sh .
COPY --chown=$MAMBA_USER:$MAMBA_USER app .
ENTRYPOINT ["/usr/local/bin/_entrypoint.sh"]
CMD ["./start.sh"]

File diff suppressed because it is too large.


@@ -1,15 +0,0 @@
name: base
channels:
  - conda-forge
platforms:
  - linux-64
  - linux-aarch64
dependencies:
  - black
  - conda-lock
  - mypy
  - pytest
  - pytest-cov
  - pytest-mock
  - ruff
category: dev


@@ -1,25 +0,0 @@
name: base
channels:
  - conda-forge
  - nvidia
  - pytorch
platforms:
  - linux-64
dependencies:
  - cxx-compiler
  - onnx==1.*
  - onnxruntime==1.*
  - open-clip-torch==2.*
  - orjson==3.*
  - pip
  - python==3.11.*
  - pytorch>=2.3
  - rich==13.*
  - safetensors==0.*
  - setuptools==68.*
  - torchvision
  - transformers==4.*
  - pip:
      - multilingual-clip
      - onnxsim
category: main


@@ -0,0 +1,98 @@
from pathlib import Path

import typer
from tenacity import retry, stop_after_attempt, wait_fixed
from typing_extensions import Annotated

from .exporters.constants import DELETE_PATTERNS, SOURCE_TO_METADATA, ModelSource
from .exporters.onnx import export as onnx_export
from .exporters.rknn import export as rknn_export

app = typer.Typer(pretty_exceptions_show_locals=False)


def generate_readme(model_name: str, model_source: ModelSource) -> str:
    (name, link, type) = SOURCE_TO_METADATA[model_source]
    match model_source:
        case ModelSource.MCLIP:
            tags = ["immich", "clip", "multilingual"]
        case ModelSource.OPENCLIP:
            tags = ["immich", "clip"]
            lowered = model_name.lower()
            if "xlm" in lowered or "nllb" in lowered:
                tags.append("multilingual")
        case ModelSource.INSIGHTFACE:
            tags = ["immich", "facial-recognition"]
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    return f"""---
tags:
{" - " + "\n - ".join(tags)}
---
# Model Description

This repo contains ONNX exports for the associated {type} model by {name}. See the [{name}]({link}) repo for more info.

This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
"""


@app.command()
def main(
    model_name: str,
    model_source: ModelSource,
    output_dir: Path = Path("./models"),
    no_cache: bool = False,
    hf_organization: str = "immich-app",
    hf_auth_token: Annotated[str | None, typer.Option(envvar="HF_AUTH_TOKEN")] = None,
) -> None:
    hf_model_name = model_name.split("/")[-1]
    hf_model_name = hf_model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
    hf_model_name = hf_model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
    output_dir = output_dir / hf_model_name
    match model_source:
        case ModelSource.MCLIP | ModelSource.OPENCLIP:
            output_dir.mkdir(parents=True, exist_ok=True)
            onnx_export(model_name, model_source, output_dir, no_cache=no_cache)
        case ModelSource.INSIGHTFACE:
            from huggingface_hub import snapshot_download

            # TODO: start from insightface dump instead of downloading from HF
            snapshot_download(f"immich-app/{hf_model_name}", local_dir=output_dir)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    try:
        rknn_export(output_dir, no_cache=no_cache)
    except Exception as e:
        print(f"Failed to export model {model_name} to rknn: {e}")
        (output_dir / "rknpu").unlink(missing_ok=True)

    readme_path = output_dir / "README.md"
    if no_cache or not readme_path.exists():
        with open(readme_path, "w") as f:
            f.write(generate_readme(model_name, model_source))

    if hf_auth_token is not None:
        from huggingface_hub import create_repo, upload_folder

        repo_id = f"{hf_organization}/{hf_model_name}"

        @retry(stop=stop_after_attempt(5), wait=wait_fixed(5))
        def upload_model() -> None:
            create_repo(repo_id, exist_ok=True, token=hf_auth_token)
            upload_folder(
                repo_id=repo_id,
                folder_path=output_dir,
                # remote repo files to be deleted before uploading
                # deletion is in the same commit as the upload, so it's atomic
                delete_patterns=DELETE_PATTERNS,
                token=hf_auth_token,
            )

        upload_model()


if __name__ == "__main__":
    typer.run(main)
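
A quick usage sketch (not part of the commit): driving the exporter for a single OpenCLIP model from Python instead of the CLI. The module path follows the "python -m immich_model_exporter.export" invocation used elsewhere in this commit; treat it as an assumption if your checkout lays the package out differently.

from pathlib import Path

from immich_model_exporter.export import main
from immich_model_exporter.exporters.constants import ModelSource

# Writes ONNX (visual + textual), per-SoC RKNN files and a README.md under
# ./models/ViT-B-32__openai; nothing is uploaded because hf_auth_token stays None.
main("ViT-B-32__openai", ModelSource.OPENCLIP, output_dir=Path("./models"), no_cache=False)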


@@ -0,0 +1,42 @@
from enum import StrEnum
from typing import NamedTuple


class ModelSource(StrEnum):
    INSIGHTFACE = "insightface"
    MCLIP = "mclip"
    OPENCLIP = "openclip"


class SourceMetadata(NamedTuple):
    name: str
    link: str
    type: str


SOURCE_TO_METADATA = {
    ModelSource.MCLIP: SourceMetadata("M-CLIP", "https://huggingface.co/M-CLIP", "CLIP"),
    ModelSource.OPENCLIP: SourceMetadata("OpenCLIP", "https://github.com/mlfoundations/open_clip", "CLIP"),
    ModelSource.INSIGHTFACE: SourceMetadata(
        "InsightFace", "https://github.com/deepinsight/insightface/tree/master", "facial recognition"
    ),
}

RKNN_SOCS = ["rk3566", "rk3568", "rk3576", "rk3588"]


# glob to delete old UUID blobs when reuploading models
_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12
DELETE_PATTERNS = [
    "**/*onnx*",
    "**/Constant*",
    "**/*.weight",
    "**/*.bias",
    "**/*.proj",
    "**/*in_proj_bias",
    "**/*.npy",
    "**/*.latent",
    "**/*.pos_embed",
    f"**/{_uuid_glob}",
]
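
A standalone check (not part of the commit) of the UUID glob built above: fnmatch understands the same [a-fA-F0-9] character classes, so this shows which file names the UUID delete pattern is meant to catch when re-uploading.

from fnmatch import fnmatch

_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12

# Old Hugging Face blobs named after UUIDs match and get cleaned up on re-upload...
print(fnmatch("1f0e4c2a-9b3d-4e5f-8a6b-0c1d2e3f4a5b", _uuid_glob))  # True
# ...while the artifacts the repo actually serves do not.
print(fnmatch("model.rknn", _uuid_glob))  # False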


@@ -0,0 +1,20 @@
from pathlib import Path

from ..constants import ModelSource
from .models import mclip, openclip


def export(
    model_name: str, model_source: ModelSource, output_dir: Path, opset_version: int = 19, no_cache: bool = False
) -> None:
    visual_dir = output_dir / "visual"
    textual_dir = output_dir / "textual"
    match model_source:
        case ModelSource.MCLIP:
            mclip.to_onnx(model_name, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case ModelSource.OPENCLIP:
            name, _, pretrained = model_name.partition("__")
            config = openclip.OpenCLIPModelConfig(name, pretrained)
            openclip.to_onnx(config, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")
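
For reference (not part of the commit), this is how the name__pretrained convention used throughout this PR maps onto the partition("__") call above:

# "ViT-B-16-SigLIP__webli" -> open_clip model "ViT-B-16-SigLIP" with the "webli" weights
name, _, pretrained = "ViT-B-16-SigLIP__webli".partition("__")
assert (name, pretrained) == ("ViT-B-16-SigLIP", "webli")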


@@ -1,11 +1,6 @@
-import os
-import tempfile
-import warnings
 from pathlib import Path
-import torch
-from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
-from transformers import AutoTokenizer
+from typing import Any
 from .openclip import OpenCLIPModelConfig
 from .openclip import to_onnx as openclip_to_onnx
@@ -21,25 +16,40 @@ _MCLIP_TO_OPENCLIP = {
 def to_onnx(
     model_name: str,
+    opset_version: int,
     output_dir_visual: Path | str,
     output_dir_textual: Path | str,
+    no_cache: bool = False,
 ) -> tuple[Path, Path]:
     textual_path = get_model_path(output_dir_textual)
-    with tempfile.TemporaryDirectory() as tmpdir:
-        model = MultilingualCLIP.from_pretrained(model_name, cache_dir=os.environ.get("CACHE_DIR", tmpdir))
+    if no_cache or not textual_path.exists():
+        import torch
+        from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
+        from transformers import AutoTokenizer
+        torch.backends.mha.set_fastpath_enabled(False)
+        model = MultilingualCLIP.from_pretrained(model_name)
         AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)
         model.eval()
         for param in model.parameters():
             param.requires_grad_(False)
-        export_text_encoder(model, textual_path)
-    visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
-    assert visual_path is not None, "Visual model export failed"
+        _export_text_encoder(model, textual_path, opset_version)
+    else:
+        print(f"Model {textual_path} already exists, skipping")
+    visual_path, _ = openclip_to_onnx(
+        _MCLIP_TO_OPENCLIP[model_name], opset_version, output_dir_visual, no_cache=no_cache
+    )
+    assert visual_path is not None, "Visual model export failed"
     return visual_path, textual_path
-def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
+def _export_text_encoder(model: Any, output_path: Path | str, opset_version: int) -> None:
+    import torch
+    from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
     output_path = Path(output_path)
     def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
@@ -61,7 +71,7 @@ def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> Non
             output_path.as_posix(),
             input_names=["input_ids", "attention_mask"],
             output_names=["embedding"],
-            opset_version=17,
+            opset_version=opset_version,
             # dynamic_axes={
             # "input_ids": {0: "batch_size", 1: "sequence_length"},
             # "attention_mask": {0: "batch_size", 1: "sequence_length"},


@@ -0,0 +1,153 @@
import warnings
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Any

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str

    @cached_property
    def model_config(self) -> dict[str, Any]:
        import open_clip

        config: dict[str, Any] | None = open_clip.get_model_config(self.name)
        if config is None:
            raise ValueError(f"Unknown model {self.name}")
        return config

    @property
    def image_size(self) -> int:
        image_size: int = self.model_config["vision_cfg"]["image_size"]
        return image_size

    @property
    def sequence_length(self) -> int:
        context_length: int = self.model_config["text_cfg"].get("context_length", 77)
        return context_length


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    opset_version: int,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
    no_cache: bool = False,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    if output_dir_visual is not None:
        output_dir_visual = Path(output_dir_visual)
        visual_path = get_model_path(output_dir_visual)

    if output_dir_textual is not None:
        output_dir_textual = Path(output_dir_textual)
        textual_path = get_model_path(output_dir_textual)

    if not no_cache and (
        (textual_path is None or textual_path.exists()) and (visual_path is None or visual_path.exists())
    ):
        print(f"Models {textual_path} and {visual_path} already exist, skipping")
        return visual_path, textual_path

    import open_clip
    import torch
    from transformers import AutoTokenizer

    torch.backends.mha.set_fastpath_enabled(False)

    model = open_clip.create_model(
        model_cfg.name,
        pretrained=model_cfg.pretrained,
        jit=False,
        require_pretrained=True,
    )

    text_vision_cfg = open_clip.get_model_config(model_cfg.name)

    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)

    if visual_path is not None and output_dir_visual is not None:
        if no_cache or not visual_path.exists():
            save_config(
                open_clip.get_model_preprocess_cfg(model),
                output_dir_visual / "preprocess_cfg.json",
            )
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            _export_image_encoder(model, model_cfg, visual_path, opset_version)
        else:
            print(f"Model {visual_path} already exists, skipping")

    if textual_path is not None and output_dir_textual is not None:
        if no_cache or not textual_path.exists():
            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            _export_text_encoder(model, model_cfg, textual_path, opset_version)
        else:
            print(f"Model {textual_path} already exists, skipping")
    return visual_path, textual_path


def _export_image_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_image

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def _export_text_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_text

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"text": {0: "batch_size"}},
        )
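
A usage sketch (not part of the commit), assuming the package layout implied by the relative imports (immich_model_exporter.exporters.onnx.models.openclip) and an environment with open-clip-torch installed:

from pathlib import Path

from immich_model_exporter.exporters.onnx.models.openclip import OpenCLIPModelConfig, to_onnx

cfg = OpenCLIPModelConfig("ViT-B-32", "openai")
print(cfg.image_size)       # 224; resolved lazily via open_clip.get_model_config
print(cfg.sequence_length)  # 77 unless text_cfg overrides context_length

# Export both encoders with the same default opset the CLI uses (19); existing
# ONNX files are reused unless no_cache=True.
to_onnx(cfg, opset_version=19, output_dir_visual=Path("out/visual"), output_dir_textual=Path("out/textual"))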


@@ -0,0 +1,96 @@
from pathlib import Path

from .constants import RKNN_SOCS


def _export_platform(
    model_dir: Path,
    target_platform: str,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    fuse_matmul_softmax_matmul_to_sdpa: bool = True,
    no_cache: bool = False,
) -> None:
    from rknn.api import RKNN

    input_path = model_dir / "model.onnx"
    output_path = model_dir / "rknpu" / target_platform / "model.rknn"
    if not no_cache and output_path.exists():
        print(f"Model {input_path} already exists at {output_path}, skipping")
        return

    print(f"Exporting model {input_path} to {output_path}")

    rknn = RKNN(verbose=False)

    rknn.config(
        target_platform=target_platform,
        disable_rules=["fuse_matmul_softmax_matmul_to_sdpa"] if not fuse_matmul_softmax_matmul_to_sdpa else [],
        enable_flash_attention=False,
        model_pruning=True,
    )
    ret = rknn.load_onnx(model=input_path.as_posix(), inputs=inputs, input_size_list=input_size_list)

    if ret != 0:
        raise RuntimeError("Load failed!")

    ret = rknn.build(do_quantization=False)

    if ret != 0:
        raise RuntimeError("Build failed!")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    ret = rknn.export_rknn(output_path.as_posix())
    if ret != 0:
        raise RuntimeError("Export rknn model failed!")


def _export_platforms(
    model_dir: Path,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    no_cache: bool = False,
) -> None:
    fuse_matmul_softmax_matmul_to_sdpa = True
    for soc in RKNN_SOCS:
        try:
            _export_platform(
                model_dir,
                soc,
                inputs=inputs,
                input_size_list=input_size_list,
                fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                no_cache=no_cache,
            )
        except Exception as e:
            print(f"Failed to export model for {soc}: {e}")
            if "inputs or 'outputs' must be set" in str(e):
                print("Retrying without fuse_matmul_softmax_matmul_to_sdpa")
                fuse_matmul_softmax_matmul_to_sdpa = False
                _export_platform(
                    model_dir,
                    soc,
                    inputs=inputs,
                    input_size_list=input_size_list,
                    fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                    no_cache=no_cache,
                )


def export(model_dir: Path, no_cache: bool = False) -> None:
    textual = model_dir / "textual"
    visual = model_dir / "visual"
    detection = model_dir / "detection"
    recognition = model_dir / "recognition"
    if textual.is_dir():
        _export_platforms(textual, no_cache=no_cache)
    if visual.is_dir():
        _export_platforms(visual, no_cache=no_cache)
    if detection.is_dir():
        _export_platforms(detection, inputs=["input.1"], input_size_list=[[1, 3, 640, 640]], no_cache=no_cache)
    if recognition.is_dir():
        _export_platforms(recognition, inputs=["input.1"], input_size_list=[[1, 3, 112, 112]], no_cache=no_cache)
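
A sketch (not part of the commit) of converting one already-exported model directory, with the export function above in scope; the output layout shown in the comments follows the model_dir / "rknpu" / soc / "model.rknn" path built in _export_platform.

from pathlib import Path

# Assumes models/ViT-B-32__openai/{visual,textual}/model.onnx already exist
# (e.g. produced by the ONNX exporter earlier in this commit).
export(Path("models/ViT-B-32__openai"), no_cache=False)

# Expected results, one file per SoC in RKNN_SOCS, e.g.:
#   models/ViT-B-32__openai/visual/rknpu/rk3566/model.rknn
#   models/ViT-B-32__openai/visual/rknpu/rk3588/model.rknn
#   models/ViT-B-32__openai/textual/rknpu/rk3588/model.rknn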


@@ -0,0 +1,88 @@
import subprocess
from exporters.constants import ModelSource
mclip = [
"M-CLIP/LABSE-Vit-L-14",
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
"M-CLIP/XLM-Roberta-Large-Vit-B-32",
"M-CLIP/XLM-Roberta-Large-Vit-L-14",
]
openclip = [
"RN101__openai",
"RN101__yfcc15m",
"RN50__cc12m",
"RN50__openai",
"RN50__yfcc15m",
"RN50x16__openai",
"RN50x4__openai",
"RN50x64__openai",
"ViT-B-16-SigLIP-256__webli",
"ViT-B-16-SigLIP-384__webli",
"ViT-B-16-SigLIP-512__webli",
"ViT-B-16-SigLIP-i18n-256__webli",
"ViT-B-16-SigLIP2__webli",
"ViT-B-16-SigLIP__webli",
"ViT-B-16-plus-240__laion400m_e31",
"ViT-B-16-plus-240__laion400m_e32",
"ViT-B-16__laion400m_e31",
"ViT-B-16__laion400m_e32",
"ViT-B-16__openai",
"ViT-B-32-SigLIP2-256__webli",
"ViT-B-32__laion2b-s34b-b79k",
"ViT-B-32__laion2b_e16",
"ViT-B-32__laion400m_e31",
"ViT-B-32__laion400m_e32",
"ViT-B-32__openai",
"ViT-H-14-378-quickgelu__dfn5b",
"ViT-H-14-quickgelu__dfn5b",
"ViT-H-14__laion2b-s32b-b79k",
"ViT-L-14-336__openai",
"ViT-L-14-quickgelu__dfn2b",
"ViT-L-14__laion2b-s32b-b82k",
"ViT-L-14__laion400m_e31",
"ViT-L-14__laion400m_e32",
"ViT-L-14__openai",
"ViT-L-16-SigLIP-256__webli",
"ViT-L-16-SigLIP-384__webli",
"ViT-L-16-SigLIP2-256__webli",
"ViT-L-16-SigLIP2-384__webli",
"ViT-L-16-SigLIP2-512__webli",
"ViT-SO400M-14-SigLIP-384__webli",
"ViT-SO400M-14-SigLIP2-378__webli",
"ViT-SO400M-14-SigLIP2__webli",
"ViT-SO400M-16-SigLIP2-256__webli",
"ViT-SO400M-16-SigLIP2-384__webli",
"ViT-SO400M-16-SigLIP2-512__webli",
"ViT-gopt-16-SigLIP2-256__webli",
"ViT-gopt-16-SigLIP2-384__webli",
"nllb-clip-base-siglip__mrl",
"nllb-clip-base-siglip__v1",
"nllb-clip-large-siglip__mrl",
"nllb-clip-large-siglip__v1",
"xlm-roberta-base-ViT-B-32__laion5b_s13b_b90k",
"xlm-roberta-large-ViT-H-14__frozen_laion5b_s13b_b90k",
]
insightface = [
"antelopev2",
"buffalo_l",
"buffalo_m",
"buffalo_s",
]


def export_models(models: list[str], source: ModelSource) -> None:
    for model in models:
        try:
            print(f"Exporting model {model}")
            subprocess.check_call(["python", "-m", "immich_model_exporter.export", model, source])
        except Exception as e:
            print(f"Failed to export model {model}: {e}")


if __name__ == "__main__":
    export_models(mclip, ModelSource.MCLIP)
    export_models(openclip, ModelSource.OPENCLIP)
    export_models(insightface, ModelSource.INSIGHTFACE)
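
A small variation (not part of the commit): because ModelSource is a StrEnum, its members are already strings and can be passed straight to subprocess as argv elements, so exporting an ad-hoc subset only needs a different list.

assert isinstance(ModelSource.OPENCLIP, str)  # StrEnum members are plain strings

# Reuse export_models from this script for a couple of models only.
export_models(["ViT-B-32__openai", "ViT-B-16-SigLIP__webli"], ModelSource.OPENCLIP)
export_models(["buffalo_l"], ModelSource.INSIGHTFACE)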


@@ -1,114 +0,0 @@
import os
import tempfile
import warnings
from dataclasses import dataclass, field
from pathlib import Path

import open_clip
import torch
from transformers import AutoTokenizer

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str
    image_size: int = field(init=False)
    sequence_length: int = field(init=False)

    def __post_init__(self) -> None:
        open_clip_cfg = open_clip.get_model_config(self.name)
        if open_clip_cfg is None:
            raise ValueError(f"Unknown model {self.name}")
        self.image_size = open_clip_cfg["vision_cfg"]["image_size"]
        self.sequence_length = open_clip_cfg["text_cfg"].get("context_length", 77)


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    with tempfile.TemporaryDirectory() as tmpdir:
        model = open_clip.create_model(
            model_cfg.name,
            pretrained=model_cfg.pretrained,
            jit=False,
            cache_dir=os.environ.get("CACHE_DIR", tmpdir),
            require_pretrained=True,
        )

        text_vision_cfg = open_clip.get_model_config(model_cfg.name)

        model.eval()
        for param in model.parameters():
            param.requires_grad_(False)

        if output_dir_visual is not None:
            output_dir_visual = Path(output_dir_visual)
            visual_path = get_model_path(output_dir_visual)

            save_config(open_clip.get_model_preprocess_cfg(model), output_dir_visual / "preprocess_cfg.json")
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            export_image_encoder(model, model_cfg, visual_path)

        if output_dir_textual is not None:
            output_dir_textual = Path(output_dir_textual)
            textual_path = get_model_path(output_dir_textual)

            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            export_text_encoder(model, model_cfg, textual_path)
    return visual_path, textual_path


def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)
    traced = torch.jit.trace(encode_image, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=17,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def export_text_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)
    traced = torch.jit.trace(encode_text, args)  # type: ignore[no-untyped-call]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            traced,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=17,
            # dynamic_axes={"text": {0: "batch_size"}},
        )


@@ -1,49 +0,0 @@
from pathlib import Path

import onnx
import onnxruntime as ort
import onnxsim


def save_onnx(model: onnx.ModelProto, output_path: Path | str) -> None:
    try:
        onnx.save(model, output_path)
    except ValueError as e:
        if "The proto size is larger than the 2 GB limit." in str(e):
            onnx.save(model, output_path, save_as_external_data=True, size_threshold=1_000_000)
        else:
            raise e


def optimize_onnxsim(model_path: Path | str, output_path: Path | str) -> None:
    model_path = Path(model_path)
    output_path = Path(output_path)
    model = onnx.load(model_path.as_posix())
    model, check = onnxsim.simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    for file in model_path.parent.iterdir():
        if file.name.startswith("Constant") or "onnx" in file.name or file.suffix == ".weight":
            file.unlink()
    save_onnx(model, output_path)


def optimize_ort(
    model_path: Path | str,
    output_path: Path | str,
    level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
) -> None:
    model_path = Path(model_path)
    output_path = Path(output_path)
    sess_options = ort.SessionOptions()
    sess_options.graph_optimization_level = level
    sess_options.optimized_model_filepath = output_path.as_posix()
    ort.InferenceSession(model_path.as_posix(), providers=["CPUExecutionProvider"], sess_options=sess_options)


def optimize(model_path: Path | str) -> None:
    model_path = Path(model_path)

    optimize_ort(model_path, model_path)
    optimize_onnxsim(model_path, model_path)


@@ -0,0 +1,67 @@
[project]
name = "immich_model_exporter"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.10, <4.0"
dependencies = [
"huggingface-hub>=0.29.3",
"multilingual-clip>=1.0.10",
"onnx>=1.14.1",
"onnxruntime>=1.16.0",
"open-clip-torch>=2.31.0",
"typer>=0.15.2",
"rknn-toolkit2>=2.3.0",
"transformers>=4.49.0",
"tenacity>=9.0.0",
]
[dependency-groups]
dev = ["black>=23.3.0", "mypy>=1.3.0", "ruff>=0.0.272"]
[tool.uv]
override-dependencies = [
"onnx>=1.16.0,<2",
"onnxruntime>=1.18.2,<2",
"torch>=2.4",
"torchvision>=0.21",
]
[tool.uv.sources]
torch = [{ index = "pytorch-cpu" }]
torchvision = [{ index = "pytorch-cpu" }]
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true
[tool.hatch.build.targets.sdist]
include = ["immich_model_exporter"]
[tool.hatch.build.targets.wheel]
include = ["immich_model_exporter"]
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.mypy]
python_version = "3.12"
follow_imports = "silent"
warn_redundant_casts = true
disallow_any_generics = true
check_untyped_defs = true
disallow_untyped_defs = true
ignore_missing_imports = true
[tool.ruff]
line-length = 120
target-version = "py312"
[tool.ruff.lint]
select = ["E", "F", "I"]
[tool.black]
line-length = 120
target-version = ['py312']


@@ -1,113 +0,0 @@
import gc
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import torch
from huggingface_hub import create_repo, upload_folder
from models import mclip, openclip
from models.optimize import optimize
from rich.progress import Progress
models = [
"M-CLIP/LABSE-Vit-L-14",
"M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
"M-CLIP/XLM-Roberta-Large-Vit-B-32",
"M-CLIP/XLM-Roberta-Large-Vit-L-14",
"RN101::openai",
"RN101::yfcc15m",
"RN50::cc12m",
"RN50::openai",
"RN50::yfcc15m",
"RN50x16::openai",
"RN50x4::openai",
"RN50x64::openai",
"ViT-B-16-SigLIP-256::webli",
"ViT-B-16-SigLIP-384::webli",
"ViT-B-16-SigLIP-512::webli",
"ViT-B-16-SigLIP-i18n-256::webli",
"ViT-B-16-SigLIP::webli",
"ViT-B-16-plus-240::laion400m_e31",
"ViT-B-16-plus-240::laion400m_e32",
"ViT-B-16::laion400m_e31",
"ViT-B-16::laion400m_e32",
"ViT-B-16::openai",
"ViT-B-32::laion2b-s34b-b79k",
"ViT-B-32::laion2b_e16",
"ViT-B-32::laion400m_e31",
"ViT-B-32::laion400m_e32",
"ViT-B-32::openai",
"ViT-H-14-378-quickgelu::dfn5b",
"ViT-H-14-quickgelu::dfn5b",
"ViT-H-14::laion2b-s32b-b79k",
"ViT-L-14-336::openai",
"ViT-L-14-quickgelu::dfn2b",
"ViT-L-14::laion2b-s32b-b82k",
"ViT-L-14::laion400m_e31",
"ViT-L-14::laion400m_e32",
"ViT-L-14::openai",
"ViT-L-16-SigLIP-256::webli",
"ViT-L-16-SigLIP-384::webli",
"ViT-SO400M-14-SigLIP-384::webli",
"ViT-g-14::laion2b-s12b-b42k",
"nllb-clip-base-siglip::mrl",
"nllb-clip-base-siglip::v1",
"nllb-clip-large-siglip::mrl",
"nllb-clip-large-siglip::v1",
"xlm-roberta-base-ViT-B-32::laion5b_s13b_b90k",
"xlm-roberta-large-ViT-H-14::frozen_laion5b_s13b_b90k",
]
# glob to delete old UUID blobs when reuploading models
uuid_char = "[a-fA-F0-9]"
uuid_glob = uuid_char * 8 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 12
# remote repo files to be deleted before uploading
# deletion is in the same commit as the upload, so it's atomic
delete_patterns = ["**/*onnx*", "**/Constant*", "**/*.weight", "**/*.bias", f"**/{uuid_glob}"]
with Progress() as progress:
    task = progress.add_task("[green]Exporting models...", total=len(models))
    token = os.environ.get("HF_AUTH_TOKEN")
    torch.backends.mha.set_fastpath_enabled(False)
    with TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        for model in models:
            model_name = model.split("/")[-1].replace("::", "__")
            hf_model_name = model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
            hf_model_name = model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
            config_path = tmpdir / model_name / "config.json"

            def export() -> None:
                progress.update(task, description=f"[green]Exporting {hf_model_name}")
                visual_dir = tmpdir / hf_model_name / "visual"
                textual_dir = tmpdir / hf_model_name / "textual"
                if model.startswith("M-CLIP"):
                    visual_path, textual_path = mclip.to_onnx(model, visual_dir, textual_dir)
                else:
                    name, _, pretrained = model_name.partition("__")
                    config = openclip.OpenCLIPModelConfig(name, pretrained)
                    visual_path, textual_path = openclip.to_onnx(config, visual_dir, textual_dir)

                progress.update(task, description=f"[green]Optimizing {hf_model_name} (visual)")
                optimize(visual_path)
                progress.update(task, description=f"[green]Optimizing {hf_model_name} (textual)")
                optimize(textual_path)

                gc.collect()

            def upload() -> None:
                progress.update(task, description=f"[yellow]Uploading {hf_model_name}")
                repo_id = f"immich-app/{hf_model_name}"

                create_repo(repo_id, exist_ok=True)
                upload_folder(
                    repo_id=repo_id,
                    folder_path=tmpdir / hf_model_name,
                    delete_patterns=delete_patterns,
                    token=token,
                )

            export()
            if token is not None:
                upload()
            progress.update(task, advance=1)

machine-learning/export/uv.lock (generated, 1395 lines): file diff suppressed because it is too large.