Compare commits


1 commit

Elias Schneider · 89dd07a7ba · fix: make wildcard matching in callback URLs more strict · 2025-12-22 08:46:45 +01:00
86 changed files with 2674 additions and 3197 deletions


@@ -171,7 +171,7 @@ jobs:
run: |
DOCKER_COMPOSE_FILE=docker-compose.yml
echo "FILE_BACKEND=${{ matrix.storage }}" > .env
export FILE_BACKEND="${{ matrix.storage }}"
if [ "${{ matrix.db }}" = "postgres" ]; then
DOCKER_COMPOSE_FILE=docker-compose-postgres.yml
elif [ "${{ matrix.storage }}" = "s3" ]; then
@@ -179,20 +179,7 @@ jobs:
fi
docker compose -f "$DOCKER_COMPOSE_FILE" up -d
{
LOG_FILE="/tmp/backend.log"
while true; do
CID=$(docker compose -f "$DOCKER_COMPOSE_FILE" ps -q pocket-id)
if [ -n "$CID" ]; then
echo "[$(date)] Attaching logs for $CID" >> "$LOG_FILE"
docker logs -f --since=0 "$CID" >> "$LOG_FILE" 2>&1
else
echo "[$(date)] Container not yet running…" >> "$LOG_FILE"
fi
sleep 1
done
} &
docker compose -f "$DOCKER_COMPOSE_FILE" logs -f pocket-id &> /tmp/backend.log &
- name: Run Playwright tests
working-directory: ./tests

.gitignore

@@ -15,7 +15,6 @@ node_modules
/backend/bin
pocket-id
/tests/test-results/*.json
.tmp/
# OS
.DS_Store


@@ -1,12 +1,9 @@
package main
import (
"fmt"
"os"
_ "time/tzdata"
"github.com/pocket-id/pocket-id/backend/internal/cmds"
"github.com/pocket-id/pocket-id/backend/internal/common"
)
// @title Pocket ID API
@@ -14,9 +11,5 @@ import (
// @description.markdown
func main() {
if err := common.ValidateEnvConfig(&common.EnvConfig); err != nil {
fmt.Fprintf(os.Stderr, "config error: %v\n", err)
os.Exit(1)
}
cmds.Execute()
}


@@ -14,6 +14,7 @@ require (
github.com/disintegration/imaging v1.6.2
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6
github.com/emersion/go-smtp v0.24.0
github.com/fxamacker/cbor/v2 v2.9.0
github.com/gin-contrib/slog v1.2.0
github.com/gin-gonic/gin v1.11.0
github.com/glebarez/go-sqlite v1.22.0
@@ -83,7 +84,6 @@ require (
github.com/disintegration/gift v1.2.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.11 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
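One behavioral note on this hunk: github.com/fxamacker/cbor/v2 moves from an indirect to a direct dependency, because the test service now derives WebAuthn COSE public keys from DER-encoded input (see getCborPublicKey further down) instead of hard-coding CBOR blobs. A rough sketch of what such a helper could look like; this is an illustration under assumed names, not the repository's actual implementation:

package example

import (
	"crypto/ecdsa"
	"crypto/x509"
	"encoding/base64"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// derToCoseKey converts a base64-encoded DER (PKIX) P-256 public key into a
// CBOR-encoded COSE_Key map, the representation WebAuthn credentials use.
func derToCoseKey(b64 string) ([]byte, error) {
	der, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return nil, fmt.Errorf("decode base64: %w", err)
	}
	pub, err := x509.ParsePKIXPublicKey(der)
	if err != nil {
		return nil, fmt.Errorf("parse public key: %w", err)
	}
	ec, ok := pub.(*ecdsa.PublicKey)
	if !ok {
		return nil, fmt.Errorf("not an ECDSA public key")
	}
	coseKey := map[int]any{
		1:  2,  // kty: EC2
		3:  -7, // alg: ES256
		-1: 1,  // crv: P-256
		-2: ec.X.FillBytes(make([]byte, 32)), // x coordinate, fixed 32 bytes
		-3: ec.Y.FillBytes(make([]byte, 32)), // y coordinate, fixed 32 bytes
	}
	return cbor.Marshal(coseKey)
}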


@@ -7,7 +7,6 @@ import (
"time"
_ "github.com/golang-migrate/migrate/v4/source/file"
"gorm.io/gorm"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/job"
@@ -16,16 +15,6 @@ import (
)
func Bootstrap(ctx context.Context) error {
var shutdownFns []utils.Service
defer func() { //nolint:contextcheck
// Invoke all shutdown functions on exit
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := utils.NewServiceRunner(shutdownFns...).Run(shutdownCtx); err != nil {
slog.Error("Error during graceful shutdown", "error", err)
}
}()
// Initialize the observability stack, including the logger, distributed tracing, and metrics
shutdownFns, httpClient, err := initObservability(ctx, common.EnvConfig.MetricsEnabled, common.EnvConfig.TracingEnabled)
if err != nil {
@@ -33,80 +22,15 @@ func Bootstrap(ctx context.Context) error {
}
slog.InfoContext(ctx, "Pocket ID is starting")
// Connect to the database
db, err := NewDatabase()
if err != nil {
return fmt.Errorf("failed to initialize database: %w", err)
}
fileStorage, err := InitStorage(ctx, db)
if err != nil {
return fmt.Errorf("failed to initialize file storage (backend: %s): %w", common.EnvConfig.FileBackend, err)
}
// Initialize the file storage backend
var fileStorage storage.FileStorage
imageExtensions, err := initApplicationImages(ctx, fileStorage)
if err != nil {
return fmt.Errorf("failed to initialize application images: %w", err)
}
// Create all services
svc, err := initServices(ctx, db, httpClient, imageExtensions, fileStorage)
if err != nil {
return fmt.Errorf("failed to initialize services: %w", err)
}
waitUntil, err := svc.appLockService.Acquire(ctx, false)
if err != nil {
return fmt.Errorf("failed to acquire application lock: %w", err)
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Until(waitUntil)):
}
shutdownFn := func(shutdownCtx context.Context) error {
sErr := svc.appLockService.Release(shutdownCtx)
if sErr != nil {
return fmt.Errorf("failed to release application lock: %w", sErr)
}
return nil
}
shutdownFns = append(shutdownFns, shutdownFn)
// Init the job scheduler
scheduler, err := job.NewScheduler()
if err != nil {
return fmt.Errorf("failed to create job scheduler: %w", err)
}
err = registerScheduledJobs(ctx, db, svc, httpClient, scheduler)
if err != nil {
return fmt.Errorf("failed to register scheduled jobs: %w", err)
}
// Init the router
router, err := initRouter(db, svc)
if err != nil {
return fmt.Errorf("failed to initialize router: %w", err)
}
// Run all background services
// This call blocks until the context is canceled
services := []utils.Service{svc.appLockService.RunRenewal, router}
if common.EnvConfig.AppEnv != "test" {
services = append(services, scheduler.Run)
}
err = utils.NewServiceRunner(services...).Run(ctx)
if err != nil {
return fmt.Errorf("failed to run services: %w", err)
}
return nil
}
func InitStorage(ctx context.Context, db *gorm.DB) (fileStorage storage.FileStorage, err error) {
switch common.EnvConfig.FileBackend {
case storage.TypeFileSystem:
fileStorage, err = storage.NewFilesystemStorage(common.EnvConfig.UploadPath)
@@ -128,8 +52,53 @@ func InitStorage(ctx context.Context, db *gorm.DB) (fileStorage storage.FileStor
err = fmt.Errorf("unknown file storage backend: %s", common.EnvConfig.FileBackend)
}
if err != nil {
return fileStorage, err
return fmt.Errorf("failed to initialize file storage (backend: %s): %w", common.EnvConfig.FileBackend, err)
}
return fileStorage, nil
imageExtensions, err := initApplicationImages(ctx, fileStorage)
if err != nil {
return fmt.Errorf("failed to initialize application images: %w", err)
}
// Create all services
svc, err := initServices(ctx, db, httpClient, imageExtensions, fileStorage)
if err != nil {
return fmt.Errorf("failed to initialize services: %w", err)
}
// Init the job scheduler
scheduler, err := job.NewScheduler()
if err != nil {
return fmt.Errorf("failed to create job scheduler: %w", err)
}
err = registerScheduledJobs(ctx, db, svc, httpClient, scheduler)
if err != nil {
return fmt.Errorf("failed to register scheduled jobs: %w", err)
}
// Init the router
router := initRouter(db, svc)
// Run all background services
// This call blocks until the context is canceled
err = utils.
NewServiceRunner(router, scheduler.Run).
Run(ctx)
if err != nil {
return fmt.Errorf("failed to run services: %w", err)
}
// Invoke all shutdown functions
// We give these a timeout of 5s
// Note: we use a background context because the run context has been canceled already
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
defer shutdownCancel()
err = utils.
NewServiceRunner(shutdownFns...).
Run(shutdownCtx) //nolint:contextcheck
if err != nil {
slog.Error("Error shutting down services", slog.Any("error", err))
}
return nil
}
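After the restructure, Bootstrap runs the router and the job scheduler concurrently through utils.ServiceRunner, and performs shutdown inline once Run returns instead of in a deferred closure. For context, a minimal sketch of such a runner under the types the diff uses; the repository's actual utils implementation may differ:

package utils

import (
	"context"
	"errors"
	"sync"
)

// Service is a long-running component that blocks until ctx is canceled.
type Service func(ctx context.Context) error

// ServiceRunner runs a set of Services concurrently and stops them together.
type ServiceRunner struct {
	services []Service
}

func NewServiceRunner(services ...Service) *ServiceRunner {
	return &ServiceRunner{services: services}
}

// Run starts every service in its own goroutine and waits for all of them.
// If any service returns an error, the shared context is canceled so the
// remaining services shut down too.
func (r *ServiceRunner) Run(ctx context.Context) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, svc := range r.services {
		wg.Add(1)
		go func(svc Service) {
			defer wg.Done()
			if err := svc(ctx); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
				cancel()
			}
		}(svc)
	}
	wg.Wait()
	return errors.Join(errs...)
}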


@@ -12,7 +12,12 @@ import (
"time"
"github.com/glebarez/sqlite"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
postgresMigrate "github.com/golang-migrate/migrate/v4/database/postgres"
sqliteMigrate "github.com/golang-migrate/migrate/v4/database/sqlite3"
_ "github.com/golang-migrate/migrate/v4/source/github"
"github.com/golang-migrate/migrate/v4/source/iofs"
slogGorm "github.com/orandin/slog-gorm"
"gorm.io/driver/postgres"
"gorm.io/gorm"
@@ -21,10 +26,11 @@ import (
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/utils"
sqliteutil "github.com/pocket-id/pocket-id/backend/internal/utils/sqlite"
"github.com/pocket-id/pocket-id/backend/resources"
)
func NewDatabase() (db *gorm.DB, err error) {
db, err = ConnectDatabase()
db, err = connectDatabase()
if err != nil {
return nil, fmt.Errorf("failed to connect to database: %w", err)
}
@@ -33,15 +39,105 @@ func NewDatabase() (db *gorm.DB, err error) {
return nil, fmt.Errorf("failed to get sql.DB: %w", err)
}
// Choose the correct driver for the database provider
var driver database.Driver
switch common.EnvConfig.DbProvider {
case common.DbProviderSqlite:
driver, err = sqliteMigrate.WithInstance(sqlDb, &sqliteMigrate.Config{
NoTxWrap: true,
})
case common.DbProviderPostgres:
driver, err = postgresMigrate.WithInstance(sqlDb, &postgresMigrate.Config{})
default:
// Should never happen at this point
return nil, fmt.Errorf("unsupported database provider: %s", common.EnvConfig.DbProvider)
}
if err != nil {
return nil, fmt.Errorf("failed to create migration driver: %w", err)
}
// Run migrations
if err := utils.MigrateDatabase(sqlDb); err != nil {
if err := migrateDatabase(driver); err != nil {
return nil, fmt.Errorf("failed to run migrations: %w", err)
}
return db, nil
}
func ConnectDatabase() (db *gorm.DB, err error) {
func migrateDatabase(driver database.Driver) error {
// Embedded migrations via iofs
path := "migrations/" + string(common.EnvConfig.DbProvider)
source, err := iofs.New(resources.FS, path)
if err != nil {
return fmt.Errorf("failed to create embedded migration source: %w", err)
}
m, err := migrate.NewWithInstance("iofs", source, "pocket-id", driver)
if err != nil {
return fmt.Errorf("failed to create migration instance: %w", err)
}
requiredVersion, err := getRequiredMigrationVersion(path)
if err != nil {
return fmt.Errorf("failed to get last migration version: %w", err)
}
currentVersion, _, _ := m.Version()
if currentVersion > requiredVersion {
slog.Warn("Database version is newer than the application supports, possible downgrade detected", slog.Uint64("db_version", uint64(currentVersion)), slog.Uint64("app_version", uint64(requiredVersion)))
if !common.EnvConfig.AllowDowngrade {
return fmt.Errorf("database version (%d) is newer than application version (%d), downgrades are not allowed (set ALLOW_DOWNGRADE=true to enable)", currentVersion, requiredVersion)
}
slog.Info("Fetching migrations from GitHub to handle possible downgrades")
return migrateDatabaseFromGitHub(driver, requiredVersion)
}
if err := m.Migrate(requiredVersion); err != nil && !errors.Is(err, migrate.ErrNoChange) {
return fmt.Errorf("failed to apply embedded migrations: %w", err)
}
return nil
}
func migrateDatabaseFromGitHub(driver database.Driver, version uint) error {
srcURL := "github://pocket-id/pocket-id/backend/resources/migrations/" + string(common.EnvConfig.DbProvider)
m, err := migrate.NewWithDatabaseInstance(srcURL, "pocket-id", driver)
if err != nil {
return fmt.Errorf("failed to create GitHub migration instance: %w", err)
}
if err := m.Migrate(version); err != nil && !errors.Is(err, migrate.ErrNoChange) {
return fmt.Errorf("failed to apply GitHub migrations: %w", err)
}
return nil
}
// getRequiredMigrationVersion reads the embedded migration files and returns the highest version number found.
func getRequiredMigrationVersion(path string) (uint, error) {
entries, err := resources.FS.ReadDir(path)
if err != nil {
return 0, fmt.Errorf("failed to read migration directory: %w", err)
}
var maxVersion uint
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
var version uint
n, err := fmt.Sscanf(name, "%d_", &version)
if err == nil && n == 1 {
if version > maxVersion {
maxVersion = version
}
}
}
return maxVersion, nil
}
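// For illustration only (not part of the commit): golang-migrate files are
// named with a leading numeric version, e.g. "20240101000000_add_kv.up.sql",
// and the "%d_" Sscanf pattern reads digits up to the first underscore:
//
//	var v uint
//	n, err := fmt.Sscanf("20240101000000_add_kv.up.sql", "%d_", &v)
//	// n == 1, err == nil, v == 20240101000000
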
func connectDatabase() (db *gorm.DB, err error) {
var dialector gorm.Dialector
// Choose the correct database provider


@@ -17,7 +17,7 @@ import (
func init() {
registerTestControllers = []func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services){
func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services) {
testService, err := service.NewTestService(db, svc.appConfigService, svc.jwtService, svc.ldapService, svc.appLockService, svc.fileStorage)
testService, err := service.NewTestService(db, svc.appConfigService, svc.jwtService, svc.ldapService, svc.fileStorage)
if err != nil {
slog.Error("Failed to initialize test service", slog.Any("error", err))
os.Exit(1)


@@ -29,7 +29,16 @@ import (
// This is used to register additional controllers for tests
var registerTestControllers []func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services)
func initRouter(db *gorm.DB, svc *services) (utils.Service, error) {
func initRouter(db *gorm.DB, svc *services) utils.Service {
runner, err := initRouterInternal(db, svc)
if err != nil {
slog.Error("Failed to init router", "error", err)
os.Exit(1)
}
return runner
}
func initRouterInternal(db *gorm.DB, svc *services) (utils.Service, error) {
// Set the appropriate Gin mode based on the environment
switch common.EnvConfig.AppEnv {
case common.AppEnvProduction:


@@ -27,7 +27,6 @@ type services struct {
apiKeyService *service.ApiKeyService
versionService *service.VersionService
fileStorage storage.FileStorage
appLockService *service.AppLockService
}
// Initializes all services
@@ -41,7 +40,6 @@ func initServices(ctx context.Context, db *gorm.DB, httpClient *http.Client, ima
svc.fileStorage = fileStorage
svc.appImagesService = service.NewAppImagesService(imageExtensions, fileStorage)
svc.appLockService = service.NewAppLockService(db)
svc.emailService, err = service.NewEmailService(db, svc.appConfigService)
if err != nil {


@@ -1,70 +0,0 @@
package cmds
import (
"context"
"fmt"
"io"
"os"
"github.com/pocket-id/pocket-id/backend/internal/bootstrap"
"github.com/pocket-id/pocket-id/backend/internal/service"
"github.com/spf13/cobra"
)
type exportFlags struct {
Path string
}
func init() {
var flags exportFlags
exportCmd := &cobra.Command{
Use: "export",
Short: "Exports all data of Pocket ID into a ZIP file",
RunE: func(cmd *cobra.Command, args []string) error {
return runExport(cmd.Context(), flags)
},
}
exportCmd.Flags().StringVarP(&flags.Path, "path", "p", "pocket-id-export.zip", "Path to the ZIP file to export the data to, or '-' to write to stdout")
rootCmd.AddCommand(exportCmd)
}
// runExport orchestrates the export flow
func runExport(ctx context.Context, flags exportFlags) error {
db, err := bootstrap.NewDatabase()
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}
storage, err := bootstrap.InitStorage(ctx, db)
if err != nil {
return fmt.Errorf("failed to initialize storage: %w", err)
}
exportService := service.NewExportService(db, storage)
var w io.Writer
if flags.Path == "-" {
w = os.Stdout
} else {
file, err := os.Create(flags.Path)
if err != nil {
return fmt.Errorf("failed to create export file: %w", err)
}
defer file.Close()
w = file
}
if err := exportService.ExportToZip(ctx, w); err != nil {
return fmt.Errorf("failed to export data: %w", err)
}
if flags.Path != "-" {
fmt.Printf("Exported data to %s\n", flags.Path)
}
return nil
}


@@ -1,191 +0,0 @@
package cmds
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"gorm.io/gorm"
"github.com/pocket-id/pocket-id/backend/internal/bootstrap"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/service"
"github.com/pocket-id/pocket-id/backend/internal/utils"
)
type importFlags struct {
Path string
Yes bool
ForcefullyAcquireLock bool
}
func init() {
var flags importFlags
importCmd := &cobra.Command{
Use: "import",
Short: "Imports all data of Pocket ID from a ZIP file",
RunE: func(cmd *cobra.Command, args []string) error {
return runImport(cmd.Context(), flags)
},
}
importCmd.Flags().StringVarP(&flags.Path, "path", "p", "pocket-id-export.zip", "Path to the ZIP file to import the data from, or '-' to read from stdin")
importCmd.Flags().BoolVarP(&flags.Yes, "yes", "y", false, "Skip confirmation prompts")
importCmd.Flags().BoolVarP(&flags.ForcefullyAcquireLock, "forcefully-acquire-lock", "", false, "Forcefully acquire the application lock by terminating the Pocket ID instance")
rootCmd.AddCommand(importCmd)
}
// runImport handles the high-level orchestration of the import process
func runImport(ctx context.Context, flags importFlags) error {
if !flags.Yes {
ok, err := askForConfirmation()
if err != nil {
return fmt.Errorf("failed to get confirmation: %w", err)
}
if !ok {
fmt.Println("Aborted")
os.Exit(1)
}
}
var (
zipReader *zip.ReadCloser
cleanup func()
err error
)
if flags.Path == "-" {
zipReader, cleanup, err = readZipFromStdin()
defer cleanup()
} else {
zipReader, err = zip.OpenReader(flags.Path)
}
if err != nil {
return fmt.Errorf("failed to open zip: %w", err)
}
defer zipReader.Close()
db, err := bootstrap.ConnectDatabase()
if err != nil {
return err
}
err = acquireImportLock(ctx, db, flags.ForcefullyAcquireLock)
if err != nil {
return err
}
storage, err := bootstrap.InitStorage(ctx, db)
if err != nil {
return fmt.Errorf("failed to initialize storage: %w", err)
}
importService := service.NewImportService(db, storage)
err = importService.ImportFromZip(ctx, &zipReader.Reader)
if err != nil {
return fmt.Errorf("failed to import data from zip: %w", err)
}
fmt.Println("Import completed successfully.")
return nil
}
func acquireImportLock(ctx context.Context, db *gorm.DB, force bool) error {
// Check if the kv table exists, in case we are starting from an empty database
exists, err := utils.DBTableExists(db, "kv")
if err != nil {
return fmt.Errorf("failed to check if kv table exists: %w", err)
}
if !exists {
// This either means the database is empty, or the import is into an old version of PocketID that doesn't support locks
// In either case, there's no lock to acquire
fmt.Println("Could not acquire a lock because the 'kv' table does not exist. This is fine if you're importing into a new database, but make sure that there isn't an instance of Pocket ID currently running and using the same database.")
return nil
}
// Note that we do not call a deferred Release if the data was imported
// This is because we are overriding the contents of the database, so the lock is automatically lost
appLockService := service.NewAppLockService(db)
opCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
waitUntil, err := appLockService.Acquire(opCtx, force)
if err != nil {
if errors.Is(err, service.ErrLockUnavailable) {
//nolint:staticcheck
return errors.New("Pocket ID must be stopped before importing data; please stop the running instance or run with --forcefully-acquire-lock to terminate the other instance")
}
return fmt.Errorf("failed to acquire application lock: %w", err)
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Until(waitUntil)):
}
return nil
}
func askForConfirmation() (bool, error) {
fmt.Println("WARNING: This feature is experimental and may not work correctly. Please create a backup before proceeding and report any issues you encounter.")
fmt.Println()
fmt.Println("WARNING: Import will erase all existing data at the following locations:")
fmt.Printf("Database: %s\n", absolutePathOrOriginal(common.EnvConfig.DbConnectionString))
fmt.Printf("Uploads Path: %s\n", absolutePathOrOriginal(common.EnvConfig.UploadPath))
ok, err := utils.PromptForConfirmation("Do you want to continue?")
if err != nil {
return false, err
}
return ok, nil
}
// absolutePathOrOriginal returns the absolute path of the given path, or the original if it fails
func absolutePathOrOriginal(path string) string {
abs, err := filepath.Abs(path)
if err != nil {
return path
}
return abs
}
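// readZipFromStdin spools stdin into a temporary file before opening it:
// ZIP archives keep their central directory at the end, so zip.OpenReader
// needs random access that a pipe cannot provide. The returned cleanup
// function removes the temporary file.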
func readZipFromStdin() (*zip.ReadCloser, func(), error) {
tmpFile, err := os.CreateTemp("", "pocket-id-import-*.zip")
if err != nil {
return nil, nil, fmt.Errorf("failed to create temporary file: %w", err)
}
cleanup := func() {
_ = os.Remove(tmpFile.Name())
}
if _, err := io.Copy(tmpFile, os.Stdin); err != nil {
tmpFile.Close()
cleanup()
return nil, nil, fmt.Errorf("failed to read data from stdin: %w", err)
}
if err := tmpFile.Close(); err != nil {
cleanup()
return nil, nil, fmt.Errorf("failed to close temporary file: %w", err)
}
r, err := zip.OpenReader(tmpFile.Name())
if err != nil {
cleanup()
return nil, nil, err
}
return r, cleanup, nil
}


@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"os"
"strings"
"github.com/lestrrat-go/jwx/v3/jwa"
@@ -79,7 +78,7 @@ func keyRotate(ctx context.Context, flags keyRotateFlags, db *gorm.DB, envConfig
}
if !ok {
fmt.Println("Aborted")
os.Exit(1)
return nil
}
}


@@ -1,6 +1,8 @@
package cmds
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
@@ -67,14 +69,78 @@ func TestKeyRotate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testKeyRotateWithDatabaseStorage(t, tt.flags, tt.wantErr, tt.errMsg)
t.Run("file storage", func(t *testing.T) {
testKeyRotateWithFileStorage(t, tt.flags, tt.wantErr, tt.errMsg)
})
t.Run("database storage", func(t *testing.T) {
testKeyRotateWithDatabaseStorage(t, tt.flags, tt.wantErr, tt.errMsg)
})
})
}
}
func testKeyRotateWithFileStorage(t *testing.T, flags keyRotateFlags, wantErr bool, errMsg string) {
// Create temporary directory for keys
tempDir := t.TempDir()
keysPath := filepath.Join(tempDir, "keys")
err := os.MkdirAll(keysPath, 0755)
require.NoError(t, err)
// Set up file storage config
envConfig := &common.EnvConfigSchema{
KeysStorage: "file",
KeysPath: keysPath,
}
// Create test database
db := testingutils.NewDatabaseForTest(t)
// Initialize app config service and create instance
appConfigService, err := service.NewAppConfigService(t.Context(), db)
require.NoError(t, err)
instanceID := appConfigService.GetDbConfig().InstanceID.Value
// Check if key exists before rotation
keyProvider, err := jwkutils.GetKeyProvider(db, envConfig, instanceID)
require.NoError(t, err)
// Run the key rotation
err = keyRotate(t.Context(), flags, db, envConfig)
if wantErr {
require.Error(t, err)
if errMsg != "" {
require.ErrorContains(t, err, errMsg)
}
return
}
require.NoError(t, err)
// Verify key was created
key, err := keyProvider.LoadKey()
require.NoError(t, err)
require.NotNil(t, key)
// Verify the algorithm matches what we requested
alg, _ := key.Algorithm()
assert.NotEmpty(t, alg)
if flags.Alg != "" {
expectedAlg := flags.Alg
if expectedAlg == "EdDSA" {
// EdDSA keys should have the EdDSA algorithm
assert.Equal(t, "EdDSA", alg.String())
} else {
assert.Equal(t, expectedAlg, alg.String())
}
}
}
func testKeyRotateWithDatabaseStorage(t *testing.T, flags keyRotateFlags, wantErr bool, errMsg string) {
// Set up database storage config
envConfig := &common.EnvConfigSchema{
KeysStorage: "database",
EncryptionKey: []byte("test-encryption-key-characters-long"),
}


@@ -51,7 +51,7 @@ var oneTimeAccessTokenCmd = &cobra.Command{
}
// Create a new access token that expires in 1 hour
oneTimeAccessToken, txErr = service.NewOneTimeAccessToken(user.ID, time.Hour, false)
oneTimeAccessToken, txErr = service.NewOneTimeAccessToken(user.ID, time.Hour)
if txErr != nil {
return fmt.Errorf("failed to generate access token: %w", txErr)
}


@@ -12,10 +12,9 @@ import (
)
var rootCmd = &cobra.Command{
Use: "pocket-id",
Short: "A simple and easy-to-use OIDC provider that allows users to authenticate with their passkeys to your services.",
Long: "By default, this command starts the pocket-id server.",
SilenceUsage: true,
Use: "pocket-id",
Short: "A simple and easy-to-use OIDC provider that allows users to authenticate with their passkeys to your services.",
Long: "By default, this command starts the pocket-id server.",
Run: func(cmd *cobra.Command, args []string) {
// Start the server
err := bootstrap.Bootstrap(cmd.Context())


@@ -38,40 +38,39 @@ const (
)
type EnvConfigSchema struct {
AppEnv AppEnv `env:"APP_ENV" options:"toLower"`
LogLevel string `env:"LOG_LEVEL" options:"toLower"`
LogJSON bool `env:"LOG_JSON"`
AppURL string `env:"APP_URL" options:"toLower,trimTrailingSlash"`
DbProvider DbProvider
DbConnectionString string `env:"DB_CONNECTION_STRING" options:"file"`
EncryptionKey []byte `env:"ENCRYPTION_KEY" options:"file"`
Port string `env:"PORT"`
Host string `env:"HOST" options:"toLower"`
UnixSocket string `env:"UNIX_SOCKET"`
UnixSocketMode string `env:"UNIX_SOCKET_MODE"`
LocalIPv6Ranges string `env:"LOCAL_IPV6_RANGES"`
UiConfigDisabled bool `env:"UI_CONFIG_DISABLED"`
MetricsEnabled bool `env:"METRICS_ENABLED"`
TracingEnabled bool `env:"TRACING_ENABLED"`
TrustProxy bool `env:"TRUST_PROXY"`
AnalyticsDisabled bool `env:"ANALYTICS_DISABLED"`
AllowDowngrade bool `env:"ALLOW_DOWNGRADE"`
InternalAppURL string `env:"INTERNAL_APP_URL"`
MaxMindLicenseKey string `env:"MAXMIND_LICENSE_KEY" options:"file"`
GeoLiteDBPath string `env:"GEOLITE_DB_PATH"`
GeoLiteDBUrl string `env:"GEOLITE_DB_URL"`
FileBackend string `env:"FILE_BACKEND" options:"toLower"`
UploadPath string `env:"UPLOAD_PATH"`
S3Bucket string `env:"S3_BUCKET"`
S3Region string `env:"S3_REGION"`
S3Endpoint string `env:"S3_ENDPOINT"`
S3AccessKeyID string `env:"S3_ACCESS_KEY_ID"`
S3SecretAccessKey string `env:"S3_SECRET_ACCESS_KEY"`
S3ForcePathStyle bool `env:"S3_FORCE_PATH_STYLE"`
S3DisableDefaultIntegrityChecks bool `env:"S3_DISABLE_DEFAULT_INTEGRITY_CHECKS"`
AppEnv AppEnv `env:"APP_ENV" options:"toLower"`
LogLevel string `env:"LOG_LEVEL" options:"toLower"`
AppURL string `env:"APP_URL" options:"toLower,trimTrailingSlash"`
DbProvider DbProvider `env:"DB_PROVIDER" options:"toLower"`
DbConnectionString string `env:"DB_CONNECTION_STRING" options:"file"`
FileBackend string `env:"FILE_BACKEND" options:"toLower"`
UploadPath string `env:"UPLOAD_PATH"`
S3Bucket string `env:"S3_BUCKET"`
S3Region string `env:"S3_REGION"`
S3Endpoint string `env:"S3_ENDPOINT"`
S3AccessKeyID string `env:"S3_ACCESS_KEY_ID"`
S3SecretAccessKey string `env:"S3_SECRET_ACCESS_KEY"`
S3ForcePathStyle bool `env:"S3_FORCE_PATH_STYLE"`
S3DisableDefaultIntegrityChecks bool `env:"S3_DISABLE_DEFAULT_INTEGRITY_CHECKS"`
KeysPath string `env:"KEYS_PATH"`
KeysStorage string `env:"KEYS_STORAGE"`
EncryptionKey []byte `env:"ENCRYPTION_KEY" options:"file"`
Port string `env:"PORT"`
Host string `env:"HOST" options:"toLower"`
UnixSocket string `env:"UNIX_SOCKET"`
UnixSocketMode string `env:"UNIX_SOCKET_MODE"`
MaxMindLicenseKey string `env:"MAXMIND_LICENSE_KEY" options:"file"`
GeoLiteDBPath string `env:"GEOLITE_DB_PATH"`
GeoLiteDBUrl string `env:"GEOLITE_DB_URL"`
LocalIPv6Ranges string `env:"LOCAL_IPV6_RANGES"`
UiConfigDisabled bool `env:"UI_CONFIG_DISABLED"`
MetricsEnabled bool `env:"METRICS_ENABLED"`
TracingEnabled bool `env:"TRACING_ENABLED"`
LogJSON bool `env:"LOG_JSON"`
TrustProxy bool `env:"TRUST_PROXY"`
AnalyticsDisabled bool `env:"ANALYTICS_DISABLED"`
AllowDowngrade bool `env:"ALLOW_DOWNGRADE"`
InternalAppURL string `env:"INTERNAL_APP_URL"`
}
var EnvConfig = defaultConfig()
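Worth noting in this hunk and the validation changes below: DB_PROVIDER is now read and validated as an explicit environment variable, instead of being inferred from the DB_CONNECTION_STRING prefix as before. Under the new scheme a Postgres deployment must set both variables; an illustrative minimal environment (placeholder values, not from the commit):

    DB_PROVIDER=postgres
    DB_CONNECTION_STRING=postgres://user:pass@localhost/pocket-id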
@@ -90,6 +89,7 @@ func defaultConfig() EnvConfigSchema {
LogLevel: "info",
DbProvider: "sqlite",
FileBackend: "filesystem",
KeysPath: "data/keys",
AppURL: AppUrl,
Port: "1411",
Host: "0.0.0.0",
@@ -117,28 +117,32 @@ func parseEnvConfig() error {
return fmt.Errorf("error preparing env config: %w", err)
}
err = validateEnvConfig(&EnvConfig)
if err != nil {
return err
}
return nil
}
// ValidateEnvConfig checks the EnvConfig for required fields and valid values
func ValidateEnvConfig(config *EnvConfigSchema) error {
// validateEnvConfig checks the EnvConfig for required fields and valid values
func validateEnvConfig(config *EnvConfigSchema) error {
if _, err := sloggin.ParseLevel(config.LogLevel); err != nil {
return errors.New("invalid LOG_LEVEL value. Must be 'debug', 'info', 'warn' or 'error'")
}
if len(config.EncryptionKey) < 16 {
return errors.New("ENCRYPTION_KEY must be at least 16 bytes long")
}
switch {
case config.DbConnectionString == "":
config.DbProvider = DbProviderSqlite
config.DbConnectionString = defaultSqliteConnString
case strings.HasPrefix(config.DbConnectionString, "postgres://") || strings.HasPrefix(config.DbConnectionString, "postgresql://"):
config.DbProvider = DbProviderPostgres
switch config.DbProvider {
case DbProviderSqlite:
if config.DbConnectionString == "" {
config.DbConnectionString = defaultSqliteConnString
}
case DbProviderPostgres:
if config.DbConnectionString == "" {
return errors.New("missing required env var 'DB_CONNECTION_STRING' for Postgres database")
}
default:
config.DbProvider = DbProviderSqlite
return errors.New("invalid DB_PROVIDER value. Must be 'sqlite' or 'postgres'")
}
parsedAppUrl, err := url.Parse(config.AppURL)
@@ -162,8 +166,27 @@ func ValidateEnvConfig(config *EnvConfigSchema) error {
}
}
switch config.KeysStorage {
// KeysStorage defaults to "file" if empty
case "":
config.KeysStorage = "file"
case "database":
if config.EncryptionKey == nil {
return errors.New("ENCRYPTION_KEY must be non-empty when KEYS_STORAGE is database")
}
case "file":
// All good, these are valid values
default:
return fmt.Errorf("invalid value for KEYS_STORAGE: %s", config.KeysStorage)
}
switch config.FileBackend {
case "s3", "database":
case "s3":
if config.KeysStorage == "file" {
return errors.New("KEYS_STORAGE cannot be 'file' when FILE_BACKEND is 's3'")
}
case "database":
// All good, these are valid values
case "", "filesystem":
if config.UploadPath == "" {
config.UploadPath = defaultFsUploadPath


@@ -8,20 +8,6 @@ import (
"github.com/stretchr/testify/require"
)
func parseAndValidateEnvConfig(t *testing.T) error {
t.Helper()
if _, exists := os.LookupEnv("ENCRYPTION_KEY"); !exists {
t.Setenv("ENCRYPTION_KEY", "0123456789abcdef")
}
if err := parseEnvConfig(); err != nil {
return err
}
return ValidateEnvConfig(&EnvConfig)
}
func TestParseEnvConfig(t *testing.T) {
// Store original config to restore later
originalConfig := EnvConfig
@@ -31,10 +17,11 @@ func TestParseEnvConfig(t *testing.T) {
t.Run("should parse valid SQLite config correctly", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "SQLITE") // should be lowercased automatically
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "HTTP://LOCALHOST:3000")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, DbProviderSqlite, EnvConfig.DbProvider)
assert.Equal(t, "http://localhost:3000", EnvConfig.AppURL)
@@ -42,76 +29,147 @@ func TestParseEnvConfig(t *testing.T) {
t.Run("should parse valid Postgres config correctly", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "POSTGRES")
t.Setenv("DB_CONNECTION_STRING", "postgres://user:pass@localhost/db")
t.Setenv("APP_URL", "https://example.com")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, DbProviderPostgres, EnvConfig.DbProvider)
})
t.Run("should fail when ENCRYPTION_KEY is too short", func(t *testing.T) {
t.Run("should fail with invalid DB_PROVIDER", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("DB_PROVIDER", "invalid")
t.Setenv("DB_CONNECTION_STRING", "test")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("ENCRYPTION_KEY", "short")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "ENCRYPTION_KEY must be at least 16 bytes long")
assert.ErrorContains(t, err, "invalid DB_PROVIDER value")
})
t.Run("should set default SQLite connection string when DB_CONNECTION_STRING is empty", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("APP_URL", "http://localhost:3000")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, defaultSqliteConnString, EnvConfig.DbConnectionString)
})
t.Run("should fail when Postgres DB_CONNECTION_STRING is missing", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "postgres")
t.Setenv("APP_URL", "http://localhost:3000")
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "missing required env var 'DB_CONNECTION_STRING' for Postgres")
})
t.Run("should fail with invalid APP_URL", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "€://not-a-valid-url")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "APP_URL is not a valid URL")
})
t.Run("should fail when APP_URL contains path", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000/path")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "APP_URL must not contain a path")
})
t.Run("should fail with invalid INTERNAL_APP_URL", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("INTERNAL_APP_URL", "€://not-a-valid-url")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "INTERNAL_APP_URL is not a valid URL")
})
t.Run("should fail when INTERNAL_APP_URL contains path", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("INTERNAL_APP_URL", "http://localhost:3000/path")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "INTERNAL_APP_URL must not contain a path")
})
t.Run("should default KEYS_STORAGE to 'file' when empty", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, "file", EnvConfig.KeysStorage)
})
t.Run("should fail when KEYS_STORAGE is 'database' but no encryption key", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("KEYS_STORAGE", "database")
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "ENCRYPTION_KEY must be non-empty when KEYS_STORAGE is database")
})
t.Run("should accept valid KEYS_STORAGE values", func(t *testing.T) {
validStorageTypes := []string{"file", "database"}
for _, storage := range validStorageTypes {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("KEYS_STORAGE", storage)
if storage == "database" {
t.Setenv("ENCRYPTION_KEY", "test-key")
}
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, storage, EnvConfig.KeysStorage)
}
})
t.Run("should fail with invalid KEYS_STORAGE value", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("KEYS_STORAGE", "invalid")
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "invalid value for KEYS_STORAGE")
})
t.Run("should parse boolean environment variables correctly", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("UI_CONFIG_DISABLED", "true")
@@ -120,7 +178,7 @@ func TestParseEnvConfig(t *testing.T) {
t.Setenv("TRUST_PROXY", "true")
t.Setenv("ANALYTICS_DISABLED", "false")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.True(t, EnvConfig.UiConfigDisabled)
assert.True(t, EnvConfig.MetricsEnabled)
@@ -131,17 +189,19 @@ func TestParseEnvConfig(t *testing.T) {
t.Run("should parse string environment variables correctly", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "postgres")
t.Setenv("DB_CONNECTION_STRING", "postgres://test")
t.Setenv("APP_URL", "https://prod.example.com")
t.Setenv("APP_ENV", "PRODUCTION")
t.Setenv("UPLOAD_PATH", "/custom/uploads")
t.Setenv("KEYS_PATH", "/custom/keys")
t.Setenv("PORT", "8080")
t.Setenv("HOST", "LOCALHOST")
t.Setenv("UNIX_SOCKET", "/tmp/app.sock")
t.Setenv("MAXMIND_LICENSE_KEY", "test-license")
t.Setenv("GEOLITE_DB_PATH", "/custom/geolite.mmdb")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, AppEnvProduction, EnvConfig.AppEnv) // lowercased
assert.Equal(t, "/custom/uploads", EnvConfig.UploadPath)
@@ -151,24 +211,38 @@ func TestParseEnvConfig(t *testing.T) {
t.Run("should normalize file backend and default upload path", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("FILE_BACKEND", "FILESYSTEM")
t.Setenv("UPLOAD_PATH", "")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.NoError(t, err)
assert.Equal(t, "filesystem", EnvConfig.FileBackend)
assert.Equal(t, defaultFsUploadPath, EnvConfig.UploadPath)
})
t.Run("should fail when FILE_BACKEND is s3 but keys are stored on filesystem", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("FILE_BACKEND", "s3")
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "KEYS_STORAGE cannot be 'file' when FILE_BACKEND is 's3'")
})
t.Run("should fail with invalid FILE_BACKEND value", func(t *testing.T) {
EnvConfig = defaultConfig()
t.Setenv("DB_PROVIDER", "sqlite")
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
t.Setenv("APP_URL", "http://localhost:3000")
t.Setenv("FILE_BACKEND", "invalid")
err := parseAndValidateEnvConfig(t)
err := parseEnvConfig()
require.Error(t, err)
assert.ErrorContains(t, err, "invalid FILE_BACKEND value")
})


@@ -38,13 +38,6 @@ type TokenInvalidOrExpiredError struct{}
func (e *TokenInvalidOrExpiredError) Error() string { return "token is invalid or expired" }
func (e *TokenInvalidOrExpiredError) HttpStatusCode() int { return 400 }
type DeviceCodeInvalid struct{}
func (e *DeviceCodeInvalid) Error() string {
return "one time access code must be used on the device it was generated for"
}
func (e *DeviceCodeInvalid) HttpStatusCode() int { return 400 }
type TokenInvalidError struct{}
func (e *TokenInvalidError) Error() string {


@@ -40,11 +40,6 @@ func (tc *TestController) resetAndSeedHandler(c *gin.Context) {
return
}
if err := tc.TestService.ResetLock(c.Request.Context()); err != nil {
_ = c.Error(err)
return
}
if err := tc.TestService.ResetApplicationImages(c.Request.Context()); err != nil {
_ = c.Error(err)
return
@@ -74,6 +69,8 @@ func (tc *TestController) resetAndSeedHandler(c *gin.Context) {
}
}
tc.TestService.SetJWTKeys()
c.Status(http.StatusNoContent)
}


@@ -391,13 +391,12 @@ func (uc *UserController) RequestOneTimeAccessEmailAsUnauthenticatedUserHandler(
return
}
deviceToken, err := uc.userService.RequestOneTimeAccessEmailAsUnauthenticatedUser(c.Request.Context(), input.Email, input.RedirectPath)
err := uc.userService.RequestOneTimeAccessEmailAsUnauthenticatedUser(c.Request.Context(), input.Email, input.RedirectPath)
if err != nil {
_ = c.Error(err)
return
}
cookie.AddDeviceTokenCookie(c, deviceToken)
c.Status(http.StatusNoContent)
}
@@ -441,8 +440,7 @@ func (uc *UserController) RequestOneTimeAccessEmailAsAdminHandler(c *gin.Context
// @Success 200 {object} dto.UserDto
// @Router /api/one-time-access-token/{token} [post]
func (uc *UserController) exchangeOneTimeAccessTokenHandler(c *gin.Context) {
deviceToken, _ := c.Cookie(cookie.DeviceTokenCookieName)
user, token, err := uc.userService.ExchangeOneTimeAccessToken(c.Request.Context(), c.Param("token"), deviceToken, c.ClientIP(), c.Request.UserAgent())
user, token, err := uc.userService.ExchangeOneTimeAccessToken(c.Request.Context(), c.Param("token"), c.ClientIP(), c.Request.UserAgent())
if err != nil {
_ = c.Error(err)
return


@@ -47,7 +47,7 @@ type AppConfigUpdateDto struct {
LdapAttributeGroupMember string `json:"ldapAttributeGroupMember"`
LdapAttributeGroupUniqueIdentifier string `json:"ldapAttributeGroupUniqueIdentifier"`
LdapAttributeGroupName string `json:"ldapAttributeGroupName"`
LdapAdminGroupName string `json:"ldapAdminGroupName"`
LdapAttributeAdminGroup string `json:"ldapAttributeAdminGroup"`
LdapSoftDeleteUsers string `json:"ldapSoftDeleteUsers"`
EmailOneTimeAccessAsAdminEnabled string `json:"emailOneTimeAccessAsAdminEnabled" binding:"required"`
EmailOneTimeAccessAsUnauthenticatedEnabled string `json:"emailOneTimeAccessAsUnauthenticatedEnabled" binding:"required"`


@@ -77,7 +77,7 @@ type AppConfig struct {
LdapAttributeGroupMember AppConfigVariable `key:"ldapAttributeGroupMember"`
LdapAttributeGroupUniqueIdentifier AppConfigVariable `key:"ldapAttributeGroupUniqueIdentifier"`
LdapAttributeGroupName AppConfigVariable `key:"ldapAttributeGroupName"`
LdapAdminGroupName AppConfigVariable `key:"ldapAdminGroupName"`
LdapAttributeAdminGroup AppConfigVariable `key:"ldapAttributeAdminGroup"`
LdapSoftDeleteUsers AppConfigVariable `key:"ldapSoftDeleteUsers"`
}


@@ -11,15 +11,6 @@ import (
// DateTime custom type for time.Time to store date as unix timestamp for sqlite and as date for postgres
type DateTime time.Time //nolint:recvcheck
func DateTimeFromString(str string) (DateTime, error) {
t, err := time.Parse(time.RFC3339Nano, str)
if err != nil {
return DateTime{}, fmt.Errorf("failed to parse date string: %w", err)
}
return DateTime(t), nil
}
func (date *DateTime) Scan(value any) (err error) {
switch v := value.(type) {
case time.Time:


@@ -87,9 +87,8 @@ func (u User) Initials() string {
type OneTimeAccessToken struct {
Base
Token string
DeviceToken *string
ExpiresAt datatype.DateTime
Token string
ExpiresAt datatype.DateTime
UserID string
User User


@@ -102,7 +102,7 @@ func (s *AppConfigService) getDefaultDbConfig() *model.AppConfig {
LdapAttributeGroupMember: model.AppConfigVariable{Value: "member"},
LdapAttributeGroupUniqueIdentifier: model.AppConfigVariable{},
LdapAttributeGroupName: model.AppConfigVariable{},
LdapAdminGroupName: model.AppConfigVariable{},
LdapAttributeAdminGroup: model.AppConfigVariable{},
LdapSoftDeleteUsers: model.AppConfigVariable{Value: "true"},
}
}


@@ -1,296 +0,0 @@
package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"os"
"time"
"github.com/google/uuid"
"github.com/pocket-id/pocket-id/backend/internal/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
var (
ErrLockUnavailable = errors.New("lock is already held by another process")
ErrLockLost = errors.New("lock ownership lost")
)
const (
ttl = 30 * time.Second
renewInterval = 20 * time.Second
renewRetries = 3
lockKey = "application_lock"
)
type AppLockService struct {
db *gorm.DB
lockID string
processID int64
hostID string
}
func NewAppLockService(db *gorm.DB) *AppLockService {
host, err := os.Hostname()
if err != nil || host == "" {
host = "unknown-host"
}
return &AppLockService{
db: db,
processID: int64(os.Getpid()),
hostID: host,
lockID: uuid.NewString(),
}
}
type lockValue struct {
ProcessID int64 `json:"process_id"`
HostID string `json:"host_id"`
LockID string `json:"lock_id"`
ExpiresAt int64 `json:"expires_at"`
}
func (lv *lockValue) Marshal() (string, error) {
data, err := json.Marshal(lv)
if err != nil {
return "", err
}
return string(data), nil
}
func (lv *lockValue) Unmarshal(raw string) error {
if raw == "" {
return nil
}
return json.Unmarshal([]byte(raw), lv)
}
// Acquire obtains the lock. When force is true, the lock is stolen from any existing owner.
// If the lock is forcefully acquired, it blocks until the previous lock has expired.
func (s *AppLockService) Acquire(ctx context.Context, force bool) (waitUntil time.Time, err error) {
tx := s.db.Begin()
defer func() {
tx.Rollback()
}()
var prevLockRaw string
err = tx.
WithContext(ctx).
Model(&model.KV{}).
Where("key = ?", lockKey).
Clauses(clause.Locking{Strength: "UPDATE"}).
Select("value").
Scan(&prevLockRaw).
Error
if err != nil {
return time.Time{}, fmt.Errorf("query existing lock: %w", err)
}
var prevLock lockValue
if prevLockRaw != "" {
if err := prevLock.Unmarshal(prevLockRaw); err != nil {
return time.Time{}, fmt.Errorf("decode existing lock value: %w", err)
}
}
now := time.Now()
nowUnix := now.Unix()
value := lockValue{
ProcessID: s.processID,
HostID: s.hostID,
LockID: s.lockID,
ExpiresAt: now.Add(ttl).Unix(),
}
raw, err := value.Marshal()
if err != nil {
return time.Time{}, fmt.Errorf("encode lock value: %w", err)
}
var query string
switch s.db.Name() {
case "sqlite":
query = `
INSERT INTO kv (key, value)
VALUES (?, ?)
ON CONFLICT(key) DO UPDATE SET
value = excluded.value
WHERE (json_extract(kv.value, '$.expires_at') < ?) OR ?
`
case "postgres":
query = `
INSERT INTO kv (key, value)
VALUES ($1, $2)
ON CONFLICT(key) DO UPDATE SET
value = excluded.value
WHERE ((kv.value::json->>'expires_at')::bigint < $3) OR ($4::boolean IS TRUE)
`
default:
return time.Time{}, fmt.Errorf("unsupported database dialect: %s", s.db.Name())
}
res := tx.WithContext(ctx).Exec(query, lockKey, raw, nowUnix, force)
if res.Error != nil {
return time.Time{}, fmt.Errorf("lock acquisition failed: %w", res.Error)
}
if err := tx.Commit().Error; err != nil {
return time.Time{}, fmt.Errorf("commit lock acquisition: %w", err)
}
// If there is a lock that is not expired and force is false, no rows will be affected
if res.RowsAffected == 0 {
return time.Time{}, ErrLockUnavailable
}
if force && prevLock.ExpiresAt > nowUnix && prevLock.LockID != s.lockID {
waitUntil = time.Unix(prevLock.ExpiresAt, 0)
}
attrs := []any{
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
}
if wait := time.Until(waitUntil); wait > 0 {
attrs = append(attrs, slog.Duration("wait_before_proceeding", wait))
}
slog.Info("Acquired application lock", attrs...)
return waitUntil, nil
}
// RunRenewal keeps renewing the lock until the context is canceled.
func (s *AppLockService) RunRenewal(ctx context.Context) error {
ticker := time.NewTicker(renewInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
if err := s.renew(ctx); err != nil {
return fmt.Errorf("renew lock: %w", err)
}
}
}
}
// Release releases the lock if it is held by this process.
func (s *AppLockService) Release(ctx context.Context) error {
opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
defer cancel()
var query string
switch s.db.Name() {
case "sqlite":
query = `
DELETE FROM kv
WHERE key = ?
AND json_extract(value, '$.lock_id') = ?
`
case "postgres":
query = `
DELETE FROM kv
WHERE key = $1
AND value::json->>'lock_id' = $2
`
default:
return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
}
res := s.db.WithContext(opCtx).Exec(query, lockKey, s.lockID)
if res.Error != nil {
return fmt.Errorf("release lock failed: %w", res.Error)
}
if res.RowsAffected == 0 {
slog.Warn("Application lock not held by this process, cannot release",
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
)
}
slog.Info("Released application lock",
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
)
return nil
}
// renew tries to renew the lock, retrying up to renewRetries times (sleeping 1s between attempts).
func (s *AppLockService) renew(ctx context.Context) error {
var lastErr error
for attempt := 1; attempt <= renewRetries; attempt++ {
now := time.Now()
nowUnix := now.Unix()
expiresAt := now.Add(ttl).Unix()
value := lockValue{
LockID: s.lockID,
ProcessID: s.processID,
HostID: s.hostID,
ExpiresAt: expiresAt,
}
raw, err := value.Marshal()
if err != nil {
return fmt.Errorf("encode lock value: %w", err)
}
var query string
switch s.db.Name() {
case "sqlite":
query = `
UPDATE kv
SET value = ?
WHERE key = ?
AND json_extract(value, '$.lock_id') = ?
AND json_extract(value, '$.expires_at') > ?
`
case "postgres":
query = `
UPDATE kv
SET value = $1
WHERE key = $2
AND value::json->>'lock_id' = $3
AND ((value::json->>'expires_at')::bigint > $4)
`
default:
return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
}
opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
res := s.db.WithContext(opCtx).Exec(query, raw, lockKey, s.lockID, nowUnix)
cancel()
switch {
case res.Error != nil:
lastErr = fmt.Errorf("lock renewal failed: %w", res.Error)
case res.RowsAffected == 0:
// Must be after checking res.Error
return ErrLockLost
default:
slog.Debug("Renewed application lock",
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
)
return nil
}
// Wait before next attempt or cancel if context is done
if attempt < renewRetries {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(1 * time.Second):
}
}
}
return lastErr
}
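The commit deletes this file entirely, removing AppLockService and the single-instance locking it provided. For context on what is being removed, a sketch of how the lock was used, mirroring the old Bootstrap wiring shown earlier (details abbreviated):

package service

import (
	"context"
	"time"

	"gorm.io/gorm"
)

// runWithLock sketches the removed lock's lifecycle: acquire, wait out any
// previous lock's TTL, keep renewing until the context ends, release on exit.
func runWithLock(ctx context.Context, db *gorm.DB) error {
	lock := NewAppLockService(db)

	// Fails fast with ErrLockUnavailable if another live instance holds it.
	waitUntil, err := lock.Acquire(ctx, false)
	if err != nil {
		return err
	}
	// If a previous (stolen) lock has not yet expired, wait out its TTL.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(time.Until(waitUntil)):
	}

	// Release on the way out; a fresh context is used because the run
	// context may already be canceled by then.
	defer lock.Release(context.Background()) //nolint:errcheck,contextcheck

	// Keep the lock's TTL fresh until the context is canceled.
	return lock.RunRenewal(ctx)
}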


@@ -1,189 +0,0 @@
package service
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"github.com/pocket-id/pocket-id/backend/internal/model"
testutils "github.com/pocket-id/pocket-id/backend/internal/utils/testing"
)
func newTestAppLockService(t *testing.T, db *gorm.DB) *AppLockService {
t.Helper()
return &AppLockService{
db: db,
processID: 1,
hostID: "test-host",
lockID: "a13c7673-c7ae-49f1-9112-2cd2d0d4b0c1",
}
}
func insertLock(t *testing.T, db *gorm.DB, value lockValue) {
t.Helper()
raw, err := value.Marshal()
require.NoError(t, err)
err = db.Create(&model.KV{Key: lockKey, Value: &raw}).Error
require.NoError(t, err)
}
func readLockValue(t *testing.T, db *gorm.DB) lockValue {
t.Helper()
var row model.KV
err := db.Take(&row, "key = ?", lockKey).Error
require.NoError(t, err)
require.NotNil(t, row.Value)
var value lockValue
err = value.Unmarshal(*row.Value)
require.NoError(t, err)
return value
}
func TestAppLockServiceAcquire(t *testing.T) {
t.Run("creates new lock when none exists", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
_, err := service.Acquire(context.Background(), false)
require.NoError(t, err)
stored := readLockValue(t, db)
require.Equal(t, service.processID, stored.ProcessID)
require.Equal(t, service.hostID, stored.HostID)
require.Greater(t, stored.ExpiresAt, time.Now().Unix())
})
t.Run("returns ErrLockUnavailable when lock held by another process", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
existing := lockValue{
ProcessID: 99,
HostID: "other-host",
ExpiresAt: time.Now().Add(ttl).Unix(),
}
insertLock(t, db, existing)
_, err := service.Acquire(context.Background(), false)
require.ErrorIs(t, err, ErrLockUnavailable)
current := readLockValue(t, db)
require.Equal(t, existing, current)
})
t.Run("force acquisition steals lock", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
insertLock(t, db, lockValue{
ProcessID: 99,
HostID: "other-host",
ExpiresAt: time.Now().Unix(),
})
_, err := service.Acquire(context.Background(), true)
require.NoError(t, err)
stored := readLockValue(t, db)
require.Equal(t, service.processID, stored.ProcessID)
require.Equal(t, service.hostID, stored.HostID)
require.Greater(t, stored.ExpiresAt, time.Now().Unix())
})
}
func TestAppLockServiceRelease(t *testing.T) {
t.Run("removes owned lock", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
_, err := service.Acquire(context.Background(), false)
require.NoError(t, err)
err = service.Release(context.Background())
require.NoError(t, err)
var row model.KV
err = db.Take(&row, "key = ?", lockKey).Error
require.ErrorIs(t, err, gorm.ErrRecordNotFound)
})
t.Run("ignores lock held by another owner", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
existing := lockValue{
ProcessID: 2,
HostID: "other-host",
ExpiresAt: time.Now().Add(ttl).Unix(),
}
insertLock(t, db, existing)
err := service.Release(context.Background())
require.NoError(t, err)
stored := readLockValue(t, db)
require.Equal(t, existing, stored)
})
}
func TestAppLockServiceRenew(t *testing.T) {
t.Run("extends expiration when lock is still owned", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
_, err := service.Acquire(context.Background(), false)
require.NoError(t, err)
before := readLockValue(t, db)
err = service.renew(context.Background())
require.NoError(t, err)
after := readLockValue(t, db)
require.Equal(t, service.processID, after.ProcessID)
require.Equal(t, service.hostID, after.HostID)
require.GreaterOrEqual(t, after.ExpiresAt, before.ExpiresAt)
})
t.Run("returns ErrLockLost when lock is missing", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
err := service.renew(context.Background())
require.ErrorIs(t, err, ErrLockLost)
})
t.Run("returns ErrLockLost when ownership changed", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
service := newTestAppLockService(t, db)
_, err := service.Acquire(context.Background(), false)
require.NoError(t, err)
// Simulate a different process taking the lock.
newOwner := lockValue{
ProcessID: 9,
HostID: "stolen-host",
ExpiresAt: time.Now().Add(ttl).Unix(),
}
raw, marshalErr := newOwner.Marshal()
require.NoError(t, marshalErr)
updateErr := db.Model(&model.KV{}).
Where("key = ?", lockKey).
Update("value", raw).Error
require.NoError(t, updateErr)
err = service.renew(context.Background())
require.ErrorIs(t, err, ErrLockLost)
})
}


@@ -7,12 +7,14 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/base64"
"fmt"
"log/slog"
"path"
"time"
"github.com/fxamacker/cbor/v2"
"github.com/go-webauthn/webauthn/protocol"
"github.com/lestrrat-go/jwx/v3/jwa"
"github.com/lestrrat-go/jwx/v3/jwk"
@@ -34,17 +36,15 @@ type TestService struct {
appConfigService *AppConfigService
ldapService *LdapService
fileStorage storage.FileStorage
appLockService *AppLockService
externalIdPKey jwk.Key
}
func NewTestService(db *gorm.DB, appConfigService *AppConfigService, jwtService *JwtService, ldapService *LdapService, appLockService *AppLockService, fileStorage storage.FileStorage) (*TestService, error) {
func NewTestService(db *gorm.DB, appConfigService *AppConfigService, jwtService *JwtService, ldapService *LdapService, fileStorage storage.FileStorage) (*TestService, error) {
s := &TestService{
db: db,
appConfigService: appConfigService,
jwtService: jwtService,
ldapService: ldapService,
appLockService: appLockService,
fileStorage: fileStorage,
}
err := s.initExternalIdP()
@@ -288,8 +288,8 @@ func (s *TestService) SeedDatabase(baseURL string) error {
// openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 | \
// openssl pkcs8 -topk8 -nocrypt | tee >(openssl pkey -pubout)
publicKeyPasskey1, _ := base64.StdEncoding.DecodeString("pQMmIAEhWCDBw6jkpXXr0pHrtAQetxiR5cTcILG/YGDCdKrhVhNDHCJYIIu12YrF6B7Frwl3AUqEpdrYEwj3Fo3XkGgvrBIJEUmGAQI=")
publicKeyPasskey2, _ := base64.StdEncoding.DecodeString("pSJYIPmc+FlEB0neERqqscxKckGF8yq1AYrANiloshAUAouHAQIDJiABIVggj4qA0PrZzg8Co1C27nyUbzrp8Ewjr7eOlGI2LfrzmbI=")
publicKeyPasskey1, _ := s.getCborPublicKey("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEwcOo5KV169KR67QEHrcYkeXE3CCxv2BgwnSq4VYTQxyLtdmKxegexa8JdwFKhKXa2BMI9xaN15BoL6wSCRFJhg==")
publicKeyPasskey2, _ := s.getCborPublicKey("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEj4qA0PrZzg8Co1C27nyUbzrp8Ewjr7eOlGI2LfrzmbL5nPhZRAdJ3hEaqrHMSnJBhfMqtQGKwDYpaLIQFAKLhw==")
webauthnCredentials := []model.WebauthnCredential{
{
Name: "Passkey 1",
@@ -318,10 +318,6 @@ func (s *TestService) SeedDatabase(baseURL string) error {
Challenge: "challenge",
ExpiresAt: datatype.DateTime(time.Now().Add(1 * time.Hour)),
UserVerification: "preferred",
CredentialParams: model.CredentialParameters{
{Type: "public-key", Algorithm: -7},
{Type: "public-key", Algorithm: -257},
},
}
if err := tx.Create(&webauthnSession).Error; err != nil {
return err
@@ -331,10 +327,9 @@ func (s *TestService) SeedDatabase(baseURL string) error {
Base: model.Base{
ID: "5f1fa856-c164-4295-961e-175a0d22d725",
},
Name: "Test API Key",
Key: "6c34966f57ef2bb7857649aff0e7ab3ad67af93c846342ced3f5a07be8706c20",
UserID: users[0].ID,
ExpiresAt: datatype.DateTime(time.Now().Add(30 * 24 * time.Hour)),
Name: "Test API Key",
Key: "6c34966f57ef2bb7857649aff0e7ab3ad67af93c846342ced3f5a07be8706c20",
UserID: users[0].ID,
}
if err := tx.Create(&apiKey).Error; err != nil {
return err
@@ -384,20 +379,6 @@ func (s *TestService) SeedDatabase(baseURL string) error {
}
}
keyValues := []model.KV{
{
Key: jwkutils.PrivateKeyDBKey,
// {"alg":"RS256","d":"mvMDWSdPPvcum0c0iEHE2gbqtV2NKMmLwrl9E6K7g8lTV95SePLnW_bwyMPV7EGp7PQk3l17I5XRhFjze7GqTnFIOgKzMianPs7jv2ELtBMGK0xOPATgu1iGb70xZ6vcvuEfRyY3dJ0zr4jpUdVuXwKmx9rK4IdZn2dFCKfvSuspqIpz11RhF1ALrqDLkxGVv7ZwNh0_VhJZU9hcjG5l6xc7rQEKpPRkZp0IdjkGS8Z0FskoVaiRIWAbZuiVFB9WCW8k1czC4HQTPLpII01bUQx2ludbm0UlXRgVU9ptUUbU7GAImQqTOW8LfPGklEvcgzlIlR_oqw4P9yBxLi-yMQ","dp":"pvNCSnnhbo8Igw9psPR-DicxFnkXlu_ix4gpy6efTrxA-z1VDFDioJ814vKQNioYDzpyAP1gfMPhRkvG_q0hRZsJah3Sb9dfA-WkhSWY7lURQP4yIBTMU0PF_rEATuS7lRciYk1SOx5fqXZd3m_LP0vpBC4Ujlq6NAq6CIjCnms","dq":"TtUVGCCkPNgfOLmkYXu7dxxUCV5kB01-xAEK2OY0n0pG8vfDophH4_D_ZC7nvJ8J9uDhs_3JStexq1lIvaWtG99RNTChIEDzpdn6GH9yaVcb_eB4uJjrNm64FhF8PGCCwxA-xMCZMaARKwhMB2_IOMkxUbWboL3gnhJ2rDO_QO0","e":"AQAB","kid":"8uHDw3M6rf8","kty":"RSA","n":"yaeEL0VKoPBXIAaWXsUgmu05lAvEIIdJn0FX9lHh4JE5UY9B83C5sCNdhs9iSWzpeP11EVjWp8i3Yv2CF7c7u50BXnVBGtxpZpFC-585UXacoJ0chUmarL9GRFJcM1nPHBTFu68aRrn1rIKNHUkNaaxFo0NFGl_4EDDTO8HwawTjwkPoQlRzeByhlvGPVvwgB3Fn93B8QJ_cZhXKxJvjjrC_8Pk76heC_ntEMru71Ix77BoC3j2TuyiN7m9RNBW8BU5q6lKoIdvIeZfTFLzi37iufyfvMrJTixp9zhNB1NxlLCeOZl2MXegtiGqd2H3cbAyqoOiv9ihUWTfXj7SxJw","p":"_Yylc9e07CKdqNRD2EosMC2mrhrEa9j5oY_l00Qyy4-jmCA59Q9viyqvveRo0U7cRvFA5BWgWN6GGLh1DG3X-QBqVr0dnk3uzbobb55RYUXyPLuBZI2q6w2oasbiDwPdY7KpkVv_H-bpITQlyDvO8hhucA6rUV7F6KTQVz8M3Ms","q":"y5p3hch-7jJ21TkAhp_Vk1fLCAuD4tbErwQs2of9ja8sB4iJOs5Wn6HD3P7Mc8Plye7qaLHvzc8I5g0tPKWvC0DPd_FLPXiWwMVAzee3NUX_oGeJNOQp11y1w_KqdO9qZqHSEPZ3NcFL_SZMFgggxhM1uzRiPzsVN0lnD_6prZU","qi":"2Grt6uXHm61ji3xSdkBWNtUnj19vS1-7rFJp5SoYztVQVThf_W52BAiXKBdYZDRVoItC_VS2NvAOjeJjhYO_xQ_q3hK7MdtuXfEPpLnyXKkmWo3lrJ26wbeF6l05LexCkI7ShsOuSt-dsyaTJTszuKDIA6YOfWvfo3aVZmlWRaI","use":"sig"}
Value: utils.Ptr("7d/5hl7diJ2rnFL14hEAQf9tzpu29aqXQ8jpJ2iqqKUNFZpdOkEpud0CmRv4H3r8yyk2u/Gqqj9klSy58DJkYXGF5PAYgLyoBIb7L3JXWRbxg4cQ3QJCug13l2OTmpAKoVc+rmX8c3j3h1sNqyJ+7Ql5sS0jSeyiYgIsFNCdnK5alBDyvtcpe/QDpklmP4JCeVpvmf2rLGplk3g5UO5ydJ8UiDXxfDmi+gF6NKJvrGnnah8Ar3G/x88z+tTJtp0DIQFwxXwUM2XZqzEVGm8K2r0w5o9/Keh6bBBaiuH2C78ZOaijGV3DovhR+e9J0cYUYGwT42MZMx9fSWQ/lvWGGnf+Uq3MXJfjWSREfhkp8KTQwR9F7+dnVJWswOEk7jPR8I7hCWTMxJyvaFX3wgAXIVmhrgXZQQbYOqTt56IoqUl0xOJku8dA8opg2UcLlmmuOh6+hfkXKsiiS/H/9c1BVIGj1fCOiT6IePh4wKKSTbwJnPD5EKmdJpgTsUpjcDnXQKY4ReO0UpdRdKxwRDDLeQuG6j+ljGxR9GPudCU9Nmci6rFVI6n5LWYkQxBA1O73RpmXRZPDzntDfpXMEonkmSvOoxaCK2Id7CRKMdqvR0kEouwnhk5WSFtsfi3sA0pkXzPFxwZeWM8vFtbffZOZzXaOhxCOfcj1NClZohlZhyc4jvkxmrpY7PSaAzih0AmHI7y0LYFi6fZu/K4EheVa1+KF55nWZ8ARikHMWKAKkyExkTak7xyN884TDmzURRaPlQg4jzQte5WMNjAG/hlHibdMBNvgwiYd49ZxteJ8ABdbiXVRl+2JGbdjl2ubpQZwOn7bJKlqO56bIwsZ+e4+pXsuOGdBahkHrUjtMEmH3DZbGc6CJLbcmdhdpApLQRRcLAazxJhzAwJ47FRYsHsj57LnYNvmcKdIxw8rxCdLUuzz95uw0T3ankEO5J9sjem+HMEuKdwXK1UcuOn2rjR8Sd/BuvQmeso27dFbPXqXYNS90Ml45YyTvcKSiopD181oZR703TFUSpR7dsiqROMr+p/2jN9h6a8WbQ8xpksyclaQByY/M77AssbXnG6wfhRsntNIINCZLbBnjXOyz6ZHIC5K4tSTdcnWaiYPeRPQmnw9UUvHAcNU2yMWsy0eU377yDS0WstTxOdQutTdkczl8kv5Lo26JiEK7mSIuRK19ffF9Zz8FG8+eKv5zdyIPjyQRDYBysUoDv5huKe2eoxJu/MWS2Pql/ZtUGeD6Ozm3mCvh0vQ9ceagBkY6Ocm3du0ziAKP29Ri0mjg4DizVorbLzsh+EQH/s2Pi9MnjUZDlEmuLl2Xfp7/w4j/8u0N0tVR70VDFuGdKpTjFY3vS8EJrPtyMTM51x1D9rb8gIql8aR/rJw4YF+huxg1mv5n6+tGVqg5msbPmF12eJijP4lkmaRwIpLW5pJTtaDkUj7uOeu1mm4k+Dt5nh0/0jPHzrv6bcTCcbV7UjMHDoTXXqEpFAAJ66rHR7zdAJu+YKsnTIZyLmOpcowq7LL8G9qTvV0OSpyQWUIavRSgbDHFqEqRs+JU94jAzkq8nCY5MTd9m5sIv9InfdT3k+pwpsE/FKge8nghFLtbUrafGkzTky8SE2druvVcIvbfXMfLIKRUYjJgnWc0gQzF5J6pzXM7D2r/RG6JDzASqjlbURq6v9bhNerlOVdMujWKEEVcKWIzlbt4RkihRjM8AUqIZQOyicGQ+4yfIjAHw5viuABONYs3OIWULnFqJxdvS9rNKhfxSjIq9cfqyzevq2xrRoMXEonobh6M3bD2Vang8OAeVeD1OXWPERi4pepCYFS9RJ/Xa/UWxptsqSNuGcb3fAzQSmLpXLGdWRoKXvSe7EYgc0bGcLOjSTu5RURKo+EF9i4KT9EJauf6VXw5dTf/CCIJRXE1bWzXhSCFYntohYhX2ldOCDYpi/jFBC6Vtkw0ud3/xq8Nmhd5gUk+SpngByCZH3Pm3H+jvlbMpiqkDkm1v74hDX13Xhrcw2eWyuqKBVoRCCniUvwpYNbGvBfjC6Hcizv0Aybciwj+4nybt5EPoEUm6S6Gs7fG7QpPdvrzpAxX70MlmdkF/gwyuhbEeJhLK+WL7qAsN5CvHPzVbsIf90x+nGTtMJPgpxVr0tJMj+vprXV4WxutfARBiOnqe58MhA857sd+MzKBgKnoLOBRTiC3qc/0/ULwbG2HCCD7nmwzz7M4nUuMvo8rgS7z0BF68OClT8X3JwSXbL5Wg=="),
},
}
for _, kv := range keyValues {
if err := tx.Create(&kv).Error; err != nil {
return err
}
}
return nil
})
@@ -483,29 +464,47 @@ func (s *TestService) ResetAppConfig(ctx context.Context) error {
return err
}
// Manually set instance ID
err = s.appConfigService.UpdateAppConfigValues(ctx, "instanceId", "test-instance-id")
if err != nil {
return err
}
// Reload the app config from the database after resetting the values
err = s.appConfigService.LoadDbConfig(ctx)
if err != nil {
return err
}
// Reload the JWK
if err := s.jwtService.LoadOrGenerateKey(); err != nil {
return err
}
return nil
return s.appConfigService.LoadDbConfig(ctx)
}
func (s *TestService) ResetLock(ctx context.Context) error {
_, err := s.appLockService.Acquire(ctx, true)
return err
func (s *TestService) SetJWTKeys() {
const privateKeyString = `{"alg":"RS256","d":"mvMDWSdPPvcum0c0iEHE2gbqtV2NKMmLwrl9E6K7g8lTV95SePLnW_bwyMPV7EGp7PQk3l17I5XRhFjze7GqTnFIOgKzMianPs7jv2ELtBMGK0xOPATgu1iGb70xZ6vcvuEfRyY3dJ0zr4jpUdVuXwKmx9rK4IdZn2dFCKfvSuspqIpz11RhF1ALrqDLkxGVv7ZwNh0_VhJZU9hcjG5l6xc7rQEKpPRkZp0IdjkGS8Z0FskoVaiRIWAbZuiVFB9WCW8k1czC4HQTPLpII01bUQx2ludbm0UlXRgVU9ptUUbU7GAImQqTOW8LfPGklEvcgzlIlR_oqw4P9yBxLi-yMQ","dp":"pvNCSnnhbo8Igw9psPR-DicxFnkXlu_ix4gpy6efTrxA-z1VDFDioJ814vKQNioYDzpyAP1gfMPhRkvG_q0hRZsJah3Sb9dfA-WkhSWY7lURQP4yIBTMU0PF_rEATuS7lRciYk1SOx5fqXZd3m_LP0vpBC4Ujlq6NAq6CIjCnms","dq":"TtUVGCCkPNgfOLmkYXu7dxxUCV5kB01-xAEK2OY0n0pG8vfDophH4_D_ZC7nvJ8J9uDhs_3JStexq1lIvaWtG99RNTChIEDzpdn6GH9yaVcb_eB4uJjrNm64FhF8PGCCwxA-xMCZMaARKwhMB2_IOMkxUbWboL3gnhJ2rDO_QO0","e":"AQAB","kid":"8uHDw3M6rf8","kty":"RSA","n":"yaeEL0VKoPBXIAaWXsUgmu05lAvEIIdJn0FX9lHh4JE5UY9B83C5sCNdhs9iSWzpeP11EVjWp8i3Yv2CF7c7u50BXnVBGtxpZpFC-585UXacoJ0chUmarL9GRFJcM1nPHBTFu68aRrn1rIKNHUkNaaxFo0NFGl_4EDDTO8HwawTjwkPoQlRzeByhlvGPVvwgB3Fn93B8QJ_cZhXKxJvjjrC_8Pk76heC_ntEMru71Ix77BoC3j2TuyiN7m9RNBW8BU5q6lKoIdvIeZfTFLzi37iufyfvMrJTixp9zhNB1NxlLCeOZl2MXegtiGqd2H3cbAyqoOiv9ihUWTfXj7SxJw","p":"_Yylc9e07CKdqNRD2EosMC2mrhrEa9j5oY_l00Qyy4-jmCA59Q9viyqvveRo0U7cRvFA5BWgWN6GGLh1DG3X-QBqVr0dnk3uzbobb55RYUXyPLuBZI2q6w2oasbiDwPdY7KpkVv_H-bpITQlyDvO8hhucA6rUV7F6KTQVz8M3Ms","q":"y5p3hch-7jJ21TkAhp_Vk1fLCAuD4tbErwQs2of9ja8sB4iJOs5Wn6HD3P7Mc8Plye7qaLHvzc8I5g0tPKWvC0DPd_FLPXiWwMVAzee3NUX_oGeJNOQp11y1w_KqdO9qZqHSEPZ3NcFL_SZMFgggxhM1uzRiPzsVN0lnD_6prZU","qi":"2Grt6uXHm61ji3xSdkBWNtUnj19vS1-7rFJp5SoYztVQVThf_W52BAiXKBdYZDRVoItC_VS2NvAOjeJjhYO_xQ_q3hK7MdtuXfEPpLnyXKkmWo3lrJ26wbeF6l05LexCkI7ShsOuSt-dsyaTJTszuKDIA6YOfWvfo3aVZmlWRaI","use":"sig"}`
privateKey, _ := jwk.ParseKey([]byte(privateKeyString))
_ = s.jwtService.SetKey(privateKey)
}
// getCborPublicKey decodes a base64-encoded public key and returns the CBOR-encoded COSE key
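// The input is expected to be a base64-encoded, DER-format (PKIX) ECDSA public key
// on the P-256 curve, such as those generated by the openssl command above.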
func (s *TestService) getCborPublicKey(base64PublicKey string) ([]byte, error) {
decodedKey, err := base64.StdEncoding.DecodeString(base64PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to decode base64 key: %w", err)
}
pubKey, err := x509.ParsePKIXPublicKey(decodedKey)
if err != nil {
return nil, fmt.Errorf("failed to parse public key: %w", err)
}
ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
return nil, fmt.Errorf("not an ECDSA public key")
}
coseKey := map[int]interface{}{
1: 2, // Key type: EC2
3: -7, // Algorithm: ECDSA with SHA-256
-1: 1, // Curve: P-256
-2: ecdsaPubKey.X.Bytes(), // X coordinate
-3: ecdsaPubKey.Y.Bytes(), // Y coordinate
}
cborPublicKey, err := cbor.Marshal(coseKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal COSE key: %w", err)
}
return cborPublicKey, nil
}
// SyncLdap triggers an LDAP synchronization
@@ -532,7 +531,7 @@ func (s *TestService) SetLdapTestConfig(ctx context.Context) error {
"ldapAttributeGroupUniqueIdentifier": "uuid",
"ldapAttributeGroupName": "uid",
"ldapAttributeGroupMember": "member",
"ldapAdminGroupName": "admin_group",
"ldapAttributeAdminGroup": "admin_group",
"ldapSoftDeleteUsers": "true",
"ldapEnabled": "true",
}

View File

@@ -1,217 +0,0 @@
package service
import (
"archive/zip"
"context"
"encoding/json"
"fmt"
"io"
"path/filepath"
"gorm.io/gorm"
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
"github.com/pocket-id/pocket-id/backend/internal/storage"
"github.com/pocket-id/pocket-id/backend/internal/utils"
)
// ExportService handles exporting Pocket ID data into a ZIP archive.
type ExportService struct {
db *gorm.DB
storage storage.FileStorage
}
func NewExportService(db *gorm.DB, storage storage.FileStorage) *ExportService {
return &ExportService{
db: db,
storage: storage,
}
}
// ExportToZip performs the full export process and writes the ZIP data to the given writer.
func (s *ExportService) ExportToZip(ctx context.Context, w io.Writer) error {
dbData, err := s.extractDatabase()
if err != nil {
return err
}
return s.writeExportZipStream(ctx, w, dbData)
}
// extractDatabase reads all tables into a DatabaseExport struct
func (s *ExportService) extractDatabase() (DatabaseExport, error) {
schema, err := utils.LoadDBSchemaTypes(s.db)
if err != nil {
return DatabaseExport{}, fmt.Errorf("failed to load schema types: %w", err)
}
version, err := s.schemaVersion()
if err != nil {
return DatabaseExport{}, err
}
out := DatabaseExport{
Provider: s.db.Name(),
Version: version,
Tables: map[string][]map[string]any{},
// These tables need to be inserted in a specific order because of foreign key constraints
// Not all tables are listed here, because not all tables are order-dependent
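// For example, rows that reference a user via a foreign key can only be inserted after the corresponding users row exists.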
TableOrder: []string{"users", "user_groups", "oidc_clients"},
}
for table := range schema {
if table == "storage" || table == "schema_migrations" {
continue
}
err = s.dumpTable(table, schema[table], &out)
if err != nil {
return DatabaseExport{}, err
}
}
return out, nil
}
func (s *ExportService) schemaVersion() (uint, error) {
var version uint
if err := s.db.Raw("SELECT version FROM schema_migrations").Row().Scan(&version); err != nil {
return 0, fmt.Errorf("failed to query schema version: %w", err)
}
return version, nil
}
// dumpTable selects all rows from a table and appends them to out.Tables
func (s *ExportService) dumpTable(table string, types utils.DBSchemaTableTypes, out *DatabaseExport) error {
rows, err := s.db.Raw("SELECT * FROM " + table).Rows()
if err != nil {
return fmt.Errorf("failed to read table %s: %w", table, err)
}
defer rows.Close()
cols, _ := rows.Columns()
if len(cols) != len(types) {
// Should never happen...
return fmt.Errorf("mismatched columns in table (%d) and schema (%d)", len(cols), len(types))
}
for rows.Next() {
vals := s.getScanValuesForTable(cols, types)
err = rows.Scan(vals...)
if err != nil {
return fmt.Errorf("failed to scan row in table %s: %w", table, err)
}
rowMap := make(map[string]any, len(cols))
for i, col := range cols {
rowMap[col] = vals[i]
}
// Skip the app lock row in the kv table
if table == "kv" {
if keyPtr, ok := rowMap["key"].(*string); ok && keyPtr != nil && *keyPtr == lockKey {
continue
}
}
out.Tables[table] = append(out.Tables[table], rowMap)
}
return rows.Err()
}
func (s *ExportService) getScanValuesForTable(cols []string, types utils.DBSchemaTableTypes) []any {
res := make([]any, len(cols))
for i, col := range cols {
// Store a pointer
// Note: don't create a helper function for this switch, because it would return type "any" and mess everything up
// If the column is nullable, we need a pointer to a pointer!
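// For example, a nullable boolean column is scanned into a **bool, so a SQL NULL arrives as a nil *bool instead of causing a scan error.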
switch types[col].Name {
case "boolean", "bool":
var x bool
if types[col].Nullable {
res[i] = utils.Ptr(utils.Ptr(x))
} else {
res[i] = utils.Ptr(x)
}
case "blob", "bytea", "jsonb":
// Treat jsonb columns as binary too
var x []byte
if types[col].Nullable {
res[i] = utils.Ptr(utils.Ptr(x))
} else {
res[i] = utils.Ptr(x)
}
case "timestamp", "timestamptz", "timestamp with time zone", "datetime":
var x datatype.DateTime
if types[col].Nullable {
res[i] = utils.Ptr(utils.Ptr(x))
} else {
res[i] = utils.Ptr(x)
}
case "integer", "int", "bigint":
var x int64
if types[col].Nullable {
res[i] = utils.Ptr(utils.Ptr(x))
} else {
res[i] = utils.Ptr(x)
}
default:
// Treat everything else as a string (including the "numeric" type)
var x string
if types[col].Nullable {
res[i] = utils.Ptr(utils.Ptr(x))
} else {
res[i] = utils.Ptr(x)
}
}
}
return res
}
func (s *ExportService) writeExportZipStream(ctx context.Context, w io.Writer, dbData DatabaseExport) error {
zipWriter := zip.NewWriter(w)
// Add database.json
jsonWriter, err := zipWriter.Create("database.json")
if err != nil {
return fmt.Errorf("failed to create database.json in zip: %w", err)
}
jsonEncoder := json.NewEncoder(jsonWriter)
jsonEncoder.SetEscapeHTML(false)
if err := jsonEncoder.Encode(dbData); err != nil {
return fmt.Errorf("failed to encode database.json: %w", err)
}
// Add uploaded files
if err := s.addUploadsToZip(ctx, zipWriter); err != nil {
return err
}
return zipWriter.Close()
}
// addUploadsToZip adds all files from the storage to the ZIP archive under the "uploads/" directory
func (s *ExportService) addUploadsToZip(ctx context.Context, zipWriter *zip.Writer) error {
return s.storage.Walk(ctx, "/", func(p storage.ObjectInfo) error {
zipPath := filepath.Join("uploads", p.Path)
w, err := zipWriter.Create(zipPath)
if err != nil {
return fmt.Errorf("failed to create zip entry for %s: %w", zipPath, err)
}
f, _, err := s.storage.Open(ctx, p.Path)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", zipPath, err)
}
defer f.Close()
if _, err := io.Copy(w, f); err != nil {
return fmt.Errorf("failed to copy file %s into zip: %w", zipPath, err)
}
return nil
})
}

View File

@@ -1,264 +0,0 @@
package service
import (
"archive/zip"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"slices"
"strings"
"gorm.io/gorm"
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
"github.com/pocket-id/pocket-id/backend/internal/storage"
"github.com/pocket-id/pocket-id/backend/internal/utils"
)
// ImportService handles importing Pocket ID data from an exported ZIP archive.
type ImportService struct {
db *gorm.DB
storage storage.FileStorage
}
type DatabaseExport struct {
Provider string `json:"provider"`
Version uint `json:"version"`
Tables map[string][]map[string]any `json:"tables"`
TableOrder []string `json:"tableOrder"`
}
func NewImportService(db *gorm.DB, storage storage.FileStorage) *ImportService {
return &ImportService{
db: db,
storage: storage,
}
}
// ImportFromZip performs the full import process from the given ZIP reader.
func (s *ImportService) ImportFromZip(ctx context.Context, r *zip.Reader) error {
dbData, err := processZipDatabaseJson(r.File)
if err != nil {
return err
}
err = s.ImportDatabase(dbData)
if err != nil {
return err
}
err = s.importUploads(ctx, r.File)
if err != nil {
return err
}
return nil
}
// ImportDatabase only imports the database data from the given DatabaseExport struct.
func (s *ImportService) ImportDatabase(dbData DatabaseExport) error {
err := s.resetSchema(dbData.Version, dbData.Provider)
if err != nil {
return err
}
err = s.insertData(dbData)
if err != nil {
return err
}
return nil
}
// processZipDatabaseJson extracts database.json from the ZIP archive
func processZipDatabaseJson(files []*zip.File) (dbData DatabaseExport, err error) {
for _, f := range files {
if f.Name == "database.json" {
return parseDatabaseJsonStream(f)
}
}
return dbData, errors.New("database.json not found in the ZIP file")
}
func parseDatabaseJsonStream(f *zip.File) (dbData DatabaseExport, err error) {
rc, err := f.Open()
if err != nil {
return dbData, fmt.Errorf("failed to open database.json: %w", err)
}
defer rc.Close()
err = json.NewDecoder(rc).Decode(&dbData)
if err != nil {
return dbData, fmt.Errorf("failed to decode database.json: %w", err)
}
return dbData, nil
}
// importUploads imports files from the uploads/ directory in the ZIP archive
func (s *ImportService) importUploads(ctx context.Context, files []*zip.File) error {
const maxFileSize = 50 << 20 // 50 MiB
const uploadsPrefix = "uploads/"
for _, f := range files {
if !strings.HasPrefix(f.Name, uploadsPrefix) {
continue
}
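// Reject entries whose decompressed size exceeds maxFileSize; this also bounds the impact of maliciously crafted (zip bomb) archives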
if f.UncompressedSize64 > maxFileSize {
return fmt.Errorf("file %s too large (%d bytes)", f.Name, f.UncompressedSize64)
}
targetPath := strings.TrimPrefix(f.Name, uploadsPrefix)
if strings.HasSuffix(f.Name, "/") || targetPath == "" {
continue // Skip directories
}
err := s.storage.DeleteAll(ctx, targetPath)
if err != nil {
return fmt.Errorf("failed to delete existing file %s: %w", targetPath, err)
}
rc, err := f.Open()
if err != nil {
return err
}
buf, err := io.ReadAll(rc)
rc.Close()
if err != nil {
return fmt.Errorf("read file %s: %w", f.Name, err)
}
err = s.storage.Save(ctx, targetPath, bytes.NewReader(buf))
if err != nil {
return fmt.Errorf("failed to save file %s: %w", targetPath, err)
}
}
return nil
}
// resetSchema drops the existing schema and migrates to the target version
func (s *ImportService) resetSchema(targetVersion uint, exportDbProvider string) error {
sqlDb, err := s.db.DB()
if err != nil {
return fmt.Errorf("failed to get sql.DB: %w", err)
}
m, err := utils.GetEmbeddedMigrateInstance(sqlDb)
if err != nil {
return fmt.Errorf("failed to get migrate instance: %w", err)
}
err = m.Drop()
if err != nil {
return fmt.Errorf("failed to drop existing schema: %w", err)
}
// Needs to be called again to re-create the schema_migrations table
m, err = utils.GetEmbeddedMigrateInstance(sqlDb)
if err != nil {
return fmt.Errorf("failed to get migrate instance: %w", err)
}
err = m.Migrate(targetVersion)
if err != nil {
return fmt.Errorf("migration failed: %w", err)
}
return nil
}
// insertData populates the DB with the imported data
func (s *ImportService) insertData(dbData DatabaseExport) error {
schema, err := utils.LoadDBSchemaTypes(s.db)
if err != nil {
return fmt.Errorf("failed to load schema types: %w", err)
}
return s.db.Transaction(func(tx *gorm.DB) error {
// Iterate through all tables
// Some tables need to be processed in order
tables := make([]string, 0, len(dbData.Tables))
tables = append(tables, dbData.TableOrder...)
for t := range dbData.Tables {
// Skip tables that are already in the ordered list
// Also skip the schema_migrations table
if slices.Contains(dbData.TableOrder, t) || t == "schema_migrations" {
continue
}
tables = append(tables, t)
}
// Insert rows
for _, table := range tables {
for _, row := range dbData.Tables[table] {
err = normalizeRowWithSchema(row, table, schema)
if err != nil {
return fmt.Errorf("failed to normalize row for table '%s': %w", table, err)
}
err = tx.Table(table).Create(row).Error
if err != nil {
return fmt.Errorf("failed inserting into table '%s': %w", table, err)
}
}
}
return nil
})
}
// normalizeRowWithSchema converts row values based on the DB schema
func normalizeRowWithSchema(row map[string]any, table string, schema utils.DBSchemaTypes) error {
if schema[table] == nil {
return fmt.Errorf("schema not found for table '%s'", table)
}
for col, val := range row {
if val == nil {
// If the value is nil, skip the column
continue
}
colType := schema[table][col]
switch colType.Name {
case "timestamp", "timestamptz", "timestamp with time zone", "datetime":
// Dates are stored as strings
str, ok := val.(string)
if !ok {
return fmt.Errorf("value for column '%s/%s' was expected to be a string, but was '%T'", table, col, val)
}
d, err := datatype.DateTimeFromString(str)
if err != nil {
return fmt.Errorf("failed to decode value for column '%s/%s' as timestamp: %w", table, col, err)
}
row[col] = d
case "blob", "bytea", "jsonb":
// Binary data and jsonb data are stored in the file as base64-encoded strings
str, ok := val.(string)
if !ok {
return fmt.Errorf("value for column '%s/%s' was expected to be a string, but was '%T'", table, col, val)
}
b, err := base64.StdEncoding.DecodeString(str)
if err != nil {
return fmt.Errorf("failed to decode value for column '%s/%s' from base64: %w", table, col, err)
}
// For jsonb, we additionally cast to json.RawMessage
if colType.Name == "jsonb" {
row[col] = json.RawMessage(b)
} else {
row[col] = b
}
}
}
return nil
}

View File

@@ -18,6 +18,14 @@ import (
)
const (
// PrivateKeyFile is the path in the data/keys folder where the key is stored
// This is a JSON file containing a key encoded as JWK
PrivateKeyFile = "jwt_private_key.json"
// PrivateKeyFileEncrypted is the path in the data/keys folder where the encrypted key is stored
// This is an encrypted JSON file containing a key encoded as JWK
PrivateKeyFileEncrypted = "jwt_private_key.json.enc"
// KeyUsageSigning is the usage for the private keys, for the "use" property
KeyUsageSigning = "sig"
@@ -48,7 +56,6 @@ const (
)
type JwtService struct {
db *gorm.DB
envConfig *common.EnvConfigSchema
privateKey jwk.Key
keyId string
@@ -59,6 +66,7 @@ type JwtService struct {
func NewJwtService(db *gorm.DB, appConfigService *AppConfigService) (*JwtService, error) {
service := &JwtService{}
// Ensure keys are generated or loaded
err := service.init(db, appConfigService, &common.EnvConfig)
if err != nil {
return nil, err
@@ -70,15 +78,14 @@ func NewJwtService(db *gorm.DB, appConfigService *AppConfigService) (*JwtService
func (s *JwtService) init(db *gorm.DB, appConfigService *AppConfigService, envConfig *common.EnvConfigSchema) (err error) {
s.appConfigService = appConfigService
s.envConfig = envConfig
s.db = db
// Ensure keys are generated or loaded
return s.LoadOrGenerateKey()
return s.loadOrGenerateKey(db)
}
func (s *JwtService) LoadOrGenerateKey() error {
func (s *JwtService) loadOrGenerateKey(db *gorm.DB) error {
// Get the key provider
keyProvider, err := jwkutils.GetKeyProvider(s.db, s.envConfig, s.appConfigService.GetDbConfig().InstanceID.Value)
keyProvider, err := jwkutils.GetKeyProvider(db, s.envConfig, s.appConfigService.GetDbConfig().InstanceID.Value)
if err != nil {
return fmt.Errorf("failed to get key provider: %w", err)
}
@@ -86,7 +93,7 @@ func (s *JwtService) LoadOrGenerateKey() error {
// Try loading a key
key, err := keyProvider.LoadKey()
if err != nil {
return fmt.Errorf("failed to load key: %w", err)
return fmt.Errorf("failed to load key (provider type '%s'): %w", s.envConfig.KeysStorage, err)
}
// If we have a key, store it in the object and we're done
@@ -107,7 +114,7 @@ func (s *JwtService) LoadOrGenerateKey() error {
// Save the newly-generated key
err = keyProvider.SaveKey(s.privateKey)
if err != nil {
return fmt.Errorf("failed to save private key: %w", err)
return fmt.Errorf("failed to save private key (provider type '%s'): %w", s.envConfig.KeysStorage, err)
}
return nil

File diff suppressed because it is too large

View File

@@ -371,7 +371,7 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
// Check whether the user is an admin by looking for membership in the admin group
isAdmin := false
for _, group := range value.GetAttributeValues("memberOf") {
if getDNProperty(dbConfig.LdapAttributeGroupName.Value, group) == dbConfig.LdapAdminGroupName.Value {
if getDNProperty(dbConfig.LdapAttributeGroupName.Value, group) == dbConfig.LdapAttributeAdminGroup.Value {
isAdmin = true
break
}

View File

@@ -15,7 +15,6 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"slices"
"strings"
"time"
@@ -1196,7 +1195,7 @@ func (s *OidcService) getCallbackURL(client *model.OidcClient, inputCallbackURL
// If URLs are already configured, validate against them
if len(client.CallbackURLs) > 0 {
matched, err := s.getCallbackURLFromList(client.CallbackURLs, inputCallbackURL)
matched, err := utils.GetCallbackURLFromList(client.CallbackURLs, inputCallbackURL)
if err != nil {
return "", err
} else if matched == "" {
@@ -1219,7 +1218,7 @@ func (s *OidcService) getLogoutCallbackURL(client *model.OidcClient, inputLogout
return client.LogoutCallbackURLs[0], nil
}
matched, err := s.getCallbackURLFromList(client.LogoutCallbackURLs, inputLogoutCallbackURL)
matched, err := utils.GetCallbackURLFromList(client.LogoutCallbackURLs, inputLogoutCallbackURL)
if err != nil {
return "", err
} else if matched == "" {
@@ -1229,21 +1228,6 @@ func (s *OidcService) getLogoutCallbackURL(client *model.OidcClient, inputLogout
return matched, nil
}
func (s *OidcService) getCallbackURLFromList(urls []string, inputCallbackURL string) (callbackURL string, err error) {
for _, callbackPattern := range urls {
regexPattern := "^" + strings.ReplaceAll(regexp.QuoteMeta(callbackPattern), `\*`, ".*") + "$"
matched, err := regexp.MatchString(regexPattern, inputCallbackURL)
if err != nil {
return "", err
}
if matched {
return inputCallbackURL, nil
}
}
return "", nil
}
func (s *OidcService) addCallbackURLToClient(ctx context.Context, client *model.OidcClient, callbackURL string, tx *gorm.DB) error {
// Add the new callback URL to the existing list
client.CallbackURLs = append(client.CallbackURLs, callbackURL)

View File

@@ -148,7 +148,6 @@ func TestOidcService_verifyClientCredentialsInternal(t *testing.T) {
var err error
// Create a test database
db := testutils.NewDatabaseForTest(t)
common.EnvConfig.EncryptionKey = []byte("0123456789abcdef0123456789abcdef")
// Create two JWKs for testing
privateJWK, jwkSetJSON := generateTestECDSAKey(t)

View File

@@ -432,36 +432,28 @@ func (s *UserService) RequestOneTimeAccessEmailAsAdmin(ctx context.Context, user
return &common.OneTimeAccessDisabledError{}
}
_, err := s.requestOneTimeAccessEmailInternal(ctx, userID, "", ttl, true)
return err
return s.requestOneTimeAccessEmailInternal(ctx, userID, "", ttl)
}
func (s *UserService) RequestOneTimeAccessEmailAsUnauthenticatedUser(ctx context.Context, userID, redirectPath string) (string, error) {
func (s *UserService) RequestOneTimeAccessEmailAsUnauthenticatedUser(ctx context.Context, userID, redirectPath string) error {
isDisabled := !s.appConfigService.GetDbConfig().EmailOneTimeAccessAsUnauthenticatedEnabled.IsTrue()
if isDisabled {
return "", &common.OneTimeAccessDisabledError{}
return &common.OneTimeAccessDisabledError{}
}
var userId string
err := s.db.Model(&model.User{}).Select("id").Where("email = ?", userID).First(&userId).Error
if errors.Is(err, gorm.ErrRecordNotFound) {
// Do not return error if user not found to prevent email enumeration
return "", nil
return nil
} else if err != nil {
return "", err
return err
}
deviceToken, err := s.requestOneTimeAccessEmailInternal(ctx, userId, redirectPath, 15*time.Minute, true)
if err != nil {
return "", err
} else if deviceToken == nil {
return "", errors.New("device token expected but not returned")
}
return *deviceToken, nil
return s.requestOneTimeAccessEmailInternal(ctx, userId, redirectPath, 15*time.Minute)
}
func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, userID, redirectPath string, ttl time.Duration, withDeviceToken bool) (*string, error) {
func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, userID, redirectPath string, ttl time.Duration) error {
tx := s.db.Begin()
defer func() {
tx.Rollback()
@@ -469,20 +461,21 @@ func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, use
user, err := s.GetUser(ctx, userID)
if err != nil {
return nil, err
return err
}
if user.Email == nil {
return nil, &common.UserEmailNotSetError{}
return &common.UserEmailNotSetError{}
}
oneTimeAccessToken, deviceToken, err := s.createOneTimeAccessTokenInternal(ctx, user.ID, ttl, withDeviceToken, tx)
oneTimeAccessToken, err := s.createOneTimeAccessTokenInternal(ctx, user.ID, ttl, tx)
if err != nil {
return nil, err
return err
}
err = tx.Commit().Error
if err != nil {
return nil, err
return err
}
// We use a background context here as this is running in a goroutine
@@ -515,29 +508,28 @@ func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, use
}
}()
return deviceToken, nil
return nil
}
func (s *UserService) CreateOneTimeAccessToken(ctx context.Context, userID string, ttl time.Duration) (token string, err error) {
token, _, err = s.createOneTimeAccessTokenInternal(ctx, userID, ttl, false, s.db)
return token, err
func (s *UserService) CreateOneTimeAccessToken(ctx context.Context, userID string, ttl time.Duration) (string, error) {
return s.createOneTimeAccessTokenInternal(ctx, userID, ttl, s.db)
}
func (s *UserService) createOneTimeAccessTokenInternal(ctx context.Context, userID string, ttl time.Duration, withDeviceToken bool, tx *gorm.DB) (token string, deviceToken *string, err error) {
oneTimeAccessToken, err := NewOneTimeAccessToken(userID, ttl, withDeviceToken)
func (s *UserService) createOneTimeAccessTokenInternal(ctx context.Context, userID string, ttl time.Duration, tx *gorm.DB) (string, error) {
oneTimeAccessToken, err := NewOneTimeAccessToken(userID, ttl)
if err != nil {
return "", nil, err
return "", err
}
err = tx.WithContext(ctx).Create(oneTimeAccessToken).Error
if err != nil {
return "", nil, err
return "", err
}
return oneTimeAccessToken.Token, oneTimeAccessToken.DeviceToken, nil
return oneTimeAccessToken.Token, nil
}
func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token, deviceToken, ipAddress, userAgent string) (model.User, string, error) {
func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token string, ipAddress, userAgent string) (model.User, string, error) {
tx := s.db.Begin()
defer func() {
tx.Rollback()
@@ -557,10 +549,6 @@ func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token, dev
}
return model.User{}, "", err
}
if oneTimeAccessToken.DeviceToken != nil && deviceToken != *oneTimeAccessToken.DeviceToken {
return model.User{}, "", &common.DeviceCodeInvalid{}
}
accessToken, err := s.jwtService.GenerateAccessToken(oneTimeAccessToken.User)
if err != nil {
return model.User{}, "", err
@@ -830,33 +818,23 @@ func (s *UserService) DeleteSignupToken(ctx context.Context, tokenID string) err
return s.db.WithContext(ctx).Delete(&model.SignupToken{}, "id = ?", tokenID).Error
}
func NewOneTimeAccessToken(userID string, ttl time.Duration, withDeviceToken bool) (*model.OneTimeAccessToken, error) {
func NewOneTimeAccessToken(userID string, ttl time.Duration) (*model.OneTimeAccessToken, error) {
// If the TTL is 15 minutes or less, use a 6-character token instead of a 16-character one
tokenLength := 16
if ttl <= 15*time.Minute {
tokenLength = 6
}
token, err := utils.GenerateRandomAlphanumericString(tokenLength)
randomString, err := utils.GenerateRandomAlphanumericString(tokenLength)
if err != nil {
return nil, err
}
var deviceToken *string
if withDeviceToken {
dt, err := utils.GenerateRandomAlphanumericString(16)
if err != nil {
return nil, err
}
deviceToken = &dt
}
now := time.Now().Round(time.Second)
o := &model.OneTimeAccessToken{
UserID: userID,
ExpiresAt: datatype.DateTime(now.Add(ttl)),
Token: token,
DeviceToken: deviceToken,
UserID: userID,
ExpiresAt: datatype.DateTime(now.Add(ttl)),
Token: randomString,
}
return o, nil

View File

@@ -0,0 +1,199 @@
package utils
import (
"net"
"net/url"
"path"
"regexp"
"strings"
)
// GetCallbackURLFromList checks the input callback URL against the list of registered URL patterns and returns the URL to redirect to when one matches
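// For example, with the registered pattern "http://127.0.0.1/callback", the input
// "http://127.0.0.1:8080/callback" is matched and returned verbatim, since any port
// is accepted for loopback redirect URIs (RFC 8252). An empty string and a nil
// error are returned when no pattern matches.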
func GetCallbackURLFromList(urls []string, inputCallbackURL string) (callbackURL string, err error) {
// Special case for Loopback Interface Redirection. Quoting from RFC 8252 section 7.3:
// https://datatracker.ietf.org/doc/html/rfc8252#section-7.3
//
// The authorization server MUST allow any port to be specified at the
// time of the request for loopback IP redirect URIs, to accommodate
// clients that obtain an available ephemeral port from the operating
// system at the time of the request.
loopbackRedirect := ""
u, _ := url.Parse(inputCallbackURL)
if u != nil && u.Scheme == "http" {
host := u.Hostname()
ip := net.ParseIP(host)
if host == "localhost" || (ip != nil && ip.IsLoopback()) {
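// Keep the original URL (with its port) to return on a match, then strip the
// port so the registered pattern, which carries no port, can match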
loopbackRedirect = u.String()
u.Host = host
inputCallbackURL = u.String()
}
}
for _, pattern := range urls {
matches, err := matchCallbackURL(pattern, inputCallbackURL)
if err != nil {
return "", err
} else if !matches {
continue
}
if loopbackRedirect != "" {
return loopbackRedirect, nil
}
return inputCallbackURL, nil
}
return "", nil
}
// matchCallbackURL checks if the input callback URL matches the given pattern.
// It supports wildcard matching for paths and query parameters.
//
// The base URL (scheme, userinfo, host, port) and query parameters support single '*' wildcards only,
// while the path supports both single '*' and double '**' wildcards.
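// For example, "https://*.example.com/**" matches "https://app.example.com/a/b/callback",
// and the special pattern "*" matches any URL.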
func matchCallbackURL(pattern string, inputCallbackURL string) (matches bool, err error) {
if pattern == inputCallbackURL || pattern == "*" {
return true, nil
}
// Strip fragment part
// The endpoint URI MUST NOT include a fragment component.
// https://datatracker.ietf.org/doc/html/rfc6749#section-3.1.2
pattern, _, _ = strings.Cut(pattern, "#")
inputCallbackURL, _, _ = strings.Cut(inputCallbackURL, "#")
// Store and strip query part
var patternQuery url.Values
if i := strings.Index(pattern, "?"); i >= 0 {
patternQuery, err = url.ParseQuery(pattern[i+1:])
if err != nil {
return false, err
}
pattern = pattern[:i]
}
var inputQuery url.Values
if i := strings.Index(inputCallbackURL, "?"); i >= 0 {
inputQuery, err = url.ParseQuery(inputCallbackURL[i+1:])
if err != nil {
return false, err
}
inputCallbackURL = inputCallbackURL[:i]
}
// Split both the pattern and the input URL into parts
patternParts, patternPath := splitParts(pattern)
inputParts, inputPath := splitParts(inputCallbackURL)
// Verify everything except the path and query parameters
if len(patternParts) != len(inputParts) {
return false, nil
}
for i, patternPart := range patternParts {
matched, err := path.Match(patternPart, inputParts[i])
if err != nil || !matched {
return false, err
}
}
// Verify path with wildcard support
matched, err := matchPath(patternPath, inputPath)
if err != nil || !matched {
return false, err
}
// Verify query parameters
if len(patternQuery) != len(inputQuery) {
return false, nil
}
for patternKey, patternValues := range patternQuery {
inputValues, exists := inputQuery[patternKey]
if !exists {
return false, nil
}
if len(patternValues) != len(inputValues) {
return false, nil
}
for i := range patternValues {
matched, err := path.Match(patternValues[i], inputValues[i])
if err != nil || !matched {
return false, err
}
}
}
return true, nil
}
// matchPath matches the input path against the pattern with wildcard support
// Supported wildcards:
//
// '*' matches any sequence of characters except '/'
// '**' matches any sequence of characters including '/'
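// Internally, the pattern is compiled to a regular expression: "/api/**/callback"
// becomes ^/api/.*/callback$, while "/api/*/callback" becomes ^/api/[^/]*/callback$.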
func matchPath(pattern string, input string) (matches bool, err error) {
var regexPattern strings.Builder
regexPattern.WriteString("^")
runes := []rune(pattern)
n := len(runes)
for i := 0; i < n; {
switch runes[i] {
case '*':
// Check if it's a ** (globstar)
if i+1 < n && runes[i+1] == '*' {
// globstar = .* (match slashes too)
regexPattern.WriteString(".*")
i += 2
} else {
// single * = [^/]* (no slash)
regexPattern.WriteString(`[^/]*`)
i++
}
default:
regexPattern.WriteString(regexp.QuoteMeta(string(runes[i])))
i++
}
}
regexPattern.WriteString("$")
matched, err := regexp.MatchString(regexPattern.String(), input)
return matched, err
}
// splitParts splits the URL into parts by special characters and returns the path separately
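// For example, "https://user:pass@example.com:8080/callback" yields the parts
// ["https", "user", "pass", "example", "com", "8080"] and the path "/callback".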
func splitParts(s string) (parts []string, path string) {
split := func(r rune) bool {
return r == ':' || r == '/' || r == '[' || r == ']' || r == '@' || r == '.'
}
pathStart := -1
// Look for scheme:// first
if i := strings.Index(s, "://"); i >= 0 {
// Look for the next slash after scheme://
rest := s[i+3:]
if j := strings.IndexRune(rest, '/'); j >= 0 {
pathStart = i + 3 + j
}
} else {
// Otherwise, first slash is path start
pathStart = strings.IndexRune(s, '/')
}
if pathStart >= 0 {
path = s[pathStart:]
base := s[:pathStart]
parts = strings.FieldsFunc(base, split)
} else {
parts = strings.FieldsFunc(s, split)
path = ""
}
return parts, path
}

View File

@@ -0,0 +1,784 @@
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMatchCallbackURL(t *testing.T) {
tests := []struct {
name string
pattern string
input string
shouldMatch bool
}{
// Basic matching
{
"exact match",
"https://example.com/callback",
"https://example.com/callback",
true,
},
{
"no match",
"https://example.org/callback",
"https://example.com/callback",
false,
},
// Scheme
{
"scheme mismatch",
"https://example.com/callback",
"http://example.com/callback",
false,
},
{
"wildcard scheme",
"*://example.com/callback",
"https://example.com/callback",
true,
},
// Hostname
{
"hostname mismatch",
"https://example.com/callback",
"https://malicious.com/callback",
false,
},
{
"wildcard subdomain",
"https://*.example.com/callback",
"https://subdomain.example.com/callback",
true,
},
{
"partial wildcard in hostname prefix",
"https://app*.example.com/callback",
"https://app1.example.com/callback",
true,
},
{
"partial wildcard in hostname suffix",
"https://*-prod.example.com/callback",
"https://api-prod.example.com/callback",
true,
},
{
"partial wildcard in hostname middle",
"https://app-*-server.example.com/callback",
"https://app-staging-server.example.com/callback",
true,
},
{
"subdomain wildcard doesn't match domain hijack attempt",
"https://*.example.com/callback",
"https://malicious.site?url=abc.example.com/callback",
false,
},
{
"hostname mismatch with confusable characters",
"https://example.com/callback",
"https://examp1e.com/callback",
false,
},
{
"hostname mismatch with homograph attack",
"https://example.com/callback",
"https://еxample.com/callback",
false,
},
// Port
{
"port mismatch",
"https://example.com:8080/callback",
"https://example.com:9090/callback",
false,
},
{
"wildcard port",
"https://example.com:*/callback",
"https://example.com:8080/callback",
true,
},
{
"partial wildcard in port prefix",
"https://example.com:80*/callback",
"https://example.com:8080/callback",
true,
},
// Path
{
"path mismatch",
"https://example.com/callback",
"https://example.com/other",
false,
},
{
"wildcard path segment",
"https://example.com/api/*/callback",
"https://example.com/api/v1/callback",
true,
},
{
"wildcard entire path",
"https://example.com/*",
"https://example.com/callback",
true,
},
{
"partial wildcard in path prefix",
"https://example.com/test*",
"https://example.com/testcase",
true,
},
{
"partial wildcard in path suffix",
"https://example.com/*-callback",
"https://example.com/oauth-callback",
true,
},
{
"partial wildcard in path middle",
"https://example.com/api-*-v1/callback",
"https://example.com/api-internal-v1/callback",
true,
},
{
"multiple partial wildcards in path",
"https://example.com/*/test*/callback",
"https://example.com/v1/testing/callback",
true,
},
{
"multiple wildcard segments in path",
"https://example.com/**/callback",
"https://example.com/api/v1/foo/bar/callback",
true,
},
{
"multiple wildcard segments in path",
"https://example.com/**/v1/**/callback",
"https://example.com/api/v1/foo/bar/callback",
true,
},
{
"partial wildcard matching full path segment",
"https://example.com/foo-*",
"https://example.com/foo-bar",
true,
},
// Credentials
{
"username mismatch",
"https://user:pass@example.com/callback",
"https://admin:pass@example.com/callback",
false,
},
{
"missing credentials",
"https://user:pass@example.com/callback",
"https://example.com/callback",
false,
},
{
"unexpected credentials",
"https://example.com/callback",
"https://user:pass@example.com/callback",
false,
},
{
"wildcard password",
"https://user:*@example.com/callback",
"https://user:secret123@example.com/callback",
true,
},
{
"partial wildcard in username",
"https://admin*:pass@example.com/callback",
"https://admin123:pass@example.com/callback",
true,
},
{
"partial wildcard in password",
"https://user:pass*@example.com/callback",
"https://user:password123@example.com/callback",
true,
},
{
"wildcard password doesn't allow domain hijack",
"https://user:*@example.com/callback",
"https://user:password@malicious.site#example.com/callback",
false,
},
{
"credentials with @ in password trying to hijack hostname",
"https://user:pass@example.com/callback",
"https://user:pass@evil.com@example.com/callback",
false,
},
// Query parameters
{
"extra query parameter",
"https://example.com/callback?code=*",
"https://example.com/callback?code=abc123&extra=value",
false,
},
{
"missing query parameter",
"https://example.com/callback?code=*&state=*",
"https://example.com/callback?code=abc123",
false,
},
{
"query parameter after fragment",
"https://example.com/callback?code=123",
"https://example.com/callback#section?code=123",
false,
},
{
"query parameter name mismatch",
"https://example.com/callback?code=*",
"https://example.com/callback?token=abc123",
false,
},
{
"wildcard query parameter",
"https://example.com/callback?code=*",
"https://example.com/callback?code=abc123",
true,
},
{
"multiple query parameters",
"https://example.com/callback?code=*&state=*",
"https://example.com/callback?code=abc123&state=xyz789",
true,
},
{
"query parameters in different order",
"https://example.com/callback?state=*&code=*",
"https://example.com/callback?code=abc123&state=xyz789",
true,
},
{
"exact query parameter value",
"https://example.com/callback?mode=production",
"https://example.com/callback?mode=production",
true,
},
{
"query parameter value mismatch",
"https://example.com/callback?mode=production",
"https://example.com/callback?mode=development",
false,
},
{
"mixed exact and wildcard query parameters",
"https://example.com/callback?mode=production&code=*",
"https://example.com/callback?mode=production&code=abc123",
true,
},
{
"mixed exact and wildcard with wrong exact value",
"https://example.com/callback?mode=production&code=*",
"https://example.com/callback?mode=development&code=abc123",
false,
},
{
"multiple values for same parameter",
"https://example.com/callback?param=*&param=*",
"https://example.com/callback?param=value1&param=value2",
true,
},
{
"unexpected query parameters",
"https://example.com/callback",
"https://example.com/callback?extra=value",
false,
},
{
"query parameter with redirect to external site",
"https://example.com/callback?code=*",
"https://example.com/callback?code=123&redirect=https://evil.com",
false,
},
{
"open redirect via encoded URL in query param",
"https://example.com/callback?state=*",
"https://example.com/callback?state=abc&next=//evil.com",
false,
},
// Fragment
{
"fragment ignored when both pattern and input have fragment",
"https://example.com/callback#fragment",
"https://example.com/callback#fragment",
true,
},
{
"fragment ignored when pattern has fragment but input doesn't",
"https://example.com/callback#fragment",
"https://example.com/callback",
true,
},
{
"fragment ignored when input has fragment but pattern doesn't",
"https://example.com/callback",
"https://example.com/callback#section",
true,
},
// Path traversal and injection attempts
{
"path traversal attempt",
"https://example.com/callback",
"https://example.com/../admin/callback",
false,
},
{
"backslash instead of forward slash",
"https://example.com/callback",
"https://example.com\\callback",
false,
},
{
"double slash in hostname (protocol smuggling)",
"https://example.com/callback",
"https://example.com//evil.com/callback",
false,
},
{
"CRLF injection attempt in path",
"https://example.com/callback",
"https://example.com/callback%0d%0aLocation:%20https://evil.com",
false,
},
{
"null byte injection",
"https://example.com/callback",
"https://example.com/callback%00.evil.com",
false,
},
}
for _, tt := range tests {
matches, err := matchCallbackURL(tt.pattern, tt.input)
require.NoError(t, err, tt.name)
assert.Equal(t, tt.shouldMatch, matches, tt.name)
}
}
func TestGetCallbackURLFromList_LoopbackSpecialHandling(t *testing.T) {
tests := []struct {
name string
urls []string
inputCallbackURL string
expectedURL string
expectMatch bool
}{
{
name: "127.0.0.1 with dynamic port - exact match",
urls: []string{"http://127.0.0.1/callback"},
inputCallbackURL: "http://127.0.0.1:8080/callback",
expectedURL: "http://127.0.0.1:8080/callback",
expectMatch: true,
},
{
name: "127.0.0.1 with different port",
urls: []string{"http://127.0.0.1/callback"},
inputCallbackURL: "http://127.0.0.1:9999/callback",
expectedURL: "http://127.0.0.1:9999/callback",
expectMatch: true,
},
{
name: "IPv6 loopback with dynamic port",
urls: []string{"http://[::1]/callback"},
inputCallbackURL: "http://[::1]:8080/callback",
expectedURL: "http://[::1]:8080/callback",
expectMatch: true,
},
{
name: "IPv6 loopback without brackets in input",
urls: []string{"http://[::1]/callback"},
inputCallbackURL: "http://::1:8080/callback",
expectedURL: "http://::1:8080/callback",
expectMatch: true,
},
{
name: "localhost with dynamic port",
urls: []string{"http://localhost/callback"},
inputCallbackURL: "http://localhost:8080/callback",
expectedURL: "http://localhost:8080/callback",
expectMatch: true,
},
{
name: "https loopback doesn't trigger special handling",
urls: []string{"https://127.0.0.1/callback"},
inputCallbackURL: "https://127.0.0.1:8080/callback",
expectedURL: "",
expectMatch: false,
},
{
name: "loopback with path match",
urls: []string{"http://127.0.0.1/auth/*"},
inputCallbackURL: "http://127.0.0.1:3000/auth/callback",
expectedURL: "http://127.0.0.1:3000/auth/callback",
expectMatch: true,
},
{
name: "loopback with path mismatch",
urls: []string{"http://127.0.0.1/callback"},
inputCallbackURL: "http://127.0.0.1:8080/different",
expectedURL: "",
expectMatch: false,
},
{
name: "non-loopback IP",
urls: []string{"http://192.168.1.1/callback"},
inputCallbackURL: "http://192.168.1.1:8080/callback",
expectedURL: "",
expectMatch: false,
},
{
name: "wildcard matches loopback",
urls: []string{"*"},
inputCallbackURL: "http://127.0.0.1:8080/callback",
expectedURL: "http://127.0.0.1:8080/callback",
expectMatch: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := GetCallbackURLFromList(tt.urls, tt.inputCallbackURL)
require.NoError(t, err)
if tt.expectMatch {
assert.Equal(t, tt.expectedURL, result)
} else {
assert.Empty(t, result)
}
})
}
}
func TestGetCallbackURLFromList_MultiplePatterns(t *testing.T) {
tests := []struct {
name string
urls []string
inputCallbackURL string
expectedURL string
expectMatch bool
}{
{
name: "matches first pattern",
urls: []string{
"https://example.com/callback",
"https://example.org/callback",
},
inputCallbackURL: "https://example.com/callback",
expectedURL: "https://example.com/callback",
expectMatch: true,
},
{
name: "matches second pattern",
urls: []string{
"https://example.com/callback",
"https://example.org/callback",
},
inputCallbackURL: "https://example.org/callback",
expectedURL: "https://example.org/callback",
expectMatch: true,
},
{
name: "matches none",
urls: []string{
"https://example.com/callback",
"https://example.org/callback",
},
inputCallbackURL: "https://malicious.com/callback",
expectedURL: "",
expectMatch: false,
},
{
name: "matches wildcard pattern",
urls: []string{
"https://example.com/callback",
"https://*.example.org/callback",
},
inputCallbackURL: "https://subdomain.example.org/callback",
expectedURL: "https://subdomain.example.org/callback",
expectMatch: true,
},
{
name: "empty pattern list",
urls: []string{},
inputCallbackURL: "https://example.com/callback",
expectedURL: "",
expectMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := GetCallbackURLFromList(tt.urls, tt.inputCallbackURL)
require.NoError(t, err)
if tt.expectMatch {
assert.Equal(t, tt.expectedURL, result)
} else {
assert.Empty(t, result)
}
})
}
}
func TestMatchPath(t *testing.T) {
tests := []struct {
name string
pattern string
input string
shouldMatch bool
}{
// Exact matches
{
name: "exact match",
pattern: "/callback",
input: "/callback",
shouldMatch: true,
},
{
name: "exact mismatch",
pattern: "/callback",
input: "/other",
shouldMatch: false,
},
{
name: "empty paths",
pattern: "",
input: "",
shouldMatch: true,
},
// Single wildcard (*)
{
name: "single wildcard matches segment",
pattern: "/api/*/callback",
input: "/api/v1/callback",
shouldMatch: true,
},
{
name: "single wildcard doesn't match multiple segments",
pattern: "/api/*/callback",
input: "/api/v1/v2/callback",
shouldMatch: false,
},
{
name: "single wildcard at end",
pattern: "/callback/*",
input: "/callback/test",
shouldMatch: true,
},
{
name: "single wildcard at start",
pattern: "/*/callback",
input: "/api/callback",
shouldMatch: true,
},
{
name: "multiple single wildcards",
pattern: "/*/test/*",
input: "/api/test/callback",
shouldMatch: true,
},
{
name: "partial wildcard prefix",
pattern: "/test*",
input: "/testing",
shouldMatch: true,
},
{
name: "partial wildcard suffix",
pattern: "/*-callback",
input: "/oauth-callback",
shouldMatch: true,
},
{
name: "partial wildcard middle",
pattern: "/api-*-v1",
input: "/api-internal-v1",
shouldMatch: true,
},
// Double wildcard (**)
{
name: "double wildcard matches multiple segments",
pattern: "/api/**/callback",
input: "/api/v1/v2/v3/callback",
shouldMatch: true,
},
{
name: "double wildcard matches single segment",
pattern: "/api/**/callback",
input: "/api/v1/callback",
shouldMatch: true,
},
{
name: "double wildcard doesn't match when pattern has extra slashes",
pattern: "/api/**/callback",
input: "/api/callback",
shouldMatch: false,
},
{
name: "double wildcard at end",
pattern: "/api/**",
input: "/api/v1/v2/callback",
shouldMatch: true,
},
{
name: "double wildcard in middle",
pattern: "/api/**/v2/**/callback",
input: "/api/v1/v2/v3/v4/callback",
shouldMatch: true,
},
// Complex patterns
{
name: "mix of single and double wildcards",
pattern: "/*/api/**/callback",
input: "/app/api/v1/v2/callback",
shouldMatch: true,
},
{
name: "wildcard with special characters",
pattern: "/callback-*",
input: "/callback-123",
shouldMatch: true,
},
{
name: "path with query-like string (no special handling)",
pattern: "/callback?code=*",
input: "/callback?code=abc",
shouldMatch: true,
},
// Edge cases
{
name: "single wildcard matches empty segment",
pattern: "/api/*/callback",
input: "/api//callback",
shouldMatch: true,
},
{
name: "pattern longer than input",
pattern: "/api/v1/callback",
input: "/api",
shouldMatch: false,
},
{
name: "input longer than pattern",
pattern: "/api",
input: "/api/v1/callback",
shouldMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
matches, err := matchPath(tt.pattern, tt.input)
require.NoError(t, err)
assert.Equal(t, tt.shouldMatch, matches)
})
}
}
func TestSplitParts(t *testing.T) {
tests := []struct {
name string
input string
expectedParts []string
expectedPath string
}{
{
name: "simple https URL",
input: "https://example.com/callback",
expectedParts: []string{"https", "example", "com"},
expectedPath: "/callback",
},
{
name: "URL with port",
input: "https://example.com:8080/callback",
expectedParts: []string{"https", "example", "com", "8080"},
expectedPath: "/callback",
},
{
name: "URL with subdomain",
input: "https://api.example.com/callback",
expectedParts: []string{"https", "api", "example", "com"},
expectedPath: "/callback",
},
{
name: "URL with credentials",
input: "https://user:pass@example.com/callback",
expectedParts: []string{"https", "user", "pass", "example", "com"},
expectedPath: "/callback",
},
{
name: "URL without path",
input: "https://example.com",
expectedParts: []string{"https", "example", "com"},
expectedPath: "",
},
{
name: "URL with deep path",
input: "https://example.com/api/v1/callback",
expectedParts: []string{"https", "example", "com"},
expectedPath: "/api/v1/callback",
},
{
name: "URL with path and query",
input: "https://example.com/callback?code=123",
expectedParts: []string{"https", "example", "com"},
expectedPath: "/callback?code=123",
},
{
name: "URL with trailing slash",
input: "https://example.com/",
expectedParts: []string{"https", "example", "com"},
expectedPath: "/",
},
{
name: "URL with multiple subdomains",
input: "https://api.v1.staging.example.com/callback",
expectedParts: []string{"https", "api", "v1", "staging", "example", "com"},
expectedPath: "/callback",
},
{
name: "URL with port and credentials",
input: "https://user:pass@example.com:8080/callback",
expectedParts: []string{"https", "user", "pass", "example", "com", "8080"},
expectedPath: "/callback",
},
{
name: "scheme with authority separator but no slash",
input: "http://example.com",
expectedParts: []string{"http", "example", "com"},
expectedPath: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
parts, path := splitParts(tt.input)
assert.Equal(t, tt.expectedParts, parts, "parts mismatch")
assert.Equal(t, tt.expectedPath, path, "path mismatch")
})
}
}

View File

@@ -1,8 +1,6 @@
package cookie
import (
"time"
"github.com/gin-gonic/gin"
)
@@ -13,7 +11,3 @@ func AddAccessTokenCookie(c *gin.Context, maxAgeInSeconds int, token string) {
func AddSessionIdCookie(c *gin.Context, maxAgeInSeconds int, sessionID string) {
c.SetCookie(SessionIdCookieName, sessionID, maxAgeInSeconds, "/", "", true, true)
}
func AddDeviceTokenCookie(c *gin.Context, deviceToken string) {
c.SetCookie(DeviceTokenCookieName, deviceToken, int(15*time.Minute.Seconds()), "/api/one-time-access-token", "", true, true)
}

View File

@@ -8,12 +8,10 @@ import (
var AccessTokenCookieName = "__Host-access_token"
var SessionIdCookieName = "__Host-session"
var DeviceTokenCookieName = "__Host-device_token" //nolint:gosec
func init() {
if strings.HasPrefix(common.EnvConfig.AppURL, "http://") {
AccessTokenCookieName = "access_token"
SessionIdCookieName = "session"
DeviceTokenCookieName = "device_token"
}
}

View File

@@ -1,130 +0,0 @@
package utils
import (
"database/sql"
"errors"
"fmt"
"log/slog"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
postgresMigrate "github.com/golang-migrate/migrate/v4/database/postgres"
sqliteMigrate "github.com/golang-migrate/migrate/v4/database/sqlite3"
"github.com/golang-migrate/migrate/v4/source/iofs"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/resources"
)
// MigrateDatabase applies database migrations using embedded migration files or fetches them from GitHub if a downgrade is detected.
func MigrateDatabase(sqlDb *sql.DB) error {
m, err := GetEmbeddedMigrateInstance(sqlDb)
if err != nil {
return fmt.Errorf("failed to get migrate instance: %w", err)
}
path := "migrations/" + string(common.EnvConfig.DbProvider)
requiredVersion, err := getRequiredMigrationVersion(path)
if err != nil {
return fmt.Errorf("failed to get last migration version: %w", err)
}
currentVersion, _, _ := m.Version()
if currentVersion > requiredVersion {
slog.Warn("Database version is newer than the application supports, possible downgrade detected", slog.Uint64("db_version", uint64(currentVersion)), slog.Uint64("app_version", uint64(requiredVersion)))
if !common.EnvConfig.AllowDowngrade {
return fmt.Errorf("database version (%d) is newer than application version (%d), downgrades are not allowed (set ALLOW_DOWNGRADE=true to enable)", currentVersion, requiredVersion)
}
slog.Info("Fetching migrations from GitHub to handle possible downgrades")
return migrateDatabaseFromGitHub(sqlDb, requiredVersion)
}
if err := m.Migrate(requiredVersion); err != nil && !errors.Is(err, migrate.ErrNoChange) {
return fmt.Errorf("failed to apply embedded migrations: %w", err)
}
return nil
}
// GetEmbeddedMigrateInstance creates a migrate.Migrate instance using embedded migration files.
func GetEmbeddedMigrateInstance(sqlDb *sql.DB) (*migrate.Migrate, error) {
path := "migrations/" + string(common.EnvConfig.DbProvider)
source, err := iofs.New(resources.FS, path)
if err != nil {
return nil, fmt.Errorf("failed to create embedded migration source: %w", err)
}
driver, err := newMigrationDriver(sqlDb, common.EnvConfig.DbProvider)
if err != nil {
return nil, fmt.Errorf("failed to create migration driver: %w", err)
}
m, err := migrate.NewWithInstance("iofs", source, "pocket-id", driver)
if err != nil {
return nil, fmt.Errorf("failed to create migration instance: %w", err)
}
return m, nil
}
// newMigrationDriver creates a database.Driver instance based on the given database provider.
func newMigrationDriver(sqlDb *sql.DB, dbProvider common.DbProvider) (driver database.Driver, err error) {
switch dbProvider {
case common.DbProviderSqlite:
driver, err = sqliteMigrate.WithInstance(sqlDb, &sqliteMigrate.Config{
NoTxWrap: true,
})
case common.DbProviderPostgres:
driver, err = postgresMigrate.WithInstance(sqlDb, &postgresMigrate.Config{})
default:
// Should never happen at this point
return nil, fmt.Errorf("unsupported database provider: %s", common.EnvConfig.DbProvider)
}
if err != nil {
return nil, fmt.Errorf("failed to create migration driver: %w", err)
}
return driver, nil
}
// migrateDatabaseFromGitHub applies database migrations fetched from GitHub to handle downgrades.
func migrateDatabaseFromGitHub(sqlDb *sql.DB, version uint) error {
srcURL := "github://pocket-id/pocket-id/backend/resources/migrations/" + string(common.EnvConfig.DbProvider)
driver, err := newMigrationDriver(sqlDb, common.EnvConfig.DbProvider)
if err != nil {
return fmt.Errorf("failed to create migration driver: %w", err)
}
m, err := migrate.NewWithDatabaseInstance(srcURL, "pocket-id", driver)
if err != nil {
return fmt.Errorf("failed to create GitHub migration instance: %w", err)
}
if err := m.Migrate(version); err != nil && !errors.Is(err, migrate.ErrNoChange) {
return fmt.Errorf("failed to apply GitHub migrations: %w", err)
}
return nil
}
// getRequiredMigrationVersion reads the embedded migration files and returns the highest version number found.
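// Migration filenames are expected to begin with a numeric version followed by an
// underscore, e.g. "42_add_table.up.sql" is parsed as version 42.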
func getRequiredMigrationVersion(path string) (uint, error) {
entries, err := resources.FS.ReadDir(path)
if err != nil {
return 0, fmt.Errorf("failed to read migration directory: %w", err)
}
var maxVersion uint
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
var version uint
n, err := fmt.Sscanf(name, "%d_", &version)
if err == nil && n == 1 {
if version > maxVersion {
maxVersion = version
}
}
}
return maxVersion, nil
}

View File

@@ -1,116 +0,0 @@
package utils
import (
"fmt"
"strings"
"gorm.io/gorm"
)
// DBTableExists checks if a table exists in the database
func DBTableExists(db *gorm.DB, tableName string) (exists bool, err error) {
switch db.Name() {
case "postgres":
query := `SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = ?
)`
err = db.Raw(query, tableName).Scan(&exists).Error
if err != nil {
return false, err
}
case "sqlite":
query := `SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='table' AND name=?`
err = db.Raw(query, tableName).Scan(&exists).Error
if err != nil {
return false, err
}
default:
return false, fmt.Errorf("unsupported database dialect: %s", db.Name())
}
return exists, nil
}
type DBSchemaColumn struct {
Name string
Nullable bool
}
type DBSchemaTableTypes = map[string]DBSchemaColumn
type DBSchemaTypes = map[string]DBSchemaTableTypes
// LoadDBSchemaTypes retrieves the column types for all tables in the DB
// Result is a map of "table --> column --> {name: column type name, nullable: boolean}"
func LoadDBSchemaTypes(db *gorm.DB) (result DBSchemaTypes, err error) {
result = make(DBSchemaTypes)
switch db.Name() {
case "postgres":
var rows []struct {
TableName string
ColumnName string
DataType string
Nullable bool
}
err := db.
Raw(`
SELECT table_name, column_name, data_type, is_nullable = 'YES' AS nullable
FROM information_schema.columns
WHERE table_schema = 'public';
`).
Scan(&rows).
Error
if err != nil {
return nil, err
}
for _, r := range rows {
t := strings.ToLower(r.DataType)
if result[r.TableName] == nil {
result[r.TableName] = make(map[string]DBSchemaColumn)
}
result[r.TableName][r.ColumnName] = DBSchemaColumn{
Name: strings.ToLower(t),
Nullable: r.Nullable,
}
}
case "sqlite":
var tables []string
err = db.
Raw(`SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%';`).
Scan(&tables).
Error
if err != nil {
return nil, err
}
for _, table := range tables {
var cols []struct {
Name string
Type string
Notnull bool
}
err := db.
Raw(`PRAGMA table_info("` + table + `");`).
Scan(&cols).
Error
if err != nil {
return nil, err
}
for _, c := range cols {
if result[table] == nil {
result[table] = make(map[string]DBSchemaColumn)
}
result[table][c.Name] = DBSchemaColumn{
Name: strings.ToLower(c.Type),
Nullable: !c.Notnull,
}
}
}
default:
return nil, fmt.Errorf("unsupported database dialect: %s", db.Name())
}
return result, nil
}
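The result shape, shown with a hypothetical lookup:

// Sketch: column metadata is keyed by table, then column.
schema, err := LoadDBSchemaTypes(db)
if err != nil {
	return err
}
col := schema["users"]["locale"]
fmt.Println(col.Name, col.Nullable) // e.g. "text true" on SQLite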

View File

@@ -28,14 +28,22 @@ func GetKeyProvider(db *gorm.DB, envConfig *common.EnvConfigSchema, instanceID s
return nil, fmt.Errorf("failed to load encryption key: %w", err)
}
keyProvider = &KeyProviderDatabase{}
// Get the key provider
switch envConfig.KeysStorage {
case "file", "":
keyProvider = &KeyProviderFile{}
case "database":
keyProvider = &KeyProviderDatabase{}
default:
return nil, fmt.Errorf("invalid key storage '%s'", envConfig.KeysStorage)
}
err = keyProvider.Init(KeyProviderOpts{
DB: db,
EnvConfig: envConfig,
Kek: kek,
})
if err != nil {
return nil, fmt.Errorf("failed to init key provider: %w", err)
return nil, fmt.Errorf("failed to init key provider of type '%s': %w", envConfig.KeysStorage, err)
}
return keyProvider, nil
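A hedged usage sketch of the selection above (db and instanceID are assumed to be in scope; this is not code from the diff):

// Sketch: with KEYS_STORAGE=database the database-backed provider is used;
// any value other than "file", "" or "database" now fails fast.
envConfig := &common.EnvConfigSchema{KeysStorage: "database"}
provider, err := GetKeyProvider(db, envConfig, instanceID)
if err != nil {
	return err
}
key, err := provider.LoadKey() // nil key means none has been generated yet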

View File

@@ -0,0 +1,202 @@
package jwk
import (
"encoding/base64"
"fmt"
"os"
"path/filepath"
"github.com/lestrrat-go/jwx/v3/jwk"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/utils"
cryptoutils "github.com/pocket-id/pocket-id/backend/internal/utils/crypto"
)
const (
// PrivateKeyFile is the path in the data/keys folder where the key is stored
// This is a JSON file containing a key encoded as JWK
PrivateKeyFile = "jwt_private_key.json"
// PrivateKeyFileEncrypted is the path in the data/keys folder where the encrypted key is stored
// This is a encrypted JSON file containing a key encoded as JWK
PrivateKeyFileEncrypted = "jwt_private_key.json.enc"
)
type KeyProviderFile struct {
envConfig *common.EnvConfigSchema
kek []byte
}
func (f *KeyProviderFile) Init(opts KeyProviderOpts) error {
f.envConfig = opts.EnvConfig
f.kek = opts.Kek
return nil
}
func (f *KeyProviderFile) LoadKey() (jwk.Key, error) {
if len(f.kek) > 0 {
return f.loadEncryptedKey()
}
return f.loadKey()
}
func (f *KeyProviderFile) SaveKey(key jwk.Key) error {
if len(f.kek) > 0 {
return f.saveKeyEncrypted(key)
}
return f.saveKey(key)
}
func (f *KeyProviderFile) loadKey() (jwk.Key, error) {
var key jwk.Key
// First, check if we have a JWK file
// If we do, then we just load that
jwkPath := f.jwkPath()
ok, err := utils.FileExists(jwkPath)
if err != nil {
return nil, fmt.Errorf("failed to check if private key file exists at path '%s': %w", jwkPath, err)
}
if !ok {
// File doesn't exist, no key was loaded
return nil, nil
}
data, err := os.ReadFile(jwkPath)
if err != nil {
return nil, fmt.Errorf("failed to read private key file at path '%s': %w", jwkPath, err)
}
key, err = jwk.ParseKey(data)
if err != nil {
return nil, fmt.Errorf("failed to parse private key file at path '%s': %w", jwkPath, err)
}
return key, nil
}
func (f *KeyProviderFile) loadEncryptedKey() (key jwk.Key, err error) {
// First, check if we have an encrypted JWK file
// If we do, then we just load that
encJwkPath := f.encJwkPath()
ok, err := utils.FileExists(encJwkPath)
if err != nil {
return nil, fmt.Errorf("failed to check if encrypted private key file exists at path '%s': %w", encJwkPath, err)
}
if ok {
encB64, err := os.ReadFile(encJwkPath)
if err != nil {
return nil, fmt.Errorf("failed to read encrypted private key file at path '%s': %w", encJwkPath, err)
}
// Decode from base64
enc := make([]byte, base64.StdEncoding.DecodedLen(len(encB64)))
n, err := base64.StdEncoding.Decode(enc, encB64)
if err != nil {
return nil, fmt.Errorf("failed to read encrypted private key file at path '%s': not a valid base64-encoded file: %w", encJwkPath, err)
}
// Decrypt the data
data, err := cryptoutils.Decrypt(f.kek, enc[:n], nil)
if err != nil {
return nil, fmt.Errorf("failed to decrypt private key file at path '%s': %w", encJwkPath, err)
}
// Parse the key
key, err = jwk.ParseKey(data)
if err != nil {
return nil, fmt.Errorf("failed to parse encrypted private key file at path '%s': %w", encJwkPath, err)
}
return key, nil
}
// Check if we have an un-encrypted JWK file
key, err = f.loadKey()
if err != nil {
return nil, fmt.Errorf("failed to load un-encrypted key file: %w", err)
}
if key == nil {
// No key exists, encrypted or un-encrypted
return nil, nil
}
// If we are here, we have loaded a key that was un-encrypted
// We need to replace the plaintext key with the encrypted one before we return
err = f.saveKeyEncrypted(key)
if err != nil {
return nil, fmt.Errorf("failed to save encrypted key file: %w", err)
}
jwkPath := f.jwkPath()
err = os.Remove(jwkPath)
if err != nil {
return nil, fmt.Errorf("failed to remove un-encrypted key file at path '%s': %w", jwkPath, err)
}
return key, nil
}
func (f *KeyProviderFile) saveKey(key jwk.Key) error {
err := os.MkdirAll(f.envConfig.KeysPath, 0700)
if err != nil {
return fmt.Errorf("failed to create directory '%s' for key file: %w", f.envConfig.KeysPath, err)
}
jwkPath := f.jwkPath()
keyFile, err := os.OpenFile(jwkPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("failed to create key file at path '%s': %w", jwkPath, err)
}
defer keyFile.Close()
// Write the JSON file to disk
err = EncodeJWK(keyFile, key)
if err != nil {
return fmt.Errorf("failed to write key file at path '%s': %w", jwkPath, err)
}
return nil
}
func (f *KeyProviderFile) saveKeyEncrypted(key jwk.Key) error {
err := os.MkdirAll(f.envConfig.KeysPath, 0700)
if err != nil {
return fmt.Errorf("failed to create directory '%s' for encrypted key file: %w", f.envConfig.KeysPath, err)
}
// Encode the key to JSON
data, err := EncodeJWKBytes(key)
if err != nil {
return fmt.Errorf("failed to encode key to JSON: %w", err)
}
// Encrypt the key then encode to Base64
enc, err := cryptoutils.Encrypt(f.kek, data, nil)
if err != nil {
return fmt.Errorf("failed to encrypt key: %w", err)
}
encB64 := make([]byte, base64.StdEncoding.EncodedLen(len(enc)))
base64.StdEncoding.Encode(encB64, enc)
// Write to disk
encJwkPath := f.encJwkPath()
err = os.WriteFile(encJwkPath, encB64, 0600)
if err != nil {
return fmt.Errorf("failed to write encrypted key file at path '%s': %w", encJwkPath, err)
}
return nil
}
func (f *KeyProviderFile) jwkPath() string {
return filepath.Join(f.envConfig.KeysPath, PrivateKeyFile)
}
func (f *KeyProviderFile) encJwkPath() string {
return filepath.Join(f.envConfig.KeysPath, PrivateKeyFileEncrypted)
}
// Compile-time interface check
var _ KeyProvider = (*KeyProviderFile)(nil)
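As an illustration of the provider's behavior, a minimal round trip (the path, kek, and key variables are assumptions):

// Sketch: with a non-empty KEK the key is written to jwt_private_key.json.enc
// (encrypted, then base64-encoded); with no KEK it is written to
// jwt_private_key.json as plain JWK JSON.
p := &KeyProviderFile{}
err := p.Init(KeyProviderOpts{
	EnvConfig: &common.EnvConfigSchema{KeysPath: "/app/data/keys"},
	Kek:       kek, // 32-byte key-encryption key, or nil for plaintext
})
if err != nil {
	return err
}
if err = p.SaveKey(key); err != nil {
	return err
}
loaded, err := p.LoadKey() // transparently re-encrypts a plaintext key if a KEK is set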

View File

@@ -0,0 +1,320 @@
package jwk
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/base64"
"os"
"path/filepath"
"testing"
"github.com/lestrrat-go/jwx/v3/jwk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/utils"
cryptoutils "github.com/pocket-id/pocket-id/backend/internal/utils/crypto"
)
func TestKeyProviderFile_LoadKey(t *testing.T) {
// Generate a test key to use in our tests
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
key, err := jwk.Import(pk)
require.NoError(t, err)
t.Run("LoadKey with no existing key", func(t *testing.T) {
tempDir := t.TempDir()
provider := &KeyProviderFile{}
err := provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
})
require.NoError(t, err)
// Load key when none exists
loadedKey, err := provider.LoadKey()
require.NoError(t, err)
assert.Nil(t, loadedKey, "Expected nil key when no key exists")
})
t.Run("LoadKey with no existing key (with kek)", func(t *testing.T) {
tempDir := t.TempDir()
provider := &KeyProviderFile{}
err = provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
Kek: makeKEK(t),
})
require.NoError(t, err)
// Load key when none exists
loadedKey, err := provider.LoadKey()
require.NoError(t, err)
assert.Nil(t, loadedKey, "Expected nil key when no key exists")
})
t.Run("LoadKey with unencrypted key", func(t *testing.T) {
tempDir := t.TempDir()
provider := &KeyProviderFile{}
err := provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
})
require.NoError(t, err)
// Save a key
err = provider.SaveKey(key)
require.NoError(t, err)
// Make sure the key file exists
keyPath := filepath.Join(tempDir, PrivateKeyFile)
exists, err := utils.FileExists(keyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected key file to exist")
// Load the key
loadedKey, err := provider.LoadKey()
require.NoError(t, err)
assert.NotNil(t, loadedKey, "Expected non-nil key when key exists")
// Verify the loaded key is the same as the original
keyBytes, err := EncodeJWKBytes(key)
require.NoError(t, err)
loadedKeyBytes, err := EncodeJWKBytes(loadedKey)
require.NoError(t, err)
assert.Equal(t, keyBytes, loadedKeyBytes, "Expected loaded key to match original key")
})
t.Run("LoadKey with encrypted key", func(t *testing.T) {
tempDir := t.TempDir()
provider := &KeyProviderFile{}
err = provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
Kek: makeKEK(t),
})
require.NoError(t, err)
// Save a key (will be encrypted)
err = provider.SaveKey(key)
require.NoError(t, err)
// Make sure the encrypted key file exists
encKeyPath := filepath.Join(tempDir, PrivateKeyFileEncrypted)
exists, err := utils.FileExists(encKeyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected encrypted key file to exist")
// Make sure the unencrypted key file does not exist
keyPath := filepath.Join(tempDir, PrivateKeyFile)
exists, err = utils.FileExists(keyPath)
require.NoError(t, err)
assert.False(t, exists, "Expected unencrypted key file to not exist")
// Load the key
loadedKey, err := provider.LoadKey()
require.NoError(t, err)
assert.NotNil(t, loadedKey, "Expected non-nil key when encrypted key exists")
// Verify the loaded key is the same as the original
keyBytes, err := EncodeJWKBytes(key)
require.NoError(t, err)
loadedKeyBytes, err := EncodeJWKBytes(loadedKey)
require.NoError(t, err)
assert.Equal(t, keyBytes, loadedKeyBytes, "Expected loaded key to match original key")
})
t.Run("LoadKey replaces unencrypted key with encrypted key when kek is provided", func(t *testing.T) {
tempDir := t.TempDir()
// First, create an unencrypted key
providerNoKek := &KeyProviderFile{}
err := providerNoKek.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
})
require.NoError(t, err)
// Save an unencrypted key
err = providerNoKek.SaveKey(key)
require.NoError(t, err)
// Verify unencrypted key exists
keyPath := filepath.Join(tempDir, PrivateKeyFile)
exists, err := utils.FileExists(keyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected unencrypted key file to exist")
// Now create a provider with a kek
kek := make([]byte, 32)
_, err = rand.Read(kek)
require.NoError(t, err)
providerWithKek := &KeyProviderFile{}
err = providerWithKek.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
Kek: kek,
})
require.NoError(t, err)
// Load the key - this should convert the unencrypted key to encrypted
loadedKey, err := providerWithKek.LoadKey()
require.NoError(t, err)
assert.NotNil(t, loadedKey, "Expected non-nil key when loading and converting key")
// Verify the unencrypted key no longer exists
exists, err = utils.FileExists(keyPath)
require.NoError(t, err)
assert.False(t, exists, "Expected unencrypted key file to be removed")
// Verify the encrypted key file exists
encKeyPath := filepath.Join(tempDir, PrivateKeyFileEncrypted)
exists, err = utils.FileExists(encKeyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected encrypted key file to exist after conversion")
// Verify the key data
keyBytes, err := EncodeJWKBytes(key)
require.NoError(t, err)
loadedKeyBytes, err := EncodeJWKBytes(loadedKey)
require.NoError(t, err)
assert.Equal(t, keyBytes, loadedKeyBytes, "Expected loaded key to match original key after conversion")
})
}
func TestKeyProviderFile_SaveKey(t *testing.T) {
// Generate a test key to use in our tests
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
key, err := jwk.Import(pk)
require.NoError(t, err)
t.Run("SaveKey unencrypted", func(t *testing.T) {
tempDir := t.TempDir()
provider := &KeyProviderFile{}
err := provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
})
require.NoError(t, err)
// Save the key
err = provider.SaveKey(key)
require.NoError(t, err)
// Verify the key file exists
keyPath := filepath.Join(tempDir, PrivateKeyFile)
exists, err := utils.FileExists(keyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected key file to exist")
// Verify the content of the key file
data, err := os.ReadFile(keyPath)
require.NoError(t, err)
parsedKey, err := jwk.ParseKey(data)
require.NoError(t, err)
// Compare the saved key with the original
keyBytes, err := EncodeJWKBytes(key)
require.NoError(t, err)
parsedKeyBytes, err := EncodeJWKBytes(parsedKey)
require.NoError(t, err)
assert.Equal(t, keyBytes, parsedKeyBytes, "Expected saved key to match original key")
})
t.Run("SaveKey encrypted", func(t *testing.T) {
tempDir := t.TempDir()
// Generate a 32-byte kek
kek := makeKEK(t)
provider := &KeyProviderFile{}
err = provider.Init(KeyProviderOpts{
EnvConfig: &common.EnvConfigSchema{
KeysPath: tempDir,
},
Kek: kek,
})
require.NoError(t, err)
// Save the key (will be encrypted)
err = provider.SaveKey(key)
require.NoError(t, err)
// Verify the encrypted key file exists
encKeyPath := filepath.Join(tempDir, PrivateKeyFileEncrypted)
exists, err := utils.FileExists(encKeyPath)
require.NoError(t, err)
assert.True(t, exists, "Expected encrypted key file to exist")
// Verify the unencrypted key file doesn't exist
keyPath := filepath.Join(tempDir, PrivateKeyFile)
exists, err = utils.FileExists(keyPath)
require.NoError(t, err)
assert.False(t, exists, "Expected unencrypted key file to not exist")
// Manually decrypt the encrypted key file to verify it contains the correct key
encB64, err := os.ReadFile(encKeyPath)
require.NoError(t, err)
// Decode from base64
enc := make([]byte, base64.StdEncoding.DecodedLen(len(encB64)))
n, err := base64.StdEncoding.Decode(enc, encB64)
require.NoError(t, err)
enc = enc[:n] // Trim any padding
// Decrypt the data
data, err := cryptoutils.Decrypt(kek, enc, nil)
require.NoError(t, err)
// Parse the key
parsedKey, err := jwk.ParseKey(data)
require.NoError(t, err)
// Compare the decrypted key with the original
keyBytes, err := EncodeJWKBytes(key)
require.NoError(t, err)
parsedKeyBytes, err := EncodeJWKBytes(parsedKey)
require.NoError(t, err)
assert.Equal(t, keyBytes, parsedKeyBytes, "Expected decrypted key to match original key")
})
}
func makeKEK(t *testing.T) []byte {
t.Helper()
// Generate a 32-byte kek
kek := make([]byte, 32)
_, err := rand.Read(kek)
require.NoError(t, err)
return kek
}

View File

@@ -38,7 +38,6 @@ func (r *ServiceRunner) Run(ctx context.Context) error {
// Ignore context canceled errors here as they generally indicate that the service is stopping
if rErr != nil && !errors.Is(rErr, context.Canceled) {
cancel()
errCh <- rErr
return
}

View File

@@ -61,26 +61,6 @@ func TestServiceRunner_Run(t *testing.T) {
require.ErrorIs(t, err, expectedErr)
})
t.Run("service error cancels others", func(t *testing.T) {
expectedErr := errors.New("boom")
errorService := func(ctx context.Context) error {
return expectedErr
}
waitingService := func(ctx context.Context) error {
<-ctx.Done()
return ctx.Err()
}
runner := NewServiceRunner(errorService, waitingService)
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
err := runner.Run(ctx)
require.Error(t, err)
require.ErrorIs(t, err, expectedErr)
})
t.Run("context canceled", func(t *testing.T) {
// Create a service that waits until context is canceled
waitingService := func(ctx context.Context) error {

View File

@@ -1 +0,0 @@
../../../tests/database.json

View File

@@ -1,72 +0,0 @@
package resources
import (
"embed"
"slices"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// This test is meant to enforce that for every new migration added, a file with the same migration number exists for all supported databases
// This is necessary to ensure import/export works correctly
// Note: if a migration is not needed for a database, ensure there's a file with an empty (no-op) migration (e.g. even just a comment)
func TestMigrationsMatchingVersions(t *testing.T) {
// We can ignore migrations with version below 20251115000000
const ignoreBefore = 20251115000000
// Scan postgres migrations
postgresMigrations := scanMigrations(t, FS, "migrations/postgres", ignoreBefore)
// Scan sqlite migrations
sqliteMigrations := scanMigrations(t, FS, "migrations/sqlite", ignoreBefore)
// Sort both lists for consistent comparison
slices.Sort(postgresMigrations)
slices.Sort(sqliteMigrations)
// Compare the lists
assert.Equal(t, postgresMigrations, sqliteMigrations, "Migration versions must match between Postgres and SQLite")
}
// scanMigrations scans a directory for migration files and returns a list of versions
func scanMigrations(t *testing.T, fs embed.FS, dir string, ignoreBefore int64) []int64 {
t.Helper()
entries, err := fs.ReadDir(dir)
require.NoErrorf(t, err, "Failed to read directory '%s'", dir)
// Divide by 2 because of up and down files
versions := make([]int64, 0, len(entries)/2)
for _, entry := range entries {
if entry.IsDir() {
continue
}
filename := entry.Name()
// Only consider .up.sql files
if !strings.HasSuffix(filename, ".up.sql") {
continue
}
// Extract version from filename (format: <version>_<anything>.up.sql)
versionString, _, ok := strings.Cut(filename, "_")
require.Truef(t, ok, "Migration file has unexpected format: %s", filename)
version, err := strconv.ParseInt(versionString, 10, 64)
require.NoErrorf(t, err, "Failed to parse version from filename '%s'", filename)
// Exclude migrations with version below ignoreBefore
if version < ignoreBefore {
continue
}
versions = append(versions, version)
}
return versions
}

View File

@@ -1 +0,0 @@
ALTER TABLE one_time_access_tokens DROP COLUMN device_token;

View File

@@ -1 +0,0 @@
ALTER TABLE one_time_access_tokens ADD COLUMN device_token VARCHAR(16);

View File

@@ -1,3 +0,0 @@
-- This migration is part of v2
-- No-op in Postgres

View File

@@ -1,3 +0,0 @@
-- This migration is part of v2
-- No-op in Postgres

View File

@@ -1 +0,0 @@
ALTER TABLE one_time_access_tokens DROP COLUMN device_token;

View File

@@ -1 +0,0 @@
ALTER TABLE one_time_access_tokens ADD COLUMN device_token TEXT;

View File

@@ -1,135 +0,0 @@
-- This migration is part of v2
PRAGMA foreign_keys = OFF;
BEGIN;
CREATE TABLE users_old
(
id TEXT NOT NULL PRIMARY KEY,
created_at DATETIME,
username TEXT COLLATE NOCASE NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
first_name TEXT,
last_name TEXT NOT NULL,
display_name TEXT NOT NULL,
is_admin NUMERIC DEFAULT 0 NOT NULL,
ldap_id TEXT,
locale TEXT,
disabled NUMERIC DEFAULT 0 NOT NULL
);
INSERT INTO users_old (
id,
created_at,
username,
email,
first_name,
last_name,
display_name,
is_admin,
ldap_id,
locale,
disabled
)
SELECT
id,
created_at,
username,
email,
first_name,
last_name,
display_name,
CASE WHEN is_admin THEN 1 ELSE 0 END,
ldap_id,
locale,
CASE WHEN disabled THEN 1 ELSE 0 END
FROM users;
DROP TABLE users;
ALTER TABLE users_old RENAME TO users;
CREATE UNIQUE INDEX users_ldap_id ON users (ldap_id);
CREATE TABLE webauthn_credentials_old
(
id TEXT PRIMARY KEY,
created_at DATETIME NOT NULL,
name TEXT NOT NULL,
credential_id TEXT NOT NULL UNIQUE,
public_key BLOB NOT NULL,
attestation_type TEXT NOT NULL,
transport BLOB NOT NULL,
user_id TEXT REFERENCES users ON DELETE CASCADE,
backup_eligible NUMERIC DEFAULT 0 NOT NULL,
backup_state NUMERIC DEFAULT 0 NOT NULL
);
INSERT INTO webauthn_credentials_old (
id,
created_at,
name,
credential_id,
public_key,
attestation_type,
transport,
user_id,
backup_eligible,
backup_state
)
SELECT
id,
created_at,
name,
credential_id,
public_key,
attestation_type,
transport,
user_id,
CASE WHEN backup_eligible THEN 1 ELSE 0 END,
CASE WHEN backup_state THEN 1 ELSE 0 END
FROM webauthn_credentials;
DROP TABLE webauthn_credentials;
ALTER TABLE webauthn_credentials_old RENAME TO webauthn_credentials;
CREATE TABLE webauthn_sessions_old
(
id TEXT NOT NULL PRIMARY KEY,
created_at DATETIME,
challenge TEXT NOT NULL UNIQUE,
expires_at DATETIME NOT NULL,
user_verification TEXT NOT NULL,
credential_params TEXT DEFAULT '[]' NOT NULL
);
INSERT INTO webauthn_sessions_old (
id,
created_at,
challenge,
expires_at,
user_verification,
credential_params
)
SELECT
id,
created_at,
challenge,
expires_at,
user_verification,
credential_params
FROM webauthn_sessions;
DROP TABLE webauthn_sessions;
ALTER TABLE webauthn_sessions_old RENAME TO webauthn_sessions;
COMMIT;
PRAGMA foreign_keys = ON;

View File

@@ -1,146 +0,0 @@
-- This migration is part of v2
PRAGMA foreign_keys = OFF;
BEGIN;
-- 1. Create a new table with BOOLEAN columns
CREATE TABLE users_new
(
id TEXT NOT NULL PRIMARY KEY,
created_at DATETIME,
username TEXT COLLATE NOCASE NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
first_name TEXT,
last_name TEXT NOT NULL,
display_name TEXT NOT NULL,
is_admin BOOLEAN DEFAULT FALSE NOT NULL,
ldap_id TEXT,
locale TEXT,
disabled BOOLEAN DEFAULT FALSE NOT NULL
);
-- 2. Copy all existing data, converting numeric bools to real booleans
INSERT INTO users_new (
id,
created_at,
username,
email,
first_name,
last_name,
display_name,
is_admin,
ldap_id,
locale,
disabled
)
SELECT
id,
created_at,
username,
email,
first_name,
last_name,
display_name,
CASE WHEN is_admin != 0 THEN TRUE ELSE FALSE END,
ldap_id,
locale,
CASE WHEN disabled != 0 THEN TRUE ELSE FALSE END
FROM users;
-- 3. Drop old table
DROP TABLE users;
-- 4. Rename new table to original name
ALTER TABLE users_new RENAME TO users;
-- 5. Recreate index
CREATE UNIQUE INDEX users_ldap_id ON users (ldap_id);
-- 6. Create temporary table with changed credential_id type to BLOB
CREATE TABLE webauthn_credentials_dg_tmp
(
id TEXT PRIMARY KEY,
created_at DATETIME NOT NULL,
name TEXT NOT NULL,
credential_id BLOB NOT NULL UNIQUE,
public_key BLOB NOT NULL,
attestation_type TEXT NOT NULL,
transport BLOB NOT NULL,
user_id TEXT REFERENCES users ON DELETE CASCADE,
backup_eligible BOOLEAN DEFAULT FALSE NOT NULL,
backup_state BOOLEAN DEFAULT FALSE NOT NULL
);
-- 7. Copy existing data into the temporary table
INSERT INTO webauthn_credentials_dg_tmp (
id,
created_at,
name,
credential_id,
public_key,
attestation_type,
transport,
user_id,
backup_eligible,
backup_state
)
SELECT
id,
created_at,
name,
credential_id,
public_key,
attestation_type,
transport,
user_id,
backup_eligible,
backup_state
FROM webauthn_credentials;
-- 8. Drop old table
DROP TABLE webauthn_credentials;
-- 9. Rename temporary table to original name
ALTER TABLE webauthn_credentials_dg_tmp
RENAME TO webauthn_credentials;
-- 10. Create temporary table with credential_params type changed to BLOB
CREATE TABLE webauthn_sessions_dg_tmp
(
id TEXT NOT NULL PRIMARY KEY,
created_at DATETIME,
challenge TEXT NOT NULL UNIQUE,
expires_at DATETIME NOT NULL,
user_verification TEXT NOT NULL,
credential_params BLOB DEFAULT '[]' NOT NULL
);
-- 11. Copy existing data into the temporary sessions table
INSERT INTO webauthn_sessions_dg_tmp (
id,
created_at,
challenge,
expires_at,
user_verification,
credential_params
)
SELECT
id,
created_at,
challenge,
expires_at,
user_verification,
credential_params
FROM webauthn_sessions;
-- 12. Drop old table
DROP TABLE webauthn_sessions;
-- 13. Rename temporary sessions table to original name
ALTER TABLE webauthn_sessions_dg_tmp
RENAME TO webauthn_sessions;
COMMIT;
PRAGMA foreign_keys = ON;

View File

@@ -349,8 +349,8 @@
"login_code_email_success": "The login code has been sent to the user.",
"send_email": "Send Email",
"show_code": "Show Code",
"callback_url_description": "URL(s) provided by your client. Will be automatically added if left blank. Wildcards (*) are supported, but best avoided for better security.",
"logout_callback_url_description": "URL(s) provided by your client for logout. Wildcards (*) are supported, but best avoided for better security.",
"callback_url_description": "URL(s) provided by your client. Will be automatically added if left blank. <link href='https://pocket-id.org/docs/advanced/callback-url-wildcards'>Wildcards</link> are supported.",
"logout_callback_url_description": "URL(s) provided by your client for logout. <link href='https://pocket-id.org/docs/advanced/callback-url-wildcards'>Wildcards</link> are supported.",
"api_key_expiration": "API Key Expiration",
"send_an_email_to_the_user_when_their_api_key_is_about_to_expire": "Send an email to the user when their API key is about to expire.",
"authorize_device": "Authorize Device",

View File

@@ -7,6 +7,7 @@
import { LucideExternalLink } from '@lucide/svelte';
import type { Snippet } from 'svelte';
import type { HTMLAttributes } from 'svelte/elements';
import FormattedMessage from '../formatted-message.svelte';
let {
input = $bindable(),
@@ -40,7 +41,7 @@
{/if}
{#if description}
<p class="text-muted-foreground mt-1 text-xs">
{description}
<FormattedMessage m={description} />
{#if docsLink}
<a
class="relative text-black after:absolute after:bottom-0 after:left-0 after:h-px after:w-full after:translate-y-[-1px] after:bg-white dark:text-white"

View File

@@ -47,7 +47,7 @@ export type AllAppConfig = AppConfig & {
ldapAttributeGroupMember: string;
ldapAttributeGroupUniqueIdentifier: string;
ldapAttributeGroupName: string;
ldapAdminGroupName: string;
ldapAttributeAdminGroup: string;
ldapSoftDeleteUsers: boolean;
};

View File

@@ -43,7 +43,7 @@
ldapAttributeGroupMember: z.string().optional(),
ldapAttributeGroupUniqueIdentifier: z.string().min(1),
ldapAttributeGroupName: z.string().min(1),
ldapAdminGroupName: z.string().optional(),
ldapAttributeAdminGroup: z.string().optional(),
ldapSoftDeleteUsers: z.boolean()
});
@@ -193,7 +193,7 @@
label={m.admin_group_name()}
description={m.members_of_this_group_will_have_admin_privileges_in_pocketid()}
placeholder="_admin_group_name"
bind:input={$inputs.ldapAdminGroupName}
bind:input={$inputs.ldapAttributeAdminGroup}
/>
</div>
</fieldset>

pnpm-lock.yaml generated (36 lines changed)
View File

@@ -184,20 +184,13 @@ importers:
version: 7.2.7(@types/node@24.10.2)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.21.0)(yaml@2.8.1)
tests:
dependencies:
adm-zip:
specifier: ^0.5.16
version: 0.5.16
devDependencies:
'@playwright/test':
specifier: ^1.57.0
version: 1.57.0
'@types/adm-zip':
specifier: ^0.5.7
version: 0.5.7
'@types/node':
specifier: ^22.18.12
version: 22.19.1
specifier: ^24.10.1
version: 24.10.1
dotenv:
specifier: ^17.2.3
version: 17.2.3
@@ -1336,9 +1329,6 @@ packages:
peerDependencies:
vite: ^5.2.0 || ^6 || ^7
'@types/adm-zip@0.5.7':
resolution: {integrity: sha512-DNEs/QvmyRLurdQPChqq0Md4zGvPwHerAJYWk9l2jCbD1VPpnzRJorOdiq4zsw09NFbYnhfsoEhWtxIzXpn2yw==}
'@types/cookie@0.6.0':
resolution: {integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==}
@@ -1354,9 +1344,6 @@ packages:
'@types/json-schema@7.0.15':
resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==}
'@types/node@22.19.1':
resolution: {integrity: sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==}
'@types/node@24.10.1':
resolution: {integrity: sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==}
@@ -1474,10 +1461,6 @@ packages:
engines: {node: '>=0.4.0'}
hasBin: true
adm-zip@0.5.16:
resolution: {integrity: sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==}
engines: {node: '>=12.0'}
ajv-formats@3.0.1:
resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==}
peerDependencies:
@@ -3005,9 +2988,6 @@ packages:
resolution: {integrity: sha512-rvKSBiC5zqCCiDZ9kAOszZcDvdAHwwIKJG33Ykj43OKcWsnmcBRL09YTU4nOeHZ8Y2a7l1MgTd08SBe9A8Qj6A==}
engines: {node: '>=18'}
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
@@ -4050,10 +4030,6 @@ snapshots:
tailwindcss: 4.1.17
vite: 7.2.7(@types/node@24.10.2)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.21.0)(yaml@2.8.1)
'@types/adm-zip@0.5.7':
dependencies:
'@types/node': 22.19.1
'@types/cookie@0.6.0': {}
'@types/cors@2.8.19':
@@ -4069,10 +4045,6 @@ snapshots:
'@types/json-schema@7.0.15': {}
'@types/node@22.19.1':
dependencies:
undici-types: 6.21.0
'@types/node@24.10.1':
dependencies:
undici-types: 7.16.0
@@ -4227,8 +4199,6 @@ snapshots:
acorn@8.15.0: {}
adm-zip@0.5.16: {}
ajv-formats@3.0.1(ajv@8.17.1):
optionalDependencies:
ajv: 8.17.1
@@ -5744,8 +5714,6 @@ snapshots:
uint8array-extras@1.5.0: {}
undici-types@6.21.0: {}
undici-types@7.16.0: {}
unplugin@2.3.10:

View File

@@ -14,16 +14,19 @@ PGID=${PGID:-1000}
# Check if the group with PGID exists; if not, create it
if ! getent group pocket-id-group > /dev/null 2>&1; then
echo "Creating group $PGID..."
addgroup -g "$PGID" pocket-id-group
fi
# Check if a user with PUID exists; if not, create it
if ! id -u pocket-id > /dev/null 2>&1; then
if ! getent passwd "$PUID" > /dev/null 2>&1; then
adduser -uD "$PUID" -G pocket-id-group pocket-id > /dev/null 2>&1
echo "Creating user $PUID..."
adduser -u "$PUID" -G pocket-id-group pocket-id > /dev/null 2>&1
else
# If a user with the PUID already exists, use that user
existing_user=$(getent passwd "$PUID" | cut -d: -f1)
echo "Using existing user: $existing_user"
fi
fi

View File

Image: 528 KiB before and after (content unchanged)

View File

Image: 88 KiB before and after (content unchanged)

View File

Image: 58 KiB before and after (content unchanged)

View File

Image: 15 KiB before and after (content unchanged)

View File

@@ -8,13 +8,9 @@
},
"devDependencies": {
"@playwright/test": "^1.57.0",
"@types/adm-zip": "^0.5.7",
"@types/node": "^22.18.12",
"@types/node": "^24.10.1",
"dotenv": "^17.2.3",
"jose": "^6.1.2",
"prettier": "^3.7.0"
},
"dependencies": {
"adm-zip": "^0.5.16"
}
}

View File

@@ -21,15 +21,11 @@ export default defineConfig({
trace: 'on-first-retry'
},
projects: [
{ name: 'cli', testMatch: /cli\.spec\.ts/ },
{ name: 'auth-setup', testMatch: /auth\.setup\.ts/ },
{ name: 'setup', testMatch: /.*\.setup\.ts/ },
{
name: 'browser-chrome',
use: { ...devices['Desktop Chrome'], storageState: '.tmp/auth/user.json' },
testIgnore: /cli\.spec\.ts/,
dependencies: ['auth-setup']
name: 'chromium',
use: { ...devices['Desktop Chrome'], storageState: '.auth/user.json' },
dependencies: ['setup']
}
],
globalSetup: './specs/fixtures/global.setup.ts',
globalTeardown: './specs/fixtures/global.teardown.ts'
]
});

View File

@@ -1,312 +0,0 @@
{
"provider": "sqlite",
"version": 20251117141000,
"tableOrder": ["users", "user_groups", "oidc_clients"],
"tables": {
"api_keys": [
{
"created_at": "2025-11-25T12:39:02Z",
"description": null,
"expiration_email_sent": false,
"expires_at": "2025-12-25T12:39:02Z",
"id": "5f1fa856-c164-4295-961e-175a0d22d725",
"key": "6c34966f57ef2bb7857649aff0e7ab3ad67af93c846342ced3f5a07be8706c20",
"last_used_at": null,
"name": "Test API Key",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
}
],
"app_config_variables": [
{
"key": "instanceId",
"value": "test-instance-id"
}
],
"kv": [
{
"key": "jwt_private_key.json",
"value": "7d/5hl7diJ2rnFL14hEAQf9tzpu29aqXQ8jpJ2iqqKUNFZpdOkEpud0CmRv4H3r8yyk2u/Gqqj9klSy58DJkYXGF5PAYgLyoBIb7L3JXWRbxg4cQ3QJCug13l2OTmpAKoVc+rmX8c3j3h1sNqyJ+7Ql5sS0jSeyiYgIsFNCdnK5alBDyvtcpe/QDpklmP4JCeVpvmf2rLGplk3g5UO5ydJ8UiDXxfDmi+gF6NKJvrGnnah8Ar3G/x88z+tTJtp0DIQFwxXwUM2XZqzEVGm8K2r0w5o9/Keh6bBBaiuH2C78ZOaijGV3DovhR+e9J0cYUYGwT42MZMx9fSWQ/lvWGGnf+Uq3MXJfjWSREfhkp8KTQwR9F7+dnVJWswOEk7jPR8I7hCWTMxJyvaFX3wgAXIVmhrgXZQQbYOqTt56IoqUl0xOJku8dA8opg2UcLlmmuOh6+hfkXKsiiS/H/9c1BVIGj1fCOiT6IePh4wKKSTbwJnPD5EKmdJpgTsUpjcDnXQKY4ReO0UpdRdKxwRDDLeQuG6j+ljGxR9GPudCU9Nmci6rFVI6n5LWYkQxBA1O73RpmXRZPDzntDfpXMEonkmSvOoxaCK2Id7CRKMdqvR0kEouwnhk5WSFtsfi3sA0pkXzPFxwZeWM8vFtbffZOZzXaOhxCOfcj1NClZohlZhyc4jvkxmrpY7PSaAzih0AmHI7y0LYFi6fZu/K4EheVa1+KF55nWZ8ARikHMWKAKkyExkTak7xyN884TDmzURRaPlQg4jzQte5WMNjAG/hlHibdMBNvgwiYd49ZxteJ8ABdbiXVRl+2JGbdjl2ubpQZwOn7bJKlqO56bIwsZ+e4+pXsuOGdBahkHrUjtMEmH3DZbGc6CJLbcmdhdpApLQRRcLAazxJhzAwJ47FRYsHsj57LnYNvmcKdIxw8rxCdLUuzz95uw0T3ankEO5J9sjem+HMEuKdwXK1UcuOn2rjR8Sd/BuvQmeso27dFbPXqXYNS90Ml45YyTvcKSiopD181oZR703TFUSpR7dsiqROMr+p/2jN9h6a8WbQ8xpksyclaQByY/M77AssbXnG6wfhRsntNIINCZLbBnjXOyz6ZHIC5K4tSTdcnWaiYPeRPQmnw9UUvHAcNU2yMWsy0eU377yDS0WstTxOdQutTdkczl8kv5Lo26JiEK7mSIuRK19ffF9Zz8FG8+eKv5zdyIPjyQRDYBysUoDv5huKe2eoxJu/MWS2Pql/ZtUGeD6Ozm3mCvh0vQ9ceagBkY6Ocm3du0ziAKP29Ri0mjg4DizVorbLzsh+EQH/s2Pi9MnjUZDlEmuLl2Xfp7/w4j/8u0N0tVR70VDFuGdKpTjFY3vS8EJrPtyMTM51x1D9rb8gIql8aR/rJw4YF+huxg1mv5n6+tGVqg5msbPmF12eJijP4lkmaRwIpLW5pJTtaDkUj7uOeu1mm4k+Dt5nh0/0jPHzrv6bcTCcbV7UjMHDoTXXqEpFAAJ66rHR7zdAJu+YKsnTIZyLmOpcowq7LL8G9qTvV0OSpyQWUIavRSgbDHFqEqRs+JU94jAzkq8nCY5MTd9m5sIv9InfdT3k+pwpsE/FKge8nghFLtbUrafGkzTky8SE2druvVcIvbfXMfLIKRUYjJgnWc0gQzF5J6pzXM7D2r/RG6JDzASqjlbURq6v9bhNerlOVdMujWKEEVcKWIzlbt4RkihRjM8AUqIZQOyicGQ+4yfIjAHw5viuABONYs3OIWULnFqJxdvS9rNKhfxSjIq9cfqyzevq2xrRoMXEonobh6M3bD2Vang8OAeVeD1OXWPERi4pepCYFS9RJ/Xa/UWxptsqSNuGcb3fAzQSmLpXLGdWRoKXvSe7EYgc0bGcLOjSTu5RURKo+EF9i4KT9EJauf6VXw5dTf/CCIJRXE1bWzXhSCFYntohYhX2ldOCDYpi/jFBC6Vtkw0ud3/xq8Nmhd5gUk+SpngByCZH3Pm3H+jvlbMpiqkDkm1v74hDX13Xhrcw2eWyuqKBVoRCCniUvwpYNbGvBfjC6Hcizv0Aybciwj+4nybt5EPoEUm6S6Gs7fG7QpPdvrzpAxX70MlmdkF/gwyuhbEeJhLK+WL7qAsN5CvHPzVbsIf90x+nGTtMJPgpxVr0tJMj+vprXV4WxutfARBiOnqe58MhA857sd+MzKBgKnoLOBRTiC3qc/0/ULwbG2HCCD7nmwzz7M4nUuMvo8rgS7z0BF68OClT8X3JwSXbL5Wg=="
}
],
"oidc_authorization_codes": [
{
"client_id": "3654a746-35d4-4321-ac61-0bdcff2b4055",
"code": "auth-code",
"code_challenge": null,
"code_challenge_method_sha256": null,
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-25T13:39:02Z",
"id": "6bdd221e-d9f7-4e3d-92c0-4be125802ba2",
"nonce": "nonce",
"scope": "openid profile",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"client_id": "7c21a609-96b5-4011-9900-272b8d31a9d1",
"code": "federated",
"code_challenge": null,
"code_challenge_method_sha256": null,
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-25T13:39:02Z",
"id": "37e914bd-ff2c-4653-8cd8-550f0213e430",
"nonce": "nonce",
"scope": "openid profile",
"user_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036"
}
],
"oidc_clients": [
{
"callback_urls": "WyJodHRwOi8vbmV4dGNsb3VkL2F1dGgvY2FsbGJhY2siXQ==",
"created_at": "2025-11-25T12:39:02Z",
"created_by_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e",
"credentials": "e30=",
"dark_image_type": null,
"id": "3654a746-35d4-4321-ac61-0bdcff2b4055",
"image_type": "png",
"is_public": false,
"launch_url": "https://nextcloud.local",
"logout_callback_urls": "WyJodHRwOi8vbmV4dGNsb3VkL2F1dGgvbG9nb3V0L2NhbGxiYWNrIl0=",
"name": "Nextcloud",
"pkce_enabled": false,
"requires_reauthentication": false,
"secret": "$2a$10$9dypwot8nGuCjT6wQWWpJOckZfRprhe2EkwpKizxS/fpVHrOLEJHC"
},
{
"callback_urls": "WyJodHRwOi8vaW1taWNoL2F1dGgvY2FsbGJhY2siXQ==",
"created_at": "2025-11-25T12:39:02Z",
"created_by_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036",
"credentials": "e30=",
"dark_image_type": null,
"id": "606c7782-f2b1-49e5-8ea9-26eb1b06d018",
"image_type": null,
"is_public": false,
"launch_url": null,
"logout_callback_urls": "bnVsbA==",
"name": "Immich",
"pkce_enabled": false,
"requires_reauthentication": false,
"secret": "$2a$10$Ak.FP8riD1ssy2AGGbG.gOpnp/rBpymd74j0nxNMtW0GG1Lb4gzxe"
},
{
"callback_urls": "WyJodHRwOi8vdGFpbHNjYWxlL2F1dGgvY2FsbGJhY2siXQ==",
"created_at": "2025-11-25T12:39:02Z",
"created_by_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e",
"credentials": "e30=",
"dark_image_type": null,
"id": "7c21a609-96b5-4011-9900-272b8d31a9d1",
"image_type": null,
"is_public": false,
"launch_url": null,
"logout_callback_urls": "WyJodHRwOi8vdGFpbHNjYWxlL2F1dGgvbG9nb3V0L2NhbGxiYWNrIl0=",
"name": "Tailscale",
"pkce_enabled": false,
"requires_reauthentication": false,
"secret": "$2a$10$xcRReBsvkI1XI6FG8xu/pOgzeF00bH5Wy4d/NThwcdi3ZBpVq/B9a"
},
{
"callback_urls": "WyJodHRwOi8vZmVkZXJhdGVkL2F1dGgvY2FsbGJhY2siXQ==",
"created_at": "2025-11-25T12:39:02Z",
"created_by_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036",
"credentials": "eyJmZWRlcmF0ZWRJZGVudGl0aWVzIjpbeyJpc3N1ZXIiOiJodHRwczovL2V4dGVybmFsLWlkcC5sb2NhbCIsInN1YmplY3QiOiJjNDgyMzJmZi1mZjY1LTQ1ZWQtYWU5Ni03YWZhOGE5YjQ0M2IiLCJhdWRpZW5jZSI6ImFwaTovL1BvY2tldElEIiwiandrcyI6Imh0dHA6Ly9sb2NhbGhvc3Q6MTQxMS9hcGkvZXh0ZXJuYWxpZHAvandrcy5qc29uIn1dfQ==",
"dark_image_type": null,
"id": "c48232ff-ff65-45ed-ae96-7afa8a9b443b",
"image_type": null,
"is_public": false,
"launch_url": null,
"logout_callback_urls": "bnVsbA==",
"name": "Federated",
"pkce_enabled": false,
"requires_reauthentication": false,
"secret": "$2a$10$Ak.FP8riD1ssy2AGGbG.gOpnp/rBpymd74j0nxNMtW0GG1Lb4gzxe"
}
],
"oidc_clients_allowed_user_groups": [
{
"oidc_client_id": "606c7782-f2b1-49e5-8ea9-26eb1b06d018",
"user_group_id": "adab18bf-f89d-4087-9ee1-70ff15b48211"
}
],
"oidc_refresh_tokens": [
{
"client_id": "3654a746-35d4-4321-ac61-0bdcff2b4055",
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-26T12:39:02Z",
"id": "4928604e-e689-410c-9b25-5b9b6db9e46e",
"scope": "openid profile email",
"token": "fef6e2e37eb990f0bd7abd48a41d530c54b6a1f139b556e35e62475e6f4cb38d",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
}
],
"one_time_access_tokens": [
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-25T13:39:02Z",
"id": "bf877753-4ea4-4c9c-bbbd-e198bb201cb8",
"token": "HPe6k6uiDRRVuAQV",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-25T12:39:01Z",
"id": "d3afae24-fe2d-4a98-abec-cf0b8525096a",
"token": "YCGDtftvsvYWiXd0",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-25T13:39:02Z",
"id": "defd5164-9d9b-4228-bbce-708e33f49360",
"token": "one-time-token",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
}
],
"signup_tokens": [
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-26T12:39:02Z",
"id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
"token": "VALID1234567890A",
"usage_count": 0,
"usage_limit": 1
},
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-12-02T12:39:02Z",
"id": "dc3c9c96-714e-48eb-926e-2d7c7858e6cf",
"token": "PARTIAL567890ABC",
"usage_count": 2,
"usage_limit": 5
},
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-24T12:39:02Z",
"id": "44de1863-ffa5-4db1-9507-4887cd7a1e3f",
"token": "EXPIRED34567890B",
"usage_count": 1,
"usage_limit": 3
},
{
"created_at": "2025-11-25T12:39:02Z",
"expires_at": "2025-11-26T12:39:02Z",
"id": "f1b1678b-7720-4d8b-8f91-1dbff1e2d02b",
"token": "FULLYUSED567890C",
"usage_count": 1,
"usage_limit": 1
}
],
"user_authorized_oidc_clients": [
{
"client_id": "3654a746-35d4-4321-ac61-0bdcff2b4055",
"last_used_at": "2025-08-01T13:00:00Z",
"scope": "openid profile email",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"client_id": "7c21a609-96b5-4011-9900-272b8d31a9d1",
"last_used_at": "2025-08-10T14:00:00Z",
"scope": "openid profile email",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"client_id": "c48232ff-ff65-45ed-ae96-7afa8a9b443b",
"last_used_at": "2025-08-12T12:00:00Z",
"scope": "openid profile email",
"user_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036"
}
],
"user_groups": [
{
"created_at": "2025-11-25T12:39:02Z",
"friendly_name": "Developers",
"id": "c7ae7c01-28a3-4f3c-9572-1ee734ea8368",
"ldap_id": null,
"name": "developers"
},
{
"created_at": "2025-11-25T12:39:02Z",
"friendly_name": "Designers",
"id": "adab18bf-f89d-4087-9ee1-70ff15b48211",
"ldap_id": null,
"name": "designers"
}
],
"user_groups_users": [
{
"user_group_id": "c7ae7c01-28a3-4f3c-9572-1ee734ea8368",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"user_group_id": "c7ae7c01-28a3-4f3c-9572-1ee734ea8368",
"user_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036"
},
{
"user_group_id": "adab18bf-f89d-4087-9ee1-70ff15b48211",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
}
],
"users": [
{
"created_at": "2025-11-25T12:39:02Z",
"disabled": false,
"display_name": "Tim Cook",
"email": "tim.cook@test.com",
"first_name": "Tim",
"id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e",
"is_admin": true,
"last_name": "Cook",
"ldap_id": null,
"locale": null,
"username": "tim"
},
{
"created_at": "2025-11-25T12:39:02Z",
"disabled": false,
"display_name": "Craig Federighi",
"email": "craig.federighi@test.com",
"first_name": "Craig",
"id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036",
"is_admin": false,
"last_name": "Federighi",
"ldap_id": null,
"locale": null,
"username": "craig"
}
],
"webauthn_credentials": [
{
"attestation_type": "none",
"backup_eligible": false,
"backup_state": false,
"created_at": "2025-11-25T12:39:02Z",
"credential_id": "dGVzdC1jcmVkZW50aWFsLXRpbQ==",
"id": "fa7977f9-7cf8-40fa-abca-42b917b6e692",
"name": "Passkey 1",
"public_key": "pQMmIAEhWCDBw6jkpXXr0pHrtAQetxiR5cTcILG/YGDCdKrhVhNDHCJYIIu12YrF6B7Frwl3AUqEpdrYEwj3Fo3XkGgvrBIJEUmGAQI=",
"transport": "WyJpbnRlcm5hbCJd",
"user_id": "f4b89dc2-62fb-46bf-9f5f-c34f4eafe93e"
},
{
"attestation_type": "none",
"backup_eligible": false,
"backup_state": false,
"created_at": "2025-11-25T12:39:02Z",
"credential_id": "dGVzdC1jcmVkZW50aWFsLWNyYWln",
"id": "4bcc54ef-01d1-4970-be51-669ccd8c0198",
"name": "Passkey 2",
"public_key": "pSJYIPmc+FlEB0neERqqscxKckGF8yq1AYrANiloshAUAouHAQIDJiABIVggj4qA0PrZzg8Co1C27nyUbzrp8Ewjr7eOlGI2LfrzmbI=",
"transport": "WyJpbnRlcm5hbCJd",
"user_id": "1cd19686-f9a6-43f4-a41f-14a0bf5b4036"
}
],
"webauthn_sessions": [
{
"challenge": "challenge",
"created_at": "2025-11-25T12:39:02Z",
"credential_params": "W3sidHlwZSI6InB1YmxpYy1rZXkiLCJhbGciOi03fSx7InR5cGUiOiJwdWJsaWMta2V5IiwiYWxnIjotMjU3fV0=",
"expires_at": "2025-11-25T13:39:02Z",
"id": "267f6907-7bc8-4ea1-9d47-c42a172dc1c7",
"user_verification": "preferred"
}
]
}
}

View File

@@ -1 +0,0 @@
../../../../backend/resources/images/

Binary image removed (6.0 KiB)

Binary image removed (5.7 KiB)

View File

@@ -11,7 +11,7 @@ services:
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=pocket-id
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
test: ['CMD-SHELL', 'pg_isready -U postgres']
interval: 5s
timeout: 5s
retries: 5
@@ -21,11 +21,9 @@ services:
service: pocket-id
environment:
- APP_ENV=test
- DB_PROVIDER=postgres
- DB_CONNECTION_STRING=postgres://postgres:postgres@postgres:5432/pocket-id
- FILE_BACKEND=${FILE_BACKEND}
depends_on:
postgres:
condition: service_healthy
volumes:
pocket-id-test-data:

View File

@@ -28,16 +28,14 @@ services:
file: docker-compose.yml
service: pocket-id
environment:
FILE_BACKEND: s3
S3_BUCKET: pocket-id-test
S3_REGION: us-east-1
S3_ENDPOINT: http://localstack-s3:4566
S3_ACCESS_KEY_ID: test
S3_SECRET_ACCESS_KEY: test
S3_FORCE_PATH_STYLE: true
- S3_BUCKET=pocket-id-test
- S3_REGION=us-east-1
- S3_ENDPOINT=http://localstack-s3:4566
- S3_ACCESS_KEY_ID=test
- S3_SECRET_ACCESS_KEY=test
- S3_FORCE_PATH_STYLE=true
- KEYS_STORAGE=database
- ENCRYPTION_KEY=test1234test1234test1234test1234
depends_on:
create-bucket:
condition: service_completed_successfully
volumes:
pocket-id-test-data:

View File

@@ -11,18 +11,12 @@ services:
pocket-id:
image: pocket-id:test
ports:
- "1411:1411"
- '1411:1411'
environment:
APP_ENV: test
ENCRYPTION_KEY: test-encryption-key
FILE_BACKEND: ${FILE_BACKEND}
volumes:
- pocket-id-test-data:/app/data
- APP_ENV=test
- FILE_BACKEND=${FILE_BACKEND}
build:
args:
- BUILD_TAGS=e2etest
context: ../..
dockerfile: docker/Dockerfile
volumes:
pocket-id-test-data:

View File

@@ -119,11 +119,11 @@ test('Update email configuration', async ({ page }) => {
test('Update application images', async ({ page }) => {
await page.getByRole('button', { name: 'Expand card' }).nth(4).click();
await page.getByLabel('Favicon').setInputFiles('resources/images/w3-schools-favicon.ico');
await page.getByLabel('Light Mode Logo').setInputFiles('resources/images/pingvin-share-logo.png');
await page.getByLabel('Dark Mode Logo').setInputFiles('resources/images/nextcloud-logo.png');
await page.getByLabel('Default Profile Picture').setInputFiles('resources/images/pingvin-share-logo.png');
await page.getByLabel('Background Image').setInputFiles('resources/images/clouds.jpg');
await page.getByLabel('Favicon').setInputFiles('assets/w3-schools-favicon.ico');
await page.getByLabel('Light Mode Logo').setInputFiles('assets/pingvin-share-logo.png');
await page.getByLabel('Dark Mode Logo').setInputFiles('assets/nextcloud-logo.png');
await page.getByLabel('Default Profile Picture').setInputFiles('assets/pingvin-share-logo.png');
await page.getByLabel('Background Image').setInputFiles('assets/clouds.jpg');
await page.getByRole('button', { name: 'Save' }).last().click();
await expect(page.locator('[data-type="success"]')).toHaveText(

View File

@@ -1,9 +1,8 @@
import { test as setup } from '@playwright/test';
import { pathFromRoot } from 'utils/fs.util';
import authUtil from '../../utils/auth.util';
import { cleanupBackend } from '../../utils/cleanup.util';
import authUtil from '../utils/auth.util';
import { cleanupBackend } from '../utils/cleanup.util';
const authFile = pathFromRoot('.tmp/auth/user.json');
const authFile = './.auth/user.json';
setup('authenticate', async ({ page }) => {
await cleanupBackend();

View File

@@ -1,366 +0,0 @@
import { expect, test } from '@playwright/test';
import AdmZip from 'adm-zip';
import { execFileSync, ExecFileSyncOptions } from 'child_process';
import crypto from 'crypto';
import { users } from 'data';
import fs from 'fs';
import path from 'path';
import { cleanupBackend } from 'utils/cleanup.util';
import { pathFromRoot, tmpDir } from 'utils/fs.util';
const containerName = 'pocket-id';
const setupDir = pathFromRoot('setup');
const exampleExportPath = pathFromRoot('resources/export');
const dockerCommandMaxBuffer = 100 * 1024 * 1024;
let mode: 'sqlite' | 'postgres' | 's3' = 'sqlite';
test.beforeAll(() => {
const dockerComposeLs = runDockerCommand(['compose', 'ls', '--format', 'json']);
if (dockerComposeLs.includes('postgres')) {
mode = 'postgres';
} else if (dockerComposeLs.includes('s3')) {
mode = 's3';
}
console.log(`Running CLI tests in ${mode.toUpperCase()} mode`);
});
test('Export', async ({ baseURL }) => {
// Reset the backend but with LDAP setup because the example export has no LDAP data
await cleanupBackend({ skipLdapSetup: true });
// Fetch the profile pictures because they get generated on demand
await Promise.all([
fetch(`${baseURL}/api/users/${users.craig.id}/profile-picture.png`),
fetch(`${baseURL}/api/users/${users.tim.id}/profile-picture.png`)
]);
// Export the data from the seeded container
const exportPath = path.join(tmpDir, 'export.zip');
const extractPath = path.join(tmpDir, 'export-extracted');
runExport(exportPath);
unzipExport(exportPath, extractPath);
compareExports(exampleExportPath, extractPath);
});
test('Export via stdout', async ({ baseURL }) => {
await cleanupBackend({ skipLdapSetup: true });
await Promise.all([
fetch(`${baseURL}/api/users/${users.craig.id}/profile-picture.png`),
fetch(`${baseURL}/api/users/${users.tim.id}/profile-picture.png`)
]);
const stdoutBuffer = runExportToStdout();
const stdoutExtractPath = path.join(tmpDir, 'export-stdout-extracted');
unzipExportBuffer(stdoutBuffer, stdoutExtractPath);
compareExports(exampleExportPath, stdoutExtractPath);
});
test('Import', async () => {
// Reset the backend without seeding
await cleanupBackend({ skipSeed: true });
// Run the import with the example export data
const exampleExportArchivePath = path.join(tmpDir, 'example-export.zip');
archiveExampleExport(exampleExportArchivePath);
try {
runDockerComposeCommand(['stop', containerName]);
runImport(exampleExportArchivePath);
} finally {
runDockerComposeCommand(['up', '-d', containerName]);
}
// Export again from the imported instance
const exportPath = path.join(tmpDir, 'export.zip');
const exportExtracted = path.join(tmpDir, 'export-extracted');
runExport(exportPath);
unzipExport(exportPath, exportExtracted);
compareExports(exampleExportPath, exportExtracted);
});
test('Import via stdin', async () => {
await cleanupBackend({ skipSeed: true });
const exampleExportArchivePath = path.join(tmpDir, 'example-export-stdin.zip');
const exampleExportBuffer = archiveExampleExport(exampleExportArchivePath);
try {
runDockerComposeCommand(['stop', containerName]);
runImportFromStdin(exampleExportBuffer);
} finally {
runDockerComposeCommand(['up', '-d', containerName]);
}
const exportPath = path.join(tmpDir, 'export-from-stdin.zip');
const exportExtracted = path.join(tmpDir, 'export-from-stdin-extracted');
runExport(exportPath);
unzipExport(exportPath, exportExtracted);
compareExports(exampleExportPath, exportExtracted);
});
function compareExports(dir1: string, dir2: string): void {
const hashes1 = hashAllFiles(dir1);
const hashes2 = hashAllFiles(dir2);
const files1 = Object.keys(hashes1).sort();
const files2 = Object.keys(hashes2).sort();
expect(files2).toEqual(files1);
for (const file of files1) {
expect(hashes2[file], `${file} hash should match`).toEqual(hashes1[file]);
}
// Compare database.json contents
const expectedData = loadJSON(path.join(dir1, 'database.json'));
const actualData = loadJSON(path.join(dir2, 'database.json'));
// Check special fields
validateSpecialFields(actualData);
// Normalize and compare
const normalizedExpected = normalizeJSON(expectedData);
const normalizedActual = normalizeJSON(actualData);
expect(normalizedActual).toEqual(normalizedExpected);
}
function archiveExampleExport(outputPath: string): Buffer {
fs.rmSync(outputPath, { force: true });
const zip = new AdmZip();
const files = fs.readdirSync(exampleExportPath);
for (const file of files) {
const filePath = path.join(exampleExportPath, file);
if (fs.statSync(filePath).isFile()) {
zip.addLocalFile(filePath);
} else if (fs.statSync(filePath).isDirectory()) {
zip.addLocalFolder(filePath, file);
}
}
const buffer = zip.toBuffer();
fs.writeFileSync(outputPath, buffer);
return buffer;
}
// Helper to load JSON files
function loadJSON(path: string) {
return JSON.parse(fs.readFileSync(path, 'utf-8'));
}
function normalizeJSON(obj: any): any {
if (typeof obj === 'string') {
try {
// Normalize JSON strings
const parsed = JSON.parse(atob(obj));
return JSON.stringify(normalizeJSON(parsed));
} catch {
return obj;
}
}
if (Array.isArray(obj)) {
// Sort arrays to make order irrelevant
return obj
.map(normalizeJSON)
.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
} else if (obj && typeof obj === 'object') {
const ignoredKeys = ['id', 'created_at', 'expires_at', 'credentials', 'provider', 'version'];
// Sort and normalize object keys, skipping ignored ones
return Object.keys(obj)
.filter((key) => !ignoredKeys.includes(key))
.sort()
.reduce(
(acc, key) => {
acc[key] = normalizeJSON(obj[key]);
return acc;
},
{} as Record<string, any>
);
}
return obj;
}
function validateSpecialFields(obj: any): void {
if (Array.isArray(obj)) {
for (const item of obj) validateSpecialFields(item);
} else if (obj && typeof obj === 'object') {
for (const [key, value] of Object.entries(obj)) {
if (key === 'id') {
expect(isUUID(value), `Expected '${value}' to be a valid UUID`).toBe(true);
} else if (key === 'created_at' || key === 'expires_at') {
expect(
isValidISODate(value),
`Expected '${key}' = ${value} to be a valid ISO 8601 date string`
).toBe(true);
} else if (key === 'provider') {
expect(
['postgres', 'sqlite'].includes(value as string),
`Expected 'provider' to be either 'postgres' or 'sqlite', got '${value}'`
).toBe(true);
} else if (key === 'version') {
expect(value).toBeGreaterThanOrEqual(20251001000000);
} else {
validateSpecialFields(value);
}
}
}
}
function isUUID(value: any): boolean {
if (typeof value !== 'string') return false;
const uuidRegex = /^[^-]{8}-[^-]{4}-[^-]{4}-[^-]{4}-[^-]{12}$/;
return uuidRegex.test(value);
}
function isValidISODate(value: any): boolean {
const isoRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d+)?Z$/;
if (!isoRegex.test(value)) return false;
const date = new Date(value);
return !isNaN(date.getTime());
}
function runImport(pathToFile: string) {
const importContainerId = runDockerComposeCommand([
'run',
'-d',
'-v',
`${pathToFile}:/app/data/pocket-id-export.zip`,
containerName,
'/app/pocket-id',
'import',
'--path',
'/app/data/pocket-id-export.zip',
'--yes'
]);
try {
runDockerCommand(['wait', importContainerId]);
} finally {
runDockerCommand(['rm', '-f', importContainerId]);
}
}
function runImportFromStdin(archive: Buffer): void {
runDockerComposeCommandRaw(
['run', '--rm', '-T', containerName, '/app/pocket-id', 'import', '--yes', '--path', '-'],
{ input: archive }
);
}
function runExport(outputFile: string): void {
const containerId = runDockerComposeCommand([
'run',
'-d',
containerName,
'/app/pocket-id',
'export',
'--path',
'/app/data/pocket-id-export.zip'
]);
try {
// Wait until export finishes
runDockerCommand(['wait', containerId]);
runDockerCommand(['cp', `${containerId}:/app/data/pocket-id-export.zip`, outputFile]);
} finally {
runDockerCommand(['rm', '-f', containerId]);
}
expect(fs.existsSync(outputFile)).toBe(true);
}
function runExportToStdout(): Buffer {
const res = runDockerComposeCommandRaw([
'run',
'--rm',
'-T',
containerName,
'/app/pocket-id',
'export',
'--path',
'-'
]);
fs.writeFileSync('export-stdout.txt', res);
return res;
}
function unzipExport(zipFile: string, destDir: string): void {
fs.rmSync(destDir, { recursive: true, force: true });
const zip = new AdmZip(zipFile);
zip.extractAllTo(destDir, true);
}
function unzipExportBuffer(zipBuffer: Buffer, destDir: string): void {
fs.rmSync(destDir, { recursive: true, force: true });
const zip = new AdmZip(zipBuffer);
zip.extractAllTo(destDir, true);
}
function hashFile(filePath: string): string {
const buffer = fs.readFileSync(filePath);
return crypto.createHash('sha256').update(buffer).digest('hex');
}
function getAllFiles(dir: string, root = dir): string[] {
return fs.readdirSync(dir).flatMap((entry) => {
if (['.DS_Store', 'database.json'].includes(entry)) return [];
const fullPath = path.join(dir, entry);
const stat = fs.statSync(fullPath);
return stat.isDirectory() ? getAllFiles(fullPath, root) : [path.relative(root, fullPath)];
});
}
function hashAllFiles(dir: string): Record<string, string> {
const files = getAllFiles(dir);
const hashes: Record<string, string> = {};
for (const relativePath of files) {
const fullPath = path.join(dir, relativePath);
hashes[relativePath] = hashFile(fullPath);
}
return hashes;
}
function runDockerCommand(args: string[], options?: ExecFileSyncOptions): string {
return execFileSync('docker', args, {
cwd: setupDir,
stdio: 'pipe',
maxBuffer: dockerCommandMaxBuffer,
...options
})
.toString()
.trim();
}
function runDockerComposeCommand(args: string[]): string {
return runDockerComposeCommandRaw(args).toString().trim();
}
function runDockerComposeCommandRaw(args: string[], options?: ExecFileSyncOptions): Buffer {
return execFileSync('docker', dockerComposeArgs(args), {
cwd: setupDir,
stdio: 'pipe',
maxBuffer: dockerCommandMaxBuffer,
...options
}) as Buffer;
}
function dockerComposeArgs(args: string[]): string[] {
let dockerComposeFile = 'docker-compose.yml';
switch (mode) {
case 'postgres':
dockerComposeFile = 'docker-compose-postgres.yml';
break;
case 's3':
dockerComposeFile = 'docker-compose-s3.yml';
break;
}
return ['compose', '-f', dockerComposeFile, ...args];
}

View File

@@ -1,8 +0,0 @@
import fs from 'fs';
import { tmpDir } from 'utils/fs.util';
async function globalSetup() {
await fs.promises.mkdir(tmpDir, { recursive: true });
}
export default globalSetup;

View File

@@ -1,8 +0,0 @@
import fs from 'fs';
import { tmpDir } from 'utils/fs.util';
async function globalTeardown() {
await fs.promises.rm(tmpDir, { recursive: true, force: true });
}
export default globalTeardown;

View File

@@ -20,9 +20,9 @@ test.describe('Create OIDC client', () => {
await page.getByTestId('callback-url-2').fill(oidcClient.secondCallbackUrl);
await page.locator('[role="tab"][data-value="light-logo"]').first().click();
await page.setInputFiles('#oidc-client-logo-light', 'resources/images/pingvin-share-logo.png');
await page.setInputFiles('#oidc-client-logo-light', 'assets/pingvin-share-logo.png');
await page.locator('[role="tab"][data-value="dark-logo"]').first().click();
await page.setInputFiles('#oidc-client-logo-dark', 'resources/images/pingvin-share-logo.png');
await page.setInputFiles('#oidc-client-logo-dark', 'assets/pingvin-share-logo.png');
if (clientId) {
await page.getByRole('button', { name: 'Show Advanced Options' }).click();
@@ -71,9 +71,9 @@ test('Edit OIDC client', async ({ page }) => {
await page.getByLabel('Name').fill('Nextcloud updated');
await page.getByTestId('callback-url-1').first().fill('http://nextcloud-updated/auth/callback');
await page.locator('[role="tab"][data-value="light-logo"]').first().click();
await page.setInputFiles('#oidc-client-logo-light', 'resources/images/nextcloud-logo.png');
await page.setInputFiles('#oidc-client-logo-light', 'assets/nextcloud-logo.png');
await page.locator('[role="tab"][data-value="dark-logo"]').first().click();
await page.setInputFiles('#oidc-client-logo-dark', 'resources/images/nextcloud-logo.png');
await page.setInputFiles('#oidc-client-logo-dark', 'assets/nextcloud-logo.png');
await page.getByLabel('Client Launch URL').fill(oidcClient.launchURL);
await page.getByRole('button', { name: 'Save' }).click();

View File

@@ -25,7 +25,7 @@ test.describe('Initial User Signup', () => {
});
test('Initial Signup - success flow', async ({ page }) => {
await cleanupBackend({ skipSeed: true });
await cleanupBackend(true);
await page.goto('/setup');
await page.getByLabel('First name').fill('Jane');
await page.getByLabel('Last name').fill('Smith');

View File

@@ -1,10 +1,6 @@
{
"compilerOptions": {
"baseUrl": ".",
"lib": ["ES2022"],
"esModuleInterop": true,
"module": "es2022",
"moduleResolution": "node",
"target": "es2022"
"lib": ["ES2022"]
}
}

View File

@@ -1,9 +1,9 @@
import playwrightConfig from '../playwright.config';
export async function cleanupBackend({ skipSeed = false, skipLdapSetup = false } = {}) {
export async function cleanupBackend(skipSeed = false) {
const url = new URL('/api/test/reset', playwrightConfig.use!.baseURL);
if (process.env.SKIP_LDAP_TESTS === 'true' || skipSeed || skipLdapSetup) {
if (process.env.SKIP_LDAP_TESTS === 'true' || skipSeed) {
url.searchParams.append('skip-ldap', 'true');
}

View File

@@ -1,7 +0,0 @@
import path from 'path';
export const tmpDir = pathFromRoot('.tmp');
export function pathFromRoot(p: string): string {
return path.resolve(path.dirname(new URL(import.meta.url).pathname), '..', p);
}